X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/bsd/vfs/vfs_subr.c diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c index 462fbef79..6b16ca6cb 100644 --- a/bsd/vfs/vfs_subr.c +++ b/bsd/vfs/vfs_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 Apple Inc. All rights reserved. + * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -76,7 +76,6 @@ * External virtual filesystem routines */ - #include #include #include @@ -105,14 +104,19 @@ #include #include #include +#include #include +#include #include #include #include - +#include #include +#include +#include +#include #include @@ -123,7 +127,7 @@ #include /* kalloc()/kfree() */ #include /* delay_for_interval() */ #include /* OSAddAtomic() */ - +#include #ifdef JOE_DEBUG #include @@ -163,6 +167,8 @@ extern void memory_object_mark_unused( memory_object_control_t control, boolean_t rage); +extern void memory_object_mark_io_tracking( + memory_object_control_t control); /* XXX next protptype should be from */ extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int); @@ -178,14 +184,17 @@ extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval); __private_extern__ void vntblinit(void); __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1, unsigned int val2); -__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int); +__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t, + enum uio_seg, int); extern int system_inshutdown; static void vnode_list_add(vnode_t); +static void vnode_async_list_add(vnode_t); static void vnode_list_remove(vnode_t); static void vnode_list_remove_locked(vnode_t); +static void vnode_abort_advlocks(vnode_t); static errno_t vnode_drain(vnode_t); static void vgone(vnode_t, int flags); static void vclean(vnode_t vp, int flag); @@ -197,6 +206,8 @@ static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev); static int vnode_reload(vnode_t); static int vnode_isinuse_locked(vnode_t, int, int); +static int unmount_callback(mount_t, __unused void *); + static void insmntque(vnode_t vp, mount_t mp); static int mount_getvfscnt(void); static int mount_fillfsids(fsid_t *, int ); @@ -216,6 +227,12 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); static void record_vp(vnode_t vp, int count); #endif +#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) +extern int bootarg_no_vnode_jetsam; /* from bsd_init.c default value is 0 */ +#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ + +boolean_t root_is_CF_drive = FALSE; + #if CONFIG_TRIGGERS static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external); static void vnode_resolver_detach(vnode_t); @@ -223,6 +240,8 @@ static void vnode_resolver_detach(vnode_t); TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */ +TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list; + TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */ struct timeval rage_tv; @@ -262,7 +281,6 @@ static int nummounts = 0; } while(0) - /* remove a vnode from dead vnode list */ #define VREMDEAD(fun, vp) \ do { \ @@ -274,6 +292,17 @@ static int nummounts = 0; } while(0) +/* remove a vnode from async work vnode list */ +#define VREMASYNC_WORK(fun, vp) \ + do { \ + VLISTCHECK((fun), (vp), "async_work"); \ + TAILQ_REMOVE(&vnode_async_work_list, (vp), 
v_freelist); \ + VLISTNONE((vp)); \ + vp->v_listflag &= ~VLIST_ASYNC_WORK; \ + async_work_vnodes--; \ + } while(0) + + /* remove a vnode from rage vnode list */ #define VREMRAGE(fun, vp) \ do { \ @@ -304,15 +333,21 @@ u_int32_t vnodetarget; /* target for vnreclaim() */ */ #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */ + +static void async_work_continue(void); + /* * Initialize the vnode management data structures. */ __private_extern__ void vntblinit(void) { + thread_t thread = THREAD_NULL; + TAILQ_INIT(&vnode_free_list); TAILQ_INIT(&vnode_rage_list); TAILQ_INIT(&vnode_dead_list); + TAILQ_INIT(&vnode_async_work_list); TAILQ_INIT(&mountlist); if (!vnodetarget) @@ -329,6 +364,12 @@ vntblinit(void) * we want to cache */ (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN); + + /* + * create worker threads + */ + kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread); + thread_deallocate(thread); } /* Reset the VM Object Cache with the values passed in */ @@ -466,13 +507,7 @@ vnode_hascleanblks(vnode_t vp) void vnode_iterate_setup(mount_t mp) { - while (mp->mnt_lflag & MNT_LITER) { - mp->mnt_lflag |= MNT_LITERWAIT; - msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL); - } - mp->mnt_lflag |= MNT_LITER; - } int @@ -481,28 +516,31 @@ vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags) vnode_t vp; TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { - /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */ - if (vp->v_tag == VT_UDF) - return 0; if (vp->v_type == VDIR) continue; if (vp == skipvp) continue; - if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || - (vp->v_flag & VNOFLUSH))) + if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) continue; if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) continue; - if ((flags & WRITECLOSE) && - (vp->v_writecount == 0 || vp->v_type != VREG)) + if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) continue; + /* Look for busy vnode */ - if (((vp->v_usecount != 0) && - ((vp->v_usecount - vp->v_kusecount) != 0))) - return(1); + if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) { + return 1; + + } else if (vp->v_iocount > 0) { + /* Busy if iocount is > 0 for more than 3 seconds */ + tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz); + if (vp->v_iocount > 0) + return 1; + continue; } + } - return(0); + return 0; } /* @@ -578,13 +616,42 @@ void vnode_iterate_clear(mount_t mp) { mp->mnt_lflag &= ~MNT_LITER; - if (mp->mnt_lflag & MNT_LITERWAIT) { - mp->mnt_lflag &= ~MNT_LITERWAIT; - wakeup(mp); - } } +#include + +struct vnode_iterate_panic_hook { + panic_hook_t hook; + mount_t mp; + struct vnode *vp; +}; + +static void vnode_iterate_panic_hook(panic_hook_t *hook_) +{ + extern int kdb_log(const char *fmt, ...); + struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_; + panic_phys_range_t range; + uint64_t phys; + + if (panic_phys_range_before(hook->mp, &phys, &range)) { + kdb_log("mp = %p, phys = %p, prev (%p: %p-%p)\n", + hook->mp, phys, range.type, range.phys_start, + range.phys_start + range.len); + } else { + kdb_log("mp = %p, phys = %p, prev (!)\n", hook->mp, phys); + } + + if (panic_phys_range_before(hook->vp, &phys, &range)) { + kdb_log("vp = %p, phys = %p, prev (%p: %p-%p)\n", + hook->vp, phys, range.type, range.phys_start, + range.phys_start + range.len); + } else { + kdb_log("vp = %p, phys = %p, prev (!)\n", 
hook->vp, phys); + } + panic_dump_mem((void *)(((vm_offset_t)hook->mp -4096) & ~4095), 12288); +} + int vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), void *arg) @@ -593,22 +660,39 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), int vid, retval; int ret = 0; + /* + * The mount iterate mutex is held for the duration of the iteration. + * This can be done by a state flag on the mount structure but we can + * run into priority inversion issues sometimes. + * Using a mutex allows us to benefit from the priority donation + * mechanisms in the kernel for locks. This mutex should never be + * acquired in spin mode and it should be acquired before attempting to + * acquire the mount lock. + */ + mount_iterate_lock(mp); + mount_lock(mp); vnode_iterate_setup(mp); - /* it is returns 0 then there is nothing to do */ + /* If it returns 0 then there is nothing to do */ retval = vnode_iterate_prepare(mp); if (retval == 0) { vnode_iterate_clear(mp); mount_unlock(mp); + mount_iterate_unlock(mp); return(ret); } - + + struct vnode_iterate_panic_hook hook; + hook.mp = mp; + hook.vp = NULL; + panic_hook(&hook.hook, vnode_iterate_panic_hook); /* iterate over all the vnodes */ while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { vp = TAILQ_FIRST(&mp->mnt_workerqueue); + hook.vp = vp; TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes); TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes); vid = vp->v_id; @@ -659,9 +743,11 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), } out: + panic_unhook(&hook.hook); (void)vnode_iterate_reloadq(mp); vnode_iterate_clear(mp); mount_unlock(mp); + mount_iterate_unlock(mp); return (ret); } @@ -677,6 +763,18 @@ mount_unlock_renames(mount_t mp) lck_mtx_unlock(&mp->mnt_renamelock); } +void +mount_iterate_lock(mount_t mp) +{ + lck_mtx_lock(&mp->mnt_iter_lock); +} + +void +mount_iterate_unlock(mount_t mp) +{ + lck_mtx_unlock(&mp->mnt_iter_lock); +} + void mount_lock(mount_t mp) { @@ -823,37 +921,34 @@ vfs_busy(mount_t mp, int flags) restart: if (mp->mnt_lflag & MNT_LDEAD) - return(ENOENT); - - if (mp->mnt_lflag & MNT_LUNMOUNT) { - if (flags & LK_NOWAIT) - return (ENOENT); + return (ENOENT); - mount_lock(mp); + mount_lock(mp); - if (mp->mnt_lflag & MNT_LDEAD) { + if (mp->mnt_lflag & MNT_LUNMOUNT) { + if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) { mount_unlock(mp); - return(ENOENT); - } - if (mp->mnt_lflag & MNT_LUNMOUNT) { - mp->mnt_lflag |= MNT_LWAIT; - /* - * Since all busy locks are shared except the exclusive - * lock granted when unmounting, the only place that a - * wakeup needs to be done is at the release of the - * exclusive lock at the end of dounmount. - */ - msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL); return (ENOENT); } - mount_unlock(mp); + + /* + * Since all busy locks are shared except the exclusive + * lock granted when unmounting, the only place that a + * wakeup needs to be done is at the release of the + * exclusive lock at the end of dounmount. + */ + mp->mnt_lflag |= MNT_LWAIT; + msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL); + return (ENOENT); } + mount_unlock(mp); + lck_rw_lock_shared(&mp->mnt_rwlock); /* - * until we are granted the rwlock, it's possible for the mount point to - * change state, so reevaluate before granting the vfs_busy + * Until we are granted the rwlock, it's possible for the mount point to + * change state, so re-evaluate before granting the vfs_busy. 
*/ if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) { lck_rw_done(&mp->mnt_rwlock); @@ -865,7 +960,6 @@ restart: /* * Free a busy filesystem. */ - void vfs_unbusy(mount_t mp) { @@ -939,7 +1033,7 @@ vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname) vfsp->vfc_refcount++; mount_list_unlock(); - strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN); + strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN); mp->mnt_vfsstat.f_mntonname[0] = '/'; /* XXX const poisoning layering violation */ (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL); @@ -1011,13 +1105,20 @@ vfs_mountroot(void) bdevvp_rootvp = rootvp; for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { - if (vfsp->vfc_mountroot == NULL) + if (vfsp->vfc_mountroot == NULL + && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) { continue; + } mp = vfs_rootmountalloc_internal(vfsp, "root_device"); mp->mnt_devvp = rootvp; - if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) { + if (vfsp->vfc_mountroot) + error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx); + else + error = VFS_MOUNT(mp, rootvp, 0, ctx); + + if (!error) { if ( bdevvp_rootvp != rootvp ) { /* * rootvp changed... @@ -1046,6 +1147,10 @@ vfs_mountroot(void) */ vfs_init_io_attributes(rootvp, mp); + if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) { + root_is_CF_drive = TRUE; + } + /* * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS. */ @@ -1056,6 +1161,12 @@ vfs_mountroot(void) mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT; } + uint32_t speed; + + if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) speed = 128; + else if (MNTK_SSD & mp->mnt_kern_flag) speed = 7*256; + else speed = 256; + vc_progress_setdiskspeed(speed); /* * Probe root file system for additional features. */ @@ -1079,6 +1190,11 @@ vfs_mountroot(void) (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) { mp->mnt_kern_flag |= MNTK_PATH_FROM_ID; } + + if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) && + (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) { + mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS; + } } /* @@ -1174,7 +1290,6 @@ vfs_getnewfsid(struct mount *mp) fsid_t tfsid; int mtype; - mount_t nmp; mount_list_lock(); @@ -1185,13 +1300,12 @@ vfs_getnewfsid(struct mount *mp) tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); tfsid.val[1] = mtype; - TAILQ_FOREACH(nmp, &mountlist, mnt_list) { - while (vfs_getvfs_locked(&tfsid)) { - if (++mntid_gen == 0) - mntid_gen++; - tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); - } + while (vfs_getvfs_locked(&tfsid)) { + if (++mntid_gen == 0) + mntid_gen++; + tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); } + mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0]; mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1]; mount_list_unlock(); @@ -1201,9 +1315,14 @@ vfs_getnewfsid(struct mount *mp) * Routines having to do with the management of the vnode table. */ extern int (**dead_vnodeop_p)(void *); -long numvnodes, freevnodes, deadvnodes; +long numvnodes, freevnodes, deadvnodes, async_work_vnodes; +int async_work_timed_out = 0; +int async_work_handled = 0; +int dead_vnode_wanted = 0; +int dead_vnode_waited = 0; + /* * Move a vnode from one mount queue to another. 
*/ @@ -1333,6 +1452,7 @@ bdevvp(dev_t dev, vnode_t *vpp) return (0); } + /* * Check to see if the new vnode represents a special device * for which we already have a vnode (either because of @@ -1394,6 +1514,8 @@ found_alias: nvp->v_specflags = 0; nvp->v_speclastr = -1; nvp->v_specinfo->si_opencount = 0; + nvp->v_specinfo->si_initted = 0; + nvp->v_specinfo->si_throttleable = 0; SPECHASH_LOCK(); @@ -1553,6 +1675,34 @@ out: } +boolean_t +vnode_on_reliable_media(vnode_t vp) +{ + if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL) ) + return (TRUE); + return (FALSE); +} + +static void +vnode_async_list_add(vnode_t vp) +{ + vnode_list_lock(); + + if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE|VL_DEAD))) + panic("vnode_async_list_add: %p is in wrong state", vp); + + TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist); + vp->v_listflag |= VLIST_ASYNC_WORK; + + async_work_vnodes++; + + vnode_list_unlock(); + + wakeup(&vnode_async_work_list); + +} + + /* * put the vnode on appropriate free list. * called with vnode LOCKED @@ -1560,15 +1710,38 @@ out: static void vnode_list_add(vnode_t vp) { + boolean_t need_dead_wakeup = FALSE; + #if DIAGNOSTIC lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); #endif + +again: + /* * if it is already on a list or non zero references return */ if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) return; + /* + * In vclean, we might have deferred ditching locked buffers + * because something was still referencing them (indicated by + * usecount). We can ditch them now. + */ + if (ISSET(vp->v_lflag, VL_DEAD) + && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) { + ++vp->v_iocount; // Probably not necessary, but harmless +#ifdef JOE_DEBUG + record_vp(vp, 1); +#endif + vnode_unlock(vp); + buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0); + vnode_lock(vp); + vnode_dropiocount(vp); + goto again; + } + vnode_list_lock(); if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) { @@ -1601,7 +1774,13 @@ vnode_list_add(vnode_t vp) TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist); vp->v_listflag |= VLIST_DEAD; deadvnodes++; - } else if ((vp->v_flag & VAGE)) { + + if (dead_vnode_wanted) { + dead_vnode_wanted--; + need_dead_wakeup = TRUE; + } + + } else if ( (vp->v_flag & VAGE) ) { TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); vp->v_flag &= ~VAGE; freevnodes++; @@ -1611,6 +1790,9 @@ vnode_list_add(vnode_t vp) } } vnode_list_unlock(); + + if (need_dead_wakeup == TRUE) + wakeup_one((caddr_t)&dead_vnode_wanted); } @@ -1631,6 +1813,8 @@ vnode_list_remove_locked(vnode_t vp) VREMRAGE("vnode_list_remove", vp); else if (vp->v_listflag & VLIST_DEAD) VREMDEAD("vnode_list_remove", vp); + else if (vp->v_listflag & VLIST_ASYNC_WORK) + VREMASYNC_WORK("vnode_list_remove", vp); else VREMFREE("vnode_list_remove", vp); } @@ -1709,8 +1893,8 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); if (fmode & FWRITE) { - if (--vp->v_writecount < 0) - panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag); + if (--vp->v_writecount < 0) + panic("vnode_rele_ext: vp %p writecount -ve : %d. 
v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag); } if (fmode & O_EVTONLY) { if (--vp->v_kusecount < 0) @@ -1720,33 +1904,38 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) { - /* + /* * vnode is still busy... if we're the last * usecount, mark for a future call to VNOP_INACTIVE * when the iocount finally drops to 0 */ - if (vp->v_usecount == 0) { - vp->v_lflag |= VL_NEEDINACTIVE; + if (vp->v_usecount == 0) { + vp->v_lflag |= VL_NEEDINACTIVE; vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT); } goto done; } vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT); - if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) { - /* + if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) { + /* * vnode is being cleaned, or * we've requested that we don't reenter - * the filesystem on this release... in - * this case, we'll mark the vnode aged - * if it's been marked for termination + * the filesystem on this release...in + * the latter case, we'll mark the vnode aged */ - if (dont_reenter) { - if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) - vp->v_lflag |= VL_NEEDINACTIVE; - vp->v_flag |= VAGE; + if (dont_reenter) { + if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) { + vp->v_lflag |= VL_NEEDINACTIVE; + + if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) { + vnode_async_list_add(vp); + goto done; + } + } + vp->v_flag |= VAGE; } - vnode_list_add(vp); + vnode_list_add(vp); goto done; } @@ -1814,9 +2003,6 @@ done: */ #if DIAGNOSTIC int busyprt = 0; /* print out busy vnodes */ -#if 0 -struct ctldebug debug1 = { "busyprt", &busyprt }; -#endif /* 0 */ #endif int @@ -1828,6 +2014,11 @@ vflush(struct mount *mp, struct vnode *skipvp, int flags) int retval; unsigned int vid; + /* + * See comments in vnode_iterate() for the rationale for this lock + */ + mount_iterate_lock(mp); + mount_lock(mp); vnode_iterate_setup(mp); /* @@ -1841,16 +2032,18 @@ vflush(struct mount *mp, struct vnode *skipvp, int flags) if (vnode_umount_preflight(mp, skipvp, flags)) { vnode_iterate_clear(mp); mount_unlock(mp); + mount_iterate_unlock(mp); return(EBUSY); } } loop: - /* it is returns 0 then there is nothing to do */ + /* If it returns 0 then there is nothing to do */ retval = vnode_iterate_prepare(mp); if (retval == 0) { vnode_iterate_clear(mp); mount_unlock(mp); + mount_iterate_unlock(mp); return(retval); } @@ -1869,7 +2062,13 @@ loop: vnode_lock_spin(vp); - if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) { + // If vnode is already terminating, wait for it... 
+ while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) { + vp->v_lflag |= VL_TERMWANT; + msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL); + } + + if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) { vnode_unlock(vp); mount_lock(mp); continue; @@ -1945,6 +2144,7 @@ loop: #ifdef JOE_DEBUG record_vp(vp, 1); #endif + vnode_abort_advlocks(vp); vnode_reclaim_internal(vp, 1, 1, 0); vnode_dropiocount(vp); vnode_list_add(vp); @@ -1984,6 +2184,7 @@ loop: } vnode_iterate_clear(mp); mount_unlock(mp); + mount_iterate_unlock(mp); if (busy && ((flags & FORCECLOSE)==0)) return (EBUSY); @@ -2031,12 +2232,6 @@ vclean(vnode_t vp, int flags) vp->v_lflag |= VL_TERMINATE; - /* - * remove the vnode from any mount list - * it might be on... - */ - insmntque(vp, (struct mount *)0); - #if NAMEDSTREAMS is_namedstream = vnode_isnamedstream(vp); #endif @@ -2063,8 +2258,16 @@ vclean(vnode_t vp, int flags) else #endif { - VNOP_FSYNC(vp, MNT_WAIT, ctx); - buf_invalidateblks(vp, BUF_WRITE_DATA | BUF_INVALIDATE_LOCKED, 0, 0); + VNOP_FSYNC(vp, MNT_WAIT, ctx); + + /* + * If the vnode is still in use (by the journal for + * example) we don't want to invalidate locked buffers + * here. In that case, either the journal will tidy them + * up, or we will deal with it when the usecount is + * finally released in vnode_rele_internal. + */ + buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0); } if (UBCINFOEXISTS(vp)) /* @@ -2081,7 +2284,7 @@ vclean(vnode_t vp, int flags) /* Delete the shadow stream file before we reclaim its vnode */ if (vnode_isshadow(vp)) { - vnode_relenamedstream(pvp, vp, ctx); + vnode_relenamedstream(pvp, vp); } /* @@ -2102,6 +2305,8 @@ vclean(vnode_t vp, int flags) * Destroy ubc named reference * cluster_release is done on this path * along with dropping the reference on the ucred + * (and in the case of forced unmount of an mmap-ed file, + * the ubc reference on the vnode is dropped here too). */ ubc_destroy_named(vp); @@ -2124,12 +2329,21 @@ vclean(vnode_t vp, int flags) vnode_lock(vp); + /* + * Remove the vnode from any mount list it might be on. It is not + * safe to do this any earlier because unmount needs to wait for + * any vnodes to terminate and it cannot do that if it cannot find + * them. 
+ */ + insmntque(vp, (struct mount *)0); + vp->v_mount = dead_mountp; vp->v_op = dead_vnodeop_p; vp->v_tag = VT_NON; vp->v_data = NULL; vp->v_lflag |= VL_DEAD; + vp->v_flag &= ~VISDIRTY; if (already_terminating == 0) { vp->v_lflag &= ~VL_TERMINATE; @@ -2186,15 +2400,25 @@ vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) SPECHASH_LOCK(); break; } - vnode_reclaim_internal(vq, 0, 1, 0); - vnode_put(vq); + vnode_lock(vq); + if (!(vq->v_lflag & VL_TERMINATE)) { + vnode_reclaim_internal(vq, 1, 1, 0); + } + vnode_put_locked(vq); + vnode_unlock(vq); SPECHASH_LOCK(); break; } } SPECHASH_UNLOCK(); } - vnode_reclaim_internal(vp, 0, 0, REVOKEALL); + vnode_lock(vp); + if (vp->v_lflag & VL_TERMINATE) { + vnode_unlock(vp); + return (ENOENT); + } + vnode_reclaim_internal(vp, 1, 0, REVOKEALL); + vnode_unlock(vp); return (0); } @@ -2340,6 +2564,10 @@ vcount(vnode_t vp) int count; int vid; + if (!vnode_isspec(vp)) { + return (vp->v_usecount - vp->v_kusecount); + } + loop: if (!vnode_isaliased(vp)) return (vp->v_specinfo->si_opencount); @@ -2519,8 +2747,7 @@ set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) } -__private_extern__ int -is_package_name(const char *name, int len) +int is_package_name(const char *name, int len) { int i, extlen; const char *ptr, *name_ext; @@ -2629,131 +2856,139 @@ int vn_searchfs_inappropriate_name(const char *name, int len) { */ extern unsigned int vfs_nummntops; +/* + * The VFS_NUMMNTOPS shouldn't be at name[1] since + * is a VFS generic variable. Since we no longer support + * VT_UFS, we reserve its value to support this sysctl node. + * + * It should have been: + * name[0]: VFS_GENERIC + * name[1]: VFS_NUMMNTOPS + */ +SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vfs_nummntops, 0, ""); + +int +vfs_sysctl(int *name __unused, u_int namelen __unused, + user_addr_t oldp __unused, size_t *oldlenp __unused, + user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused); + int -vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen, proc_t p) +vfs_sysctl(int *name __unused, u_int namelen __unused, + user_addr_t oldp __unused, size_t *oldlenp __unused, + user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused) { - struct vfstable *vfsp; - int *username; - u_int usernamelen; - int error; - struct vfsconf vfsc; + return (EINVAL); +} - /* All non VFS_GENERIC and in VFS_GENERIC, - * VFS_MAXTYPENUM, VFS_CONF, VFS_SET_PACKAGE_EXTS - * needs to have root priv to have modifiers. - * For rest the userland_sysctl(CTLFLAG_ANYBODY) would cover. - */ - if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) || - ((name[1] == VFS_MAXTYPENUM) || - (name[1] == VFS_CONF) || - (name[1] == VFS_SET_PACKAGE_EXTS))) - && (error = suser(kauth_cred_get(), &p->p_acflag))) { - return(error); - } - /* - * The VFS_NUMMNTOPS shouldn't be at name[0] since - * is a VFS generic variable. So now we must check - * namelen so we don't end up covering any UFS - * variables (sinc UFS vfc_typenum is 1). 
- * - * It should have been: - * name[0]: VFS_GENERIC - * name[1]: VFS_NUMMNTOPS - */ - if (namelen == 1 && name[0] == VFS_NUMMNTOPS) { - return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops)); - } - /* all sysctl names at this level are at least name and field */ - if (namelen < 2) - return (EISDIR); /* overloaded */ - if (name[0] != VFS_GENERIC) { +// +// The following code disallows specific sysctl's that came through +// the direct sysctl interface (vfs_sysctl_node) instead of the newer +// sysctl_vfs_ctlbyfsid() interface. We can not allow these selectors +// through vfs_sysctl_node() because it passes the user's oldp pointer +// directly to the file system which (for these selectors) casts it +// back to a struct sysctl_req and then proceed to use SYSCTL_IN() +// which jumps through an arbitrary function pointer. When called +// through the sysctl_vfs_ctlbyfsid() interface this does not happen +// and so it's safe. +// +// Unfortunately we have to pull in definitions from AFP and SMB and +// perform explicit name checks on the file system to determine if +// these selectors are being used. +// - mount_list_lock(); - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) - if (vfsp->vfc_typenum == name[0]) { - vfsp->vfc_refcount++; - break; - } - mount_list_unlock(); +#define AFPFS_VFS_CTL_GETID 0x00020001 +#define AFPFS_VFS_CTL_NETCHANGE 0x00020002 +#define AFPFS_VFS_CTL_VOLCHANGE 0x00020003 - if (vfsp == NULL) - return (ENOTSUP); +#define SMBFS_SYSCTL_REMOUNT 1 +#define SMBFS_SYSCTL_REMOUNT_INFO 2 +#define SMBFS_SYSCTL_GET_SERVER_SHARE 3 - /* XXX current context proxy for proc p? */ - error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, - oldp, oldlenp, newp, newlen, - vfs_context_current())); - mount_list_lock(); - vfsp->vfc_refcount--; - mount_list_unlock(); - return error; - } - switch (name[1]) { - case VFS_MAXTYPENUM: - return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf)); - case VFS_CONF: - if (namelen < 3) - return (ENOTDIR); /* overloaded */ +static int +is_bad_sysctl_name(struct vfstable *vfsp, int selector_name) +{ + switch(selector_name) { + case VFS_CTL_QUERY: + case VFS_CTL_TIMEO: + case VFS_CTL_NOLOCKS: + case VFS_CTL_NSTATUS: + case VFS_CTL_SADDR: + case VFS_CTL_DISC: + case VFS_CTL_SERVERINFO: + return 1; - mount_list_lock(); - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) - if (vfsp->vfc_typenum == name[2]) - break; + default: + break; + } - if (vfsp == NULL) { - mount_list_unlock(); - return (ENOTSUP); + // the more complicated check for some of SMB's special values + if (strcmp(vfsp->vfc_name, "smbfs") == 0) { + switch(selector_name) { + case SMBFS_SYSCTL_REMOUNT: + case SMBFS_SYSCTL_REMOUNT_INFO: + case SMBFS_SYSCTL_GET_SERVER_SHARE: + return 1; + } + } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) { + switch(selector_name) { + case AFPFS_VFS_CTL_GETID: + case AFPFS_VFS_CTL_NETCHANGE: + case AFPFS_VFS_CTL_VOLCHANGE: + return 1; } - - vfsc.vfc_reserved1 = 0; - bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name)); - vfsc.vfc_typenum = vfsp->vfc_typenum; - vfsc.vfc_refcount = vfsp->vfc_refcount; - vfsc.vfc_flags = vfsp->vfc_flags; - vfsc.vfc_reserved2 = 0; - vfsc.vfc_reserved3 = 0; - - mount_list_unlock(); - return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc, - sizeof(struct vfsconf))); - - case VFS_SET_PACKAGE_EXTS: - return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]); } - /* - * We need to get back into the general MIB, so we need to re-prepend - * CTL_VFS to our name and try userland_sysctl(). 
- */ - usernamelen = namelen + 1; - MALLOC(username, int *, usernamelen * sizeof(*username), - M_TEMP, M_WAITOK); - bcopy(name, username + 1, namelen * sizeof(*name)); - username[0] = CTL_VFS; - error = userland_sysctl(p, username, usernamelen, oldp, - oldlenp, newp, newlen, oldlenp); - FREE(username, M_TEMP); - return (error); + + // + // If we get here we passed all the checks so the selector is ok + // + return 0; } -/* - * Dump vnode list (via sysctl) - defunct - * use "pstat" instead - */ -/* ARGSUSED */ -int -sysctl_vnode -(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req) + +int vfs_sysctl_node SYSCTL_HANDLER_ARGS { - return(EINVAL); -} + int *name, namelen; + struct vfstable *vfsp; + int error; + int fstypenum; + + fstypenum = oidp->oid_number; + name = arg1; + namelen = arg2; + + /* all sysctl names at this level should have at least one name slot for the FS */ + if (namelen < 1) + return (EISDIR); /* overloaded */ + + mount_list_lock(); + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (vfsp->vfc_typenum == fstypenum) { + vfsp->vfc_refcount++; + break; + } + mount_list_unlock(); + + if (vfsp == NULL) { + return (ENOTSUP); + } -SYSCTL_PROC(_kern, KERN_VNODE, vnode, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, - 0, 0, sysctl_vnode, "S,", ""); + if (is_bad_sysctl_name(vfsp, name[0])) { + printf("vfs: bad selector 0x%.8x for old-style sysctl(). use the sysctl-by-fsid interface instead\n", name[0]); + return EPERM; + } + + error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen, req->oldptr, &req->oldlen, req->newptr, req->newlen, vfs_context_current()); + + mount_list_lock(); + vfsp->vfc_refcount--; + mount_list_unlock(); + return error; +} /* * Check to see if a filesystem is mounted on a block device. @@ -2785,41 +3020,68 @@ out: return (error); } +struct unmount_info { + int u_errs; // Total failed unmounts + int u_busy; // EBUSY failed unmounts +}; + +static int +unmount_callback(mount_t mp, void *arg) +{ + int error; + char *mntname; + struct unmount_info *uip = arg; + + mount_ref(mp, 0); + mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount() + + MALLOC_ZONE(mntname, void *, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (mntname) + strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN); + + error = dounmount(mp, MNT_FORCE, 1, vfs_context_current()); + if (error) { + uip->u_errs++; + printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error); + if (error == EBUSY) + uip->u_busy++; + } + if (mntname) + FREE_ZONE(mntname, MAXPATHLEN, M_NAMEI); + + return (VFS_RETURNED); +} + /* * Unmount all filesystems. The list is traversed in reverse order * of mounting to avoid dependencies. + * Busy mounts are retried. */ __private_extern__ void vfs_unmountall(void) { - struct mount *mp; - int error; + int mounts, sec = 1; + struct unmount_info ui; - /* - * Since this only runs when rebooting, it is not interlocked. 
- */ - mount_list_lock(); - while(!TAILQ_EMPTY(&mountlist)) { - mp = TAILQ_LAST(&mountlist, mntlist); - mount_list_unlock(); - error = dounmount(mp, MNT_FORCE, 0, vfs_context_current()); - if ((error != 0) && (error != EBUSY)) { - printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname); - printf("%d)\n", error); - mount_list_lock(); - TAILQ_REMOVE(&mountlist, mp, mnt_list); - continue; - } else if (error == EBUSY) { - /* If EBUSY is returned, the unmount was already in progress */ - printf("unmount of %p failed (", mp); - printf("BUSY)\n"); - } - mount_list_lock(); +retry: + ui.u_errs = ui.u_busy = 0; + vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui); + mounts = mount_getvfscnt(); + if (mounts == 0) + return; + + if (ui.u_busy > 0) { // Busy mounts - wait & retry + tsleep(&nummounts, PVFS, "busy mount", sec * hz); + sec *= 2; + if (sec <= 32) + goto retry; + printf("Unmounting timed out\n"); + } else if (ui.u_errs < mounts) { + // If the vfs_iterate missed mounts in progress - wait a bit + tsleep(&nummounts, PVFS, "missed mount", 2 * hz); } - mount_list_unlock(); } - /* * This routine is called from vnode_pager_deallocate out of the VM * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named @@ -2833,6 +3095,23 @@ vnode_pager_vrele(vnode_t vp) vnode_lock_spin(vp); vp->v_lflag &= ~VNAMED_UBC; + if (vp->v_usecount != 0) { + /* + * At the eleventh hour, just before the ubcinfo is + * destroyed, ensure the ubc-specific v_usecount + * reference has gone. We use v_usecount != 0 as a hint; + * ubc_unmap() does nothing if there's no mapping. + * + * This case is caused by coming here via forced unmount, + * versus the usual vm_object_deallocate() path. + * In the forced unmount case, ubc_destroy_named() + * releases the pager before memory_object_last_unmap() + * can be called. 
+ */ + vnode_unlock(vp); + ubc_unmap(vp); + vnode_lock_spin(vp); + } uip = vp->v_ubcinfo; vp->v_ubcinfo = UBC_INFO_NULL; @@ -2847,6 +3126,11 @@ vnode_pager_vrele(vnode_t vp) u_int32_t rootunit = (u_int32_t)-1; +#if CONFIG_IOSCHED +extern int lowpri_throttle_enabled; +extern int iosched_enabled; +#endif + errno_t vfs_init_io_attributes(vnode_t devvp, mount_t mp) { @@ -2860,11 +3144,14 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) off_t readsegsize = 0; off_t writesegsize = 0; off_t alignment = 0; - off_t ioqueue_depth = 0; + u_int32_t minsaturationbytecount = 0; + u_int32_t ioqueue_depth = 0; u_int32_t blksize; u_int64_t temp; u_int32_t features; vfs_context_t ctx = vfs_context_current(); + dk_corestorage_info_t cs_info; + boolean_t cs_present = FALSE;; int isssd = 0; int isvirtual = 0; @@ -2908,8 +3195,8 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) * and if those advertised constraints result in a smaller * limit for a given I/O */ - mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE; - mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE; + mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES; + mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES; if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) { if (isvirtual) @@ -3037,8 +3324,37 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) if (features & DK_FEATURE_FORCE_UNIT_ACCESS) mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED; - if (features & DK_FEATURE_UNMAP) - mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED; + + if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) { + mp->mnt_minsaturationbytecount = minsaturationbytecount; + } else { + mp->mnt_minsaturationbytecount = 0; + } + + if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) + cs_present = TRUE; + + if (features & DK_FEATURE_UNMAP) { + mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED; + + if (cs_present == TRUE) + mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED; + } + if (cs_present == TRUE) { + /* + * for now we'll use the following test as a proxy for + * the underlying drive being FUSION in nature + */ + if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) + mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE; + } + +#if CONFIG_IOSCHED + if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) { + mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED; + throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0); + } +#endif /* CONFIG_IOSCHED */ return (error); } @@ -3056,8 +3372,20 @@ vfs_event_init(void) } void -vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data) -{ +vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data) +{ + if (event == VQ_DEAD || event == VQ_NOTRESP) { + struct mount *mp = vfs_getvfs(fsid); + if (mp) { + mount_lock_spin(mp); + if (data) + mp->mnt_kern_flag &= ~MNT_LNOTRESP; // Now responding + else + mp->mnt_kern_flag |= MNT_LNOTRESP; // Not responding + mount_unlock(mp); + } + } + lck_mtx_lock(fs_klist_lock); KNOTE(&fs_klist, event); lck_mtx_unlock(fs_klist_lock); @@ -3295,10 +3623,12 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, sfs.f_ffree = (user64_long_t)sp->f_ffree; sfs.f_fsid = sp->f_fsid; sfs.f_owner = sp->f_owner; - +#ifdef NFSCLIENT if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN); - } else { + } else +#endif + { strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); } strlcpy(sfs.f_mntonname, 
sp->f_mntonname, MNAMELEN); @@ -3353,10 +3683,13 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, sfs.f_ffree = (user32_long_t)sp->f_ffree; sfs.f_fsid = sp->f_fsid; sfs.f_owner = sp->f_owner; - + +#ifdef NFS_CLIENT if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN); - } else { + } else +#endif + { strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); } strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); @@ -3378,20 +3711,27 @@ out: static int filt_fsattach(struct knote *kn); static void filt_fsdetach(struct knote *kn); static int filt_fsevent(struct knote *kn, long hint); +static int filt_fstouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_fsprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); struct filterops fs_filtops = { .f_attach = filt_fsattach, .f_detach = filt_fsdetach, .f_event = filt_fsevent, + .f_touch = filt_fstouch, + .f_process = filt_fsprocess, }; static int filt_fsattach(struct knote *kn) { - lck_mtx_lock(fs_klist_lock); - kn->kn_flags |= EV_CLEAR; KNOTE_ATTACH(&fs_klist, kn); lck_mtx_unlock(fs_klist_lock); + + /* + * filter only sees future events, + * so it can't be fired already. + */ return (0); } @@ -3418,6 +3758,52 @@ filt_fsevent(struct knote *kn, long hint) return (kn->kn_fflags != 0); } +static int +filt_fstouch(struct knote *kn, struct kevent_internal_s *kev) +{ + int res; + + lck_mtx_lock(fs_klist_lock); + + kn->kn_sfflags = kev->fflags; + if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) + kn->kn_udata = kev->udata; + + /* + * the above filter function sets bits even if nobody is looking for them. + * Just preserve those bits even in the new mask is more selective + * than before. + * + * For compatibility with previous implementations, we leave kn_fflags + * as they were before. + */ + //if (kn->kn_sfflags) + // kn->kn_fflags &= kn->kn_sfflags; + res = (kn->kn_fflags != 0); + + lck_mtx_unlock(fs_klist_lock); + + return res; +} + +static int +filt_fsprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev) +{ +#pragma unused(data) + int res; + + lck_mtx_lock(fs_klist_lock); + res = (kn->kn_fflags != 0); + if (res) { + *kev = kn->kn_kevent; + kn->kn_flags |= EV_CLEAR; /* automatic */ + kn->kn_fflags = 0; + kn->kn_data = 0; + } + lck_mtx_unlock(fs_klist_lock); + return res; +} + static int sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -3464,42 +3850,256 @@ sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp, return (0); } +static int +sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS +{ + int *name, namelen; + struct vfstable *vfsp; + struct vfsconf vfsc; + + (void)oidp; + name = arg1; + namelen = arg2; + + if (namelen < 1) { + return (EISDIR); + } else if (namelen > 1) { + return (ENOTDIR); + } + + mount_list_lock(); + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (vfsp->vfc_typenum == name[0]) + break; + + if (vfsp == NULL) { + mount_list_unlock(); + return (ENOTSUP); + } + + vfsc.vfc_reserved1 = 0; + bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name)); + vfsc.vfc_typenum = vfsp->vfc_typenum; + vfsc.vfc_refcount = vfsp->vfc_refcount; + vfsc.vfc_flags = vfsp->vfc_flags; + vfsc.vfc_reserved2 = 0; + vfsc.vfc_reserved3 = 0; + + mount_list_unlock(); + return (SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf))); +} + /* the vfs.generic. branch. 
*/ SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge"); /* retreive a list of mounted filesystem fsid_t */ -SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD | CTLFLAG_LOCKED, +SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids"); /* perform operations on filesystem via fsid_t */ SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED, sysctl_vfs_ctlbyfsid, "ctlbyfsid"); SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY, NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang"); - - +SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &maxvfstypenum, 0, ""); +SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout, 0, ""); +SYSCTL_NODE(_vfs_generic, VFS_CONF, conf, + CTLFLAG_RD | CTLFLAG_LOCKED, + sysctl_vfs_generic_conf, ""); + +/* + * Print vnode state. + */ +void +vn_print_state(struct vnode *vp, const char *fmt, ...) +{ + va_list ap; + char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)"; + char fs_name[MFSNAMELEN]; + + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); + printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str); + printf("tag %d, type %d\n", vp->v_tag, vp->v_type); + /* Counts .. */ + printf(" iocount %d, usecount %d, kusecount %d references %d\n", + vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references); + printf(" writecount %d, numoutput %d\n", vp->v_writecount, + vp->v_numoutput); + /* Flags */ + printf(" flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag, + vp->v_lflag, vp->v_listflag); + + if (vp->v_mount == NULL || vp->v_mount == dead_mountp) { + strlcpy(fs_name, "deadfs", MFSNAMELEN); + } else { + vfs_name(vp->v_mount, fs_name); + } + + printf(" v_data 0x%0llx %s\n", + (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0), + perm_str); + printf(" v_mount 0x%0llx %s vfs_name %s\n", + (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0), + perm_str, fs_name); +} + long num_reusedvnodes = 0; + +static vnode_t +process_vp(vnode_t vp, int want_vp, int *deferred) +{ + unsigned int vpid; + + *deferred = 0; + + vpid = vp->v_id; + + vnode_list_remove_locked(vp); + + vnode_list_unlock(); + + vnode_lock_spin(vp); + + /* + * We could wait for the vnode_lock after removing the vp from the freelist + * and the vid is bumped only at the very end of reclaim. So it is possible + * that we are looking at a vnode that is being terminated. If so skip it. + */ + if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || + VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) { + /* + * we lost the race between dropping the list lock + * and picking up the vnode_lock... someone else + * used this vnode and it is now in a new state + */ + vnode_unlock(vp); + + return (NULLVP); + } + if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) { + /* + * we did a vnode_rele_ext that asked for + * us not to reenter the filesystem during + * the release even though VL_NEEDINACTIVE was + * set... we'll do it here by doing a + * vnode_get/vnode_put + * + * pick up an iocount so that we can call + * vnode_put and drive the VNOP_INACTIVE... + * vnode_put will either leave us off + * the freelist if a new ref comes in, + * or put us back on the end of the freelist + * or recycle us if we were marked for termination... 
+ * so we'll just go grab a new candidate + */ + vp->v_iocount++; +#ifdef JOE_DEBUG + record_vp(vp, 1); +#endif + vnode_put_locked(vp); + vnode_unlock(vp); + + return (NULLVP); + } + /* + * Checks for anyone racing us for recycle + */ + if (vp->v_type != VBAD) { + if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) { + vnode_async_list_add(vp); + vnode_unlock(vp); + + *deferred = 1; + + return (NULLVP); + } + if (vp->v_lflag & VL_DEAD) + panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp); + + vnode_lock_convert(vp); + (void)vnode_reclaim_internal(vp, 1, want_vp, 0); + + if (want_vp) { + if ((VONLIST(vp))) + panic("new_vnode(%p): vp on list", vp); + if (vp->v_usecount || vp->v_iocount || vp->v_kusecount || + (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) + panic("new_vnode(%p): free vnode still referenced", vp); + if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) + panic("new_vnode(%p): vnode seems to be on mount list", vp); + if ( !LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) + panic("new_vnode(%p): vnode still hooked into the name cache", vp); + } else { + vnode_unlock(vp); + vp = NULLVP; + } + } + return (vp); +} + +__attribute__((noreturn)) +static void +async_work_continue(void) +{ + struct async_work_lst *q; + int deferred; + vnode_t vp; + + q = &vnode_async_work_list; + + for (;;) { + + vnode_list_lock(); + + if ( TAILQ_EMPTY(q) ) { + assert_wait(q, (THREAD_UNINT)); + + vnode_list_unlock(); + + thread_block((thread_continue_t)async_work_continue); + + continue; + } + async_work_handled++; + + vp = TAILQ_FIRST(q); + + vp = process_vp(vp, 0, &deferred); + + if (vp != NULLVP) + panic("found VBAD vp (%p) on async queue", vp); + } +} + + static int new_vnode(vnode_t *vpp) { vnode_t vp; - int retries = 0; /* retry incase of tablefull */ + uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */ int force_alloc = 0, walk_count = 0; - unsigned int vpid; - struct timespec ts; + boolean_t need_reliable_vp = FALSE; + int deferred; + struct timeval initial_tv; struct timeval current_tv; -#ifndef __LP64__ - struct unsafe_fsnode *l_unsafefs = 0; -#endif /* __LP64__ */ proc_t curproc = current_proc(); + initial_tv.tv_sec = 0; retry: - microuptime(¤t_tv); - vp = NULLVP; vnode_list_lock(); + if (need_reliable_vp == TRUE) + async_work_timed_out++; + if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) { + struct timespec ts; + if ( !TAILQ_EMPTY(&vnode_dead_list)) { /* * Can always reuse a dead one @@ -3519,6 +4119,8 @@ retry: VLISTNONE(vp); /* avoid double queue removal */ lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr); + TAILQ_INIT(&vp->v_ncchildren); + klist_init(&vp->v_knotes); nanouptime(&ts); vp->v_id = ts.tv_nsec; @@ -3532,6 +4134,7 @@ retry: vp->v_iocount = 1; goto done; } + microuptime(¤t_tv); #define MAX_WALK_COUNT 1000 @@ -3540,10 +4143,10 @@ retry: (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) { TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) { - if ( !(vp->v_listflag & VLIST_RAGE)) - panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp); + if ( !(vp->v_listflag & VLIST_RAGE)) + panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp); - // if we're a dependency-capable process, skip vnodes that can + // if we're a dependency-capable process, skip vnodes that can // cause recycling deadlocks. (i.e. this process is diskimages // helper and the vnode is in a disk image). 
Querying the // mnt_kern_flag for the mount's virtual device status @@ -3551,19 +4154,27 @@ retry: // may not be updated if there are multiple devnode layers // in between the disk image and the final consumer. - if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || - (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { - break; - } + if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || + (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { + /* + * if need_reliable_vp == TRUE, then we've already sent one or more + * non-reliable vnodes to the async thread for processing and timed + * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT + * mechanism to first scan for a reliable vnode before forcing + * a new vnode to be created + */ + if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) + break; + } + + // don't iterate more than MAX_WALK_COUNT vnodes to + // avoid keeping the vnode list lock held for too long. - // don't iterate more than MAX_WALK_COUNT vnodes to - // avoid keeping the vnode list lock held for too long. - if (walk_count++ > MAX_WALK_COUNT) { + if (walk_count++ > MAX_WALK_COUNT) { vp = NULL; - break; - } + break; + } } - } if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) { @@ -3581,19 +4192,27 @@ retry: // may not be updated if there are multiple devnode layers // in between the disk image and the final consumer. - if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || - (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { - break; - } + if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || + (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { + /* + * if need_reliable_vp == TRUE, then we've already sent one or more + * non-reliable vnodes to the async thread for processing and timed + * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT + * mechanism to first scan for a reliable vnode before forcing + * a new vnode to be created + */ + if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) + break; + } - // don't iterate more than MAX_WALK_COUNT vnodes to - // avoid keeping the vnode list lock held for too long. - if (walk_count++ > MAX_WALK_COUNT) { - vp = NULL; - break; - } - } + // don't iterate more than MAX_WALK_COUNT vnodes to + // avoid keeping the vnode list lock held for too long. + if (walk_count++ > MAX_WALK_COUNT) { + vp = NULL; + break; + } + } } // @@ -3606,9 +4225,9 @@ retry: // the allocation. // if (vp == NULL && walk_count >= MAX_WALK_COUNT) { - force_alloc = 1; - vnode_list_unlock(); - goto retry; + force_alloc = 1; + vnode_list_unlock(); + goto retry; } if (vp == NULL) { @@ -3616,9 +4235,9 @@ retry: * we've reached the system imposed maximum number of vnodes * but there isn't a single one available * wait a bit and then retry... 
if we can't get a vnode - * after 100 retries, than log a complaint + * after our target number of retries, than log a complaint */ - if (++retries <= 100) { + if (++retries <= max_retries) { vnode_list_unlock(); delay_for_interval(1, 1000 * 1000); goto retry; @@ -3627,14 +4246,20 @@ retry: vnode_list_unlock(); tablefull("vnode"); log(LOG_EMERG, "%d desired, %d numvnodes, " - "%d free, %d dead, %d rage\n", - desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes); -#if CONFIG_EMBEDDED + "%d free, %d dead, %d async, %d rage\n", + desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes); +#if CONFIG_JETSAM + +#if DEVELOPMENT || DEBUG + if (bootarg_no_vnode_jetsam) + panic("vnode table is full\n"); +#endif /* DEVELOPMENT || DEBUG */ + /* * Running out of vnodes tends to make a system unusable. Start killing * processes that jetsam knows are killable. */ - if (jetsam_kill_top_proc(TRUE, kJetsamFlagsKilledVnodes) < 0) { + if (memorystatus_kill_on_vnode_limit() == FALSE) { /* * If jetsam can't find any more processes to kill and there * still aren't any free vnodes, panic. Hopefully we'll get a @@ -3643,88 +4268,73 @@ retry: panic("vnode table is full\n"); } - delay_for_interval(1, 1000 * 1000); - goto retry; -#endif - - *vpp = NULL; - return (ENFILE); - } -steal_this_vp: - vpid = vp->v_id; - - vnode_list_remove_locked(vp); - - vnode_list_unlock(); - - vnode_lock_spin(vp); - - /* - * We could wait for the vnode_lock after removing the vp from the freelist - * and the vid is bumped only at the very end of reclaim. So it is possible - * that we are looking at a vnode that is being terminated. If so skip it. - */ - if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || - VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) { - /* - * we lost the race between dropping the list lock - * and picking up the vnode_lock... someone else - * used this vnode and it is now in a new state - * so we need to go back and try again + /* + * Now that we've killed someone, wait a bit and continue looking + * (with fewer retries before trying another kill). */ - vnode_unlock(vp); + delay_for_interval(3, 1000 * 1000); + retries = 0; + max_retries = 10; goto retry; - } - if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) { - /* - * we did a vnode_rele_ext that asked for - * us not to reenter the filesystem during - * the release even though VL_NEEDINACTIVE was - * set... we'll do it here by doing a - * vnode_get/vnode_put - * - * pick up an iocount so that we can call - * vnode_put and drive the VNOP_INACTIVE... - * vnode_put will either leave us off - * the freelist if a new ref comes in, - * or put us back on the end of the freelist - * or recycle us if we were marked for termination... 
- * so we'll just go grab a new candidate - */ - vp->v_iocount++; -#ifdef JOE_DEBUG - record_vp(vp, 1); #endif - vnode_put_locked(vp); - vnode_unlock(vp); - goto retry; + + *vpp = NULL; + return (ENFILE); } - OSAddAtomicLong(1, &num_reusedvnodes); +steal_this_vp: + if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) { + if (deferred) { + int elapsed_msecs; + struct timeval elapsed_tv; - /* Checks for anyone racing us for recycle */ - if (vp->v_type != VBAD) { - if (vp->v_lflag & VL_DEAD) - panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp); - vnode_lock_convert(vp); - (void)vnode_reclaim_internal(vp, 1, 1, 0); + if (initial_tv.tv_sec == 0) + microuptime(&initial_tv); - if ((VONLIST(vp))) - panic("new_vnode(%p): vp on list", vp); - if (vp->v_usecount || vp->v_iocount || vp->v_kusecount || - (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) - panic("new_vnode(%p): free vnode still referenced", vp); - if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) - panic("new_vnode(%p): vnode seems to be on mount list", vp); - if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren)) - panic("new_vnode(%p): vnode still hooked into the name cache", vp); - } + vnode_list_lock(); + + dead_vnode_waited++; + dead_vnode_wanted++; + + /* + * note that we're only going to explicitly wait 10ms + * for a dead vnode to become available, since even if one + * isn't available, a reliable vnode might now be available + * at the head of the VRAGE or free lists... if so, we + * can satisfy the new_vnode request with less latency then waiting + * for the full 100ms duration we're ultimately willing to tolerate + */ + assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC); + + vnode_list_unlock(); + + thread_block(THREAD_CONTINUE_NULL); -#ifndef __LP64__ - if (vp->v_unsafefs) { - l_unsafefs = vp->v_unsafefs; - vp->v_unsafefs = (struct unsafe_fsnode *)NULL; + microuptime(&elapsed_tv); + + timevalsub(&elapsed_tv, &initial_tv); + elapsed_msecs = elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000; + + if (elapsed_msecs >= 100) { + /* + * we've waited long enough... 100ms is + * somewhat arbitrary for this case, but the + * normal worst case latency used for UI + * interaction is 100ms, so I've chosen to + * go with that. 
+ * + * setting need_reliable_vp to TRUE + * forces us to find a reliable vnode + * that we can process synchronously, or + * to create a new one if the scan for + * a reliable one hits the scan limit + */ + need_reliable_vp = TRUE; + } + } + goto retry; } -#endif /* __LP64__ */ + OSAddAtomicLong(1, &num_reusedvnodes); + #if CONFIG_MACF /* @@ -3755,13 +4365,6 @@ steal_this_vp: vnode_unlock(vp); -#ifndef __LP64__ - if (l_unsafefs) { - lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp); - FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS); - } -#endif /* __LP64__ */ - done: *vpp = vp; @@ -3864,6 +4467,17 @@ vnode_put(vnode_t vp) return(retval); } +static inline void +vn_set_dead(vnode_t vp) +{ + vp->v_mount = NULL; + vp->v_op = dead_vnodeop_p; + vp->v_tag = VT_NON; + vp->v_data = NULL; + vp->v_type = VBAD; + vp->v_lflag |= VL_DEAD; +} + int vnode_put_locked(vnode_t vp) { @@ -3918,6 +4532,15 @@ vnode_isinuse(vnode_t vp, int refcnt) return(vnode_isinuse_locked(vp, refcnt, 0)); } +int vnode_usecount(vnode_t vp) +{ + return vp->v_usecount; +} + +int vnode_iocount(vnode_t vp) +{ + return vp->v_iocount; +} static int vnode_isinuse_locked(vnode_t vp, int refcnt, int locked) @@ -3986,6 +4609,18 @@ vnode_suspend(vnode_t vp) return(0); } +/* + * Release any blocked locking requests on the vnode. + * Used for forced-unmounts. + * + * XXX What about network filesystems? + */ +static void +vnode_abort_advlocks(vnode_t vp) +{ + if (vp->v_flag & VLOCKLOCAL) + lf_abort_advlocks(vp); +} static errno_t @@ -4016,8 +4651,10 @@ vnode_drain(vnode_t vp) * this allows us to keep actively referenced vnodes in the list without having * to constantly remove and add to the list each time a vnode w/o a usecount is * referenced which costs us taking and dropping a global lock twice. + * However, if the vnode is marked DIRTY, we want to pull it out much earlier */ -#define UNAGE_THRESHHOLD 25 +#define UNAGE_THRESHHOLD 25 +#define UNAGE_DIRTYTHRESHHOLD 6 errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) @@ -4026,8 +4663,11 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) int nosusp = vflags & VNODE_NOSUSPEND; int always = vflags & VNODE_ALWAYS; int beatdrain = vflags & VNODE_DRAINO; + int withvid = vflags & VNODE_WITHID; for (;;) { + int sleepflg = 0; + /* * if it is a dead vnode with deadfs */ @@ -4059,29 +4699,56 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) break; /* - * In some situations, we want to get an iocount - * even if the vnode is draining to prevent deadlock, - * e.g. if we're in the filesystem, potentially holding - * resources that could prevent other iocounts from - * being released. + * If this vnode is getting drained, there are some cases where + * we can't block or, in case of tty vnodes, want to be + * interruptible. */ - if (beatdrain && (vp->v_lflag & VL_DRAIN)) { - break; + if (vp->v_lflag & VL_DRAIN) { + /* + * In some situations, we want to get an iocount + * even if the vnode is draining to prevent deadlock, + * e.g. if we're in the filesystem, potentially holding + * resources that could prevent other iocounts from + * being released. + */ + if (beatdrain) + break; + /* + * Don't block if the vnode's mount point is unmounting as + * we may be the thread the unmount is itself waiting on + * Only callers who pass in vids (at this point, we've already + * handled nosusp and nodead) are expecting error returns + * from this function, so only we can only return errors for + * those. 
ENODEV is intended to inform callers that the call + * failed because an unmount is in progress. + */ + if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) + return (ENODEV); + + if (vnode_istty(vp)) { + sleepflg = PCATCH; + } } vnode_lock_convert(vp); if (vp->v_lflag & VL_TERMINATE) { + int error; + vp->v_lflag |= VL_TERMWANT; - msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL); + error = msleep(&vp->v_lflag, &vp->v_lock, + (PVFS | sleepflg), "vnode getiocount", NULL); + if (error) + return (error); } else msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL); } - if (((vflags & VNODE_WITHID) != 0) && vid != vp->v_id) { + if (withvid && vid != vp->v_id) { return(ENOENT); } - if (++vp->v_references >= UNAGE_THRESHHOLD) { + if (++vp->v_references >= UNAGE_THRESHHOLD || + (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) { vp->v_references = 0; vnode_list_remove(vp); } @@ -4204,16 +4871,13 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) vnode_unlock(vp); } -/* USAGE: - * The following api creates a vnode and associates all the parameter specified in vnode_fsparam - * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias - * is obsoleted by this. - */ -int -vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) +static int +vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, + int init_vnode) { int error; int insert = 1; + int existing_vnode; vnode_t vp; vnode_t nvp; vnode_t dvp; @@ -4223,29 +4887,68 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) #if CONFIG_TRIGGERS struct vnode_trigger_param *tinfo = NULL; #endif - if (param == NULL) - return (EINVAL); + if (*vpp) { + vp = *vpp; + *vpp = NULLVP; + existing_vnode = 1; + } else { + existing_vnode = 0; + } -#if CONFIG_TRIGGERS - if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) { - tinfo = (struct vnode_trigger_param *)data; + if (init_vnode) { + /* Do quick sanity check on the parameters. */ + if ((param == NULL) || (param->vnfs_vtype == VBAD)) { + error = EINVAL; + goto error_out; + } - /* Validate trigger vnode input */ - if ((param->vnfs_vtype != VDIR) || - (tinfo->vnt_resolve_func == NULL) || - (tinfo->vnt_flags & ~VNT_VALID_MASK)) { - return (EINVAL); +#if CONFIG_TRIGGERS + if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) { + tinfo = (struct vnode_trigger_param *)data; + + /* Validate trigger vnode input */ + if ((param->vnfs_vtype != VDIR) || + (tinfo->vnt_resolve_func == NULL) || + (tinfo->vnt_flags & ~VNT_VALID_MASK)) { + error = EINVAL; + goto error_out; + } + /* Fall through a normal create (params will be the same) */ + flavor = VNCREATE_FLAVOR; + size = VCREATESIZE; } - /* Fall through a normal create (params will be the same) */ - flavor = VNCREATE_FLAVOR; - size = VCREATESIZE; - } #endif - if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) - return (EINVAL); + if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) { + error = EINVAL; + goto error_out; + } + } - if ( (error = new_vnode(&vp)) ) - return(error); + if (!existing_vnode) { + if ((error = new_vnode(&vp)) ) { + return (error); + } + if (!init_vnode) { + /* Make it so that it can be released by a vnode_put) */ + vn_set_dead(vp); + *vpp = vp; + return (0); + } + } else { + /* + * A vnode obtained by vnode_create_empty has been passed to + * vnode_initialize - Unset VL_DEAD set by vn_set_dead. 
After + * this point, it is set back on any error. + * + * N.B. vnode locking - We make the same assumptions as the + * "unsplit" vnode_create did - i.e. it is safe to update the + * vnode's fields without the vnode lock. This vnode has been + * out and about with the filesystem and hopefully nothing + * was done to the vnode between the vnode_create_empty and + * now when it has come in through vnode_initialize. + */ + vp->v_lflag &= ~VL_DEAD; + } dvp = param->vnfs_dvp; cnp = param->vnfs_cnp; @@ -4264,16 +4967,13 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) #ifdef JOE_DEBUG record_vp(vp, 1); #endif - vp->v_mount = NULL; - vp->v_op = dead_vnodeop_p; - vp->v_tag = VT_NON; - vp->v_data = NULL; - vp->v_type = VBAD; - vp->v_lflag |= VL_DEAD; + vn_set_dead(vp); vnode_put(vp); return(error); } + if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) + memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control); } #ifdef JOE_DEBUG record_vp(vp, 1); @@ -4295,12 +4995,7 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE); if (error) { printf("vnode_create: vnode_resolver_create() err %d\n", error); - vp->v_mount = NULL; - vp->v_op = dead_vnodeop_p; - vp->v_tag = VT_NON; - vp->v_data = NULL; - vp->v_type = VBAD; - vp->v_lflag |= VL_DEAD; + vn_set_dead(vp); #ifdef JOE_DEBUG record_vp(vp, 1); #endif @@ -4343,6 +5038,13 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) insert = 0; vnode_unlock(vp); } + + if (VCHR == vp->v_type) { + u_int maj = major(vp->v_rdev); + + if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) + vp->v_flag |= VISTTY; + } } if (vp->v_type == VFIFO) { @@ -4376,15 +5078,6 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) */ insmntque(vp, param->vnfs_mp); } -#ifndef __LP64__ - if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) { - MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *, - sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK); - vp->v_unsafefs->fsnode_count = 0; - vp->v_unsafefs->fsnodeowner = (void *)NULL; - lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr); - } -#endif /* __LP64__ */ } if (dvp && vnode_ref(dvp) == 0) { vp->v_parent = dvp; @@ -4423,7 +5116,104 @@ vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) */ vp->v_flag |= VRAGE; } + +#if CONFIG_SECLUDED_MEMORY + switch (secluded_for_filecache) { + case 0: + /* + * secluded_for_filecache == 0: + * + no file contents in secluded pool + */ + break; + case 1: + /* + * secluded_for_filecache == 1: + * + no files from / + * + files from /Applications/ are OK + * + files from /Applications/Camera are not OK + * + no files that are open for write + */ + if (vnode_vtype(vp) == VREG && + vnode_mount(vp) != NULL && + (! 
(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) { + /* not from root filesystem: eligible for secluded pages */ + memory_object_mark_eligible_for_secluded( + ubc_getobject(vp, UBC_FLAGS_NONE), + TRUE); + } + break; + case 2: + /* + * secluded_for_filecache == 2: + * + all read-only files OK, except: + * + dyld_shared_cache_arm64* + * + Camera + * + mediaserverd + */ + if (vnode_vtype(vp) == VREG) { + memory_object_mark_eligible_for_secluded( + ubc_getobject(vp, UBC_FLAGS_NONE), + TRUE); + } + break; + default: + break; + } +#endif /* CONFIG_SECLUDED_MEMORY */ + return (0); + +error_out: + if (existing_vnode) { + vnode_put(vp); + } + return (error); +} + +/* USAGE: + * The following api creates a vnode and associates all the parameter specified in vnode_fsparam + * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias + * is obsoleted by this. + */ +int +vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) +{ + *vpp = NULLVP; + return (vnode_create_internal(flavor, size, data, vpp, 1)); +} + +int +vnode_create_empty(vnode_t *vpp) +{ + *vpp = NULLVP; + return (vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL, + vpp, 0)); +} + +int +vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) +{ + if (*vpp == NULLVP) { + panic("NULL vnode passed to vnode_initialize"); + } +#if DEVELOPMENT || DEBUG + /* + * We lock to check that vnode is fit for unlocked use in + * vnode_create_internal. + */ + vnode_lock_spin(*vpp); + VNASSERT(((*vpp)->v_iocount == 1), *vpp, + ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount)); + VNASSERT(((*vpp)->v_usecount == 0), *vpp, + ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount)); + VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp, + ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x", + (*vpp)->v_lflag)); + VNASSERT(((*vpp)->v_data == NULL), *vpp, + ("vnode_initialize : v_data not NULL")); + vnode_unlock(*vpp); +#endif + return (vnode_create_internal(flavor, size, data, vpp, 1)); } int @@ -4461,6 +5251,7 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) int count, actualcount, i; void * allocmem; int indx_start, indx_stop, indx_incr; + int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF); count = mount_getvfscnt(); count += 10; @@ -4503,7 +5294,12 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) /* iterate over all the vnodes */ ret = callout(mp, arg); - mount_iterdrop(mp); + /* + * Drop the iterref here if the callback didn't do it. + * Note: If cb_dropref is set the mp may no longer exist. + */ + if (!cb_dropref) + mount_iterdrop(mp); switch (ret) { case VFS_RETURNED: @@ -4555,6 +5351,11 @@ vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) VFSATTR_WANTED(&va, f_ffree); VFSATTR_WANTED(&va, f_bsize); VFSATTR_WANTED(&va, f_fssubtype); + + if ((error = vfs_getattr(mp, &va, ctx)) != 0) { + KAUTH_DEBUG("STAT - filesystem returned error %d", error); + return(error); + } #if CONFIG_MACF if (eventtype == VFS_USER_EVENT) { error = mac_mount_check_getattr(ctx, mp, &va); @@ -4562,12 +5363,6 @@ vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) return (error); } #endif - - if ((error = vfs_getattr(mp, &va, ctx)) != 0) { - KAUTH_DEBUG("STAT - filesystem returned error %d", error); - return(error); - } - /* * Unpack into the per-mount structure. 
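 *
 * Illustrative aside (not part of this change set): the vnode_create()
 * split above gives a filesystem a two-phase allocation path.  A sketch of
 * assumed usage follows; "myfs_fill_param" is a hypothetical helper and the
 * error handling is abbreviated.
 *
 *	vnode_t vp = NULLVP;
 *	struct vnode_fsparam param;
 *	int error;
 *
 *	if ((error = vnode_create_empty(&vp)) != 0)
 *		return (error);
 *	// vp is dead (VL_DEAD) but holds an iocount; on any failure here a
 *	// plain vnode_put(vp) is enough to release it.
 *	myfs_fill_param(&param, mp, dvp, cnp, inode);	// hypothetical helper
 *	if ((error = vnode_initialize(VNCREATE_FLAVOR, VCREATESIZE,
 *	    &param, &vp)) != 0)
 *		return (error);
 *	// vp is now fully initialized, as if it had come from vnode_create()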
* @@ -4711,8 +5506,8 @@ vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx) int error; u_int32_t ndflags = 0; - if (ctx == NULL) { /* XXX technically an error */ - ctx = vfs_context_current(); + if (ctx == NULL) { + return EINVAL; } if (flags & VNODE_LOOKUP_NOFOLLOW) @@ -4722,8 +5517,9 @@ vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx) if (flags & VNODE_LOOKUP_NOCROSSMOUNT) ndflags |= NOCROSSMOUNT; - if (flags & VNODE_LOOKUP_DOWHITEOUT) - ndflags |= DOWHITEOUT; + + if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) + ndflags |= CN_NBMOUNTLOOK; /* XXX AUDITVNPATH1 needed ? */ NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE, @@ -4759,9 +5555,10 @@ vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_ if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) ndflags |= NOCROSSMOUNT; - if (lflags & VNODE_LOOKUP_DOWHITEOUT) - ndflags |= DOWHITEOUT; + if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) + ndflags |= CN_NBMOUNTLOOK; + /* XXX AUDITVNPATH1 needed ? */ NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); @@ -4788,6 +5585,34 @@ vnode_close(vnode_t vp, int flags, vfs_context_t ctx) return (error); } +errno_t +vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx) +{ + struct vnode_attr va; + int error; + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_modify_time); + error = vnode_getattr(vp, &va, ctx); + if (!error) + *mtime = va.va_modify_time; + return error; +} + +errno_t +vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx) +{ + struct vnode_attr va; + int error; + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_flags); + error = vnode_getattr(vp, &va, ctx); + if (!error) + *flags = va.va_flags; + return error; +} + /* * Returns: 0 Success * vnode_getattr:??? @@ -4817,13 +5642,43 @@ vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx) return(vnode_setattr(vp, &va, ctx)); } +int +vnode_setdirty(vnode_t vp) +{ + vnode_lock_spin(vp); + vp->v_flag |= VISDIRTY; + vnode_unlock(vp); + return 0; +} + +int +vnode_cleardirty(vnode_t vp) +{ + vnode_lock_spin(vp); + vp->v_flag &= ~VISDIRTY; + vnode_unlock(vp); + return 0; +} + +int +vnode_isdirty(vnode_t vp) +{ + int dirty; + + vnode_lock_spin(vp); + dirty = (vp->v_flag & VISDIRTY) ? 1 : 0; + vnode_unlock(vp); + + return dirty; +} + static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx) { /* Only use compound VNOP for compound operation */ if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) { *vpp = NULLVP; - return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, VNOP_COMPOUND_OPEN_DO_CREATE, fmode, statusp, vap, ctx); + return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx); } else { return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx); } @@ -4904,6 +5759,7 @@ vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *v panic("Mode for open, but not trying to open..."); } + /* * Create the requested node. 
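 *
 * Illustrative aside (not part of this change set): the accessors added
 * above - vnode_mtime(), vnode_flags(), vnode_setdirty(), vnode_cleardirty()
 * and vnode_isdirty() - let other subsystems query attributes or flag a
 * vnode dirty without reaching into v_flag.  Per the UNAGE_DIRTYTHRESHHOLD
 * change, a VISDIRTY vnode is taken off the reuse list after 6 references
 * rather than 25, so it is less likely to be recycled while still dirty.
 * Assumed usage, for illustration only:
 *
 *	struct timespec mtime;
 *
 *	if (vnode_mtime(vp, &mtime, ctx) == 0 &&
 *	    mtime.tv_sec > last_flush_sec)		// hypothetical policy
 *		vnode_setdirty(vp);
 *	...
 *	vnode_cleardirty(vp);	// after the backing data has been flushed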
*/ @@ -4973,8 +5829,8 @@ out: static kauth_scope_t vnode_scope; static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); -static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action, - uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); +static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, + vnode_t vp, vnode_t dvp, int *errorp); typedef struct _vnode_authorize_context { vnode_t vp; @@ -4988,6 +5844,7 @@ typedef struct _vnode_authorize_context { #define _VAC_IN_GROUP (1<<1) #define _VAC_IS_DIR_OWNER (1<<2) #define _VAC_IN_DIR_GROUP (1<<3) +#define _VAC_NO_VNODE_POINTERS (1<<4) } *vauth_ctx; void @@ -5087,6 +5944,9 @@ vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields) int vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved) { +#if !CONFIG_MACF +#pragma unused(cnp) +#endif int error = 0; /* @@ -5094,7 +5954,7 @@ vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_cont * However, some file systems may have limited support. */ if ((vp->v_type == VDIR) && - !(vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSDIRLINKS)) { + !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) { return (EPERM); /* POSIX */ } @@ -5115,7 +5975,6 @@ vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs /* Open of existing case */ kauth_action_t action; int error = 0; - if (cnp->cn_ndp == NULL) { panic("NULL ndp"); } @@ -5187,12 +6046,30 @@ vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs action |= KAUTH_VNODE_WRITE_DATA; } } - return (vnode_authorize(vp, NULL, action, ctx)); + error = vnode_authorize(vp, NULL, action, ctx); +#if NAMEDSTREAMS + if (error == EACCES) { + /* + * Shadow files may exist on-disk with a different UID/GID + * than that of the current context. Verify that this file + * is really a shadow file. If it was created successfully + * then it should be authorized. 
+ */ + if (vnode_isshadow(vp) && vnode_isnamedstream (vp)) { + error = vnode_verifynamedstream(vp); + } + } +#endif + + return error; } int vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved) { +#if !CONFIG_MACF +#pragma unused(vap) +#endif /* Creation case */ int error; @@ -5219,13 +6096,22 @@ vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *v return (vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)); } -int +int vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx, void *reserved) + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, void *reserved) +{ + return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved); +} + +int +vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) { int error = 0; int moving = 0; + bool swap = flags & VFS_RENAME_SWAP; if (reserved != NULL) { panic("Passed something other than NULL as reserved field!"); @@ -5253,28 +6139,37 @@ vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentnam /***** *****/ #if CONFIG_MACF - error = mac_vnode_check_rename_from(ctx, fdvp, fvp, fcnp); - if (error) - goto out; -#endif - -#if CONFIG_MACF - error = mac_vnode_check_rename_to(ctx, - tdvp, tvp, fdvp == tdvp, tcnp); + error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp); if (error) goto out; + if (swap) { + error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp); + if (error) + goto out; + } #endif /***** *****/ /***** *****/ if (tvp != NULL) { - if (fvp->v_type == VDIR && tvp->v_type != VDIR) { - error = ENOTDIR; - goto out; - } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) { - error = EISDIR; - goto out; + if (!swap) { + if (fvp->v_type == VDIR && tvp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) { + error = EISDIR; + goto out; + } } + } else if (swap) { + /* + * Caller should have already checked this and returned + * ENOENT. If we send back ENOENT here, caller will retry + * which isn't what we want so we send back EINVAL here + * instead. + */ + error = EINVAL; + goto out; } if (fvp == tdvp) { @@ -5301,51 +6196,88 @@ vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentnam error = EINVAL; goto out; } - /***** *****/ - - /***** *****/ - error = 0; - if ((tvp != NULL) && vnode_isdir(tvp)) { - if (tvp != fdvp) - moving = 1; - } else if (tdvp != fdvp) { - moving = 1; + if (swap && fdvp->v_parent == tvp) { + error = EINVAL; + goto out; } + /***** *****/ - - /* - * must have delete rights to remove the old name even in - * the simple case of fdvp == tdvp. - * - * If fvp is a directory, and we are changing it's parent, - * then we also need rights to rewrite its ".." entry as well. - */ - if (vnode_isdir(fvp)) { - if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) - goto out; - } else { - if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) + /***** *****/ + + if (swap) { + kauth_action_t f = 0, t = 0; + + /* + * Directories changing parents need ...ADD_SUBDIR... to + * permit changing ".." 
+ */ + if (fdvp != tdvp) { + if (vnode_isdir(fvp)) + f = KAUTH_VNODE_ADD_SUBDIRECTORY; + if (vnode_isdir(tvp)) + t = KAUTH_VNODE_ADD_SUBDIRECTORY; + } + error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx); + if (error) goto out; - } - if (moving) { - /* moving into tdvp or tvp, must have rights to add */ - if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp, - NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, - ctx)) != 0) { + error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx); + if (error) goto out; + f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE; + t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE; + if (fdvp == tdvp) + error = vnode_authorize(fdvp, NULL, f | t, ctx); + else { + error = vnode_authorize(fdvp, NULL, t, ctx); + if (error) + goto out; + error = vnode_authorize(tdvp, NULL, f, ctx); } + if (error) + goto out; } else { - /* node staying in same directory, must be allowed to add new name */ - if ((error = vnode_authorize(fdvp, NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) + error = 0; + if ((tvp != NULL) && vnode_isdir(tvp)) { + if (tvp != fdvp) + moving = 1; + } else if (tdvp != fdvp) { + moving = 1; + } + + /* + * must have delete rights to remove the old name even in + * the simple case of fdvp == tdvp. + * + * If fvp is a directory, and we are changing it's parent, + * then we also need rights to rewrite its ".." entry as well. + */ + if (vnode_isdir(fvp)) { + if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) + goto out; + } else { + if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) + goto out; + } + if (moving) { + /* moving into tdvp or tvp, must have rights to add */ + if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp, + NULL, + vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, + ctx)) != 0) { + goto out; + } + } else { + /* node staying in same directory, must be allowed to add new name */ + if ((error = vnode_authorize(fdvp, NULL, + vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) + goto out; + } + /* overwriting tvp */ + if ((tvp != NULL) && !vnode_isdir(tvp) && + ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) { goto out; - } - /* overwriting tvp */ - if ((tvp != NULL) && !vnode_isdir(tvp) && - ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) { - goto out; + } } /***** *****/ @@ -5358,6 +6290,9 @@ out: int vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved) { +#if !CONFIG_MACF +#pragma unused(vap) +#endif int error; if (reserved != NULL) { @@ -5393,8 +6328,11 @@ out: int vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved) { +#if CONFIG_MACF int error; - +#else +#pragma unused(cnp) +#endif if (reserved != NULL) { panic("Non-NULL reserved argument to vn_authorize_rmdir()"); } @@ -5423,6 +6361,111 @@ vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_conte return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx); } +/* + * Authorizer for directory cloning. This does not use vnodes but instead + * uses prefilled vnode attributes from the filesystem. 
+ * + * The same function is called to set up the attributes required, perform the + * authorization and cleanup (if required) + */ +int +vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action, + struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp, + dir_clone_authorizer_op_t vattr_op, vfs_context_t ctx, + __unused void *reserved) +{ + int error; + int is_suser = vfs_context_issuser(ctx); + + if (vattr_op == OP_VATTR_SETUP) { + VATTR_INIT(vap); + + /* + * When ACL inheritence is implemented, both vap->va_acl and + * dvap->va_acl will be required (even as superuser). + */ + VATTR_WANTED(vap, va_type); + VATTR_WANTED(vap, va_mode); + VATTR_WANTED(vap, va_flags); + VATTR_WANTED(vap, va_uid); + VATTR_WANTED(vap, va_gid); + if (dvap) { + VATTR_INIT(dvap); + VATTR_WANTED(dvap, va_flags); + } + + if (!is_suser) { + /* + * If not superuser, we have to evaluate ACLs and + * need the target directory gid to set the initial + * gid of the new object. + */ + VATTR_WANTED(vap, va_acl); + if (dvap) + VATTR_WANTED(dvap, va_gid); + } + + return (0); + } else if (vattr_op == OP_VATTR_CLEANUP) { + return (0); /* Nothing to do for now */ + } + + /* dvap isn't used for authorization */ + error = vnode_attr_authorize(vap, NULL, mp, action, ctx); + + if (error) + return (error); + + /* + * vn_attribute_prepare should be able to accept attributes as well as + * vnodes but for now we do this inline. + */ + if (!is_suser) { + /* + * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit + * owner is set, that owner takes ownership of all new files. + */ + if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) && + (mp->mnt_fsowner != KAUTH_UID_NONE)) { + VATTR_SET(vap, va_uid, mp->mnt_fsowner); + } else { + /* default owner is current user */ + VATTR_SET(vap, va_uid, + kauth_cred_getuid(vfs_context_ucred(ctx))); + } + + if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) && + (mp->mnt_fsgroup != KAUTH_GID_NONE)) { + VATTR_SET(vap, va_gid, mp->mnt_fsgroup); + } else { + /* + * default group comes from parent object, + * fallback to current user + */ + if (VATTR_IS_SUPPORTED(dvap, va_gid)) { + VATTR_SET(vap, va_gid, dvap->va_gid); + } else { + VATTR_SET(vap, va_gid, + kauth_cred_getgid(vfs_context_ucred(ctx))); + } + } + } + + /* Inherit SF_RESTRICTED bit from destination directory only */ + if (VATTR_IS_ACTIVE(vap, va_flags)) { + VATTR_SET(vap, va_flags, + ((vap->va_flags & ~SF_RESTRICTED))); /* Turn off from source */ + if (VATTR_IS_ACTIVE(dvap, va_flags)) + VATTR_SET(vap, va_flags, + vap->va_flags | (dvap->va_flags & SF_RESTRICTED)); + } else if (VATTR_IS_ACTIVE(dvap, va_flags)) { + VATTR_SET(vap, va_flags, (dvap->va_flags & SF_RESTRICTED)); + } + + return (0); +} + + /* * Authorize an operation on a vnode. * @@ -5880,36 +6923,47 @@ out: * - Neither the node nor the directory are immutable. * - The user is not the superuser. * - * Deletion is not permitted if the directory is sticky and the caller is - * not owner of the node or directory. + * The precedence of factors for authorizing or denying delete for a credential + * + * 1) Explicit ACE on the node. (allow or deny DELETE) + * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD). * - * If either the node grants DELETE, or the directory grants DELETE_CHILD, - * the node may be deleted. If neither denies the permission, and the - * caller has Posix write access to the directory, then the node may be - * deleted. + * If there are conflicting ACEs on the node and the directory, the node + * ACE wins. + * + * 3) Sticky bit on the directory. 
+ * Deletion is not permitted if the directory is sticky and the caller is + * not owner of the node or directory. The sticky bit rules are like a deny + * delete ACE except lower in priority than ACL's either allowing or denying + * delete. + * + * 4) POSIX permisions on the directory. * * As an optimization, we cache whether or not delete child is permitted - * on directories without the sticky bit set. + * on directories. This enables us to skip directory ACL and POSIX checks + * as we already have the result from those checks. However, we always check the + * node ACL and, if the directory has the sticky bit set, we always check its + * ACL (even for a directory with an authorized delete child). Furthermore, + * caching the delete child authorization is independent of the sticky bit + * being set as it is only applicable in determining whether the node can be + * deleted or not. */ -int -vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child); -/*static*/ int +static int vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) { struct vnode_attr *vap = vcp->vap; struct vnode_attr *dvap = vcp->dvap; kauth_cred_t cred = vcp->ctx->vc_ucred; struct kauth_acl_eval eval; - int error, delete_denied, delete_child_denied, ismember; + int error, ismember; - /* check the ACL on the directory */ - delete_child_denied = 0; - if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) { - eval.ae_requested = KAUTH_VNODE_DELETE_CHILD; - eval.ae_acl = &dvap->va_acl->acl_ace[0]; - eval.ae_count = dvap->va_acl->acl_entrycount; + /* Check the ACL on the node first */ + if (VATTR_IS_NOT(vap, va_acl, NULL)) { + eval.ae_requested = KAUTH_VNODE_DELETE; + eval.ae_acl = &vap->va_acl->acl_ace[0]; + eval.ae_count = vap->va_acl->acl_entrycount; eval.ae_options = 0; - if (vauth_dir_owner(vcp)) + if (vauth_file_owner(vcp)) eval.ae_options |= KAUTH_AEVAL_IS_OWNER; /* * We use ENOENT as a marker to indicate we could not get @@ -5917,8 +6971,8 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) * have the ACL evaluation answer. Previously, we would * always deny the operation at this point. */ - if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) - return(error); + if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) + return (error); if (error == ENOENT) eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN; else if (ismember) @@ -5928,40 +6982,48 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; - /* - * If there is no entry, we are going to defer to other - * authorization mechanisms. 
- */ - error = kauth_acl_evaluate(cred, &eval); - - if (error != 0) { + if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); - return(error); + return (error); } + switch(eval.ae_result) { case KAUTH_RESULT_DENY: - delete_child_denied = 1; - break; - /* FALLSTHROUGH */ - case KAUTH_RESULT_ALLOW: - KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp); - return(0); + KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp); + return (EACCES); + case KAUTH_RESULT_ALLOW: + KAUTH_DEBUG("%p ALLOWED - granted by ACL", vcp->vp); + return (0); case KAUTH_RESULT_DEFER: default: - /* Effectively the same as !delete_child_denied */ - KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp); + /* Defer to directory */ + KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp); break; } } - /* check the ACL on the node */ - delete_denied = 0; - if (VATTR_IS_NOT(vap, va_acl, NULL)) { - eval.ae_requested = KAUTH_VNODE_DELETE; - eval.ae_acl = &vap->va_acl->acl_ace[0]; - eval.ae_count = vap->va_acl->acl_entrycount; + /* + * Without a sticky bit, a previously authorized delete child is + * sufficient to authorize this delete. + * + * If the sticky bit is set, a directory ACL which allows delete child + * overrides a (potential) sticky bit deny. The authorized delete child + * cannot tell us if it was authorized because of an explicit delete + * child allow ACE or because of POSIX permisions so we have to check + * the directory ACL everytime if the directory has a sticky bit. + */ + if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) { + KAUTH_DEBUG("%p ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp); + return (0); + } + + /* check the ACL on the directory */ + if (VATTR_IS_NOT(dvap, va_acl, NULL)) { + eval.ae_requested = KAUTH_VNODE_DELETE_CHILD; + eval.ae_acl = &dvap->va_acl->acl_ace[0]; + eval.ae_count = dvap->va_acl->acl_entrycount; eval.ae_options = 0; - if (vauth_file_owner(vcp)) + if (vauth_dir_owner(vcp)) eval.ae_options |= KAUTH_AEVAL_IS_OWNER; /* * We use ENOENT as a marker to indicate we could not get @@ -5969,7 +7031,7 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) * have the ACL evaluation answer. Previously, we would * always deny the operation at this point. */ - if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) + if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) return(error); if (error == ENOENT) eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN; @@ -5980,52 +7042,64 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; - if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { + /* + * If there is no entry, we are going to defer to other + * authorization mechanisms. 
+ */ + error = kauth_acl_evaluate(cred, &eval); + + if (error != 0) { KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); - return(error); + return (error); } - switch(eval.ae_result) { case KAUTH_RESULT_DENY: - delete_denied = 1; - break; + KAUTH_DEBUG("%p DENIED - denied by directory ACL", vcp->vp); + return (EACCES); case KAUTH_RESULT_ALLOW: - KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp); - return(0); + KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp); + if (!cached_delete_child && vcp->dvp) { + vnode_cache_authorized_action(vcp->dvp, + vcp->ctx, KAUTH_VNODE_DELETE_CHILD); + } + return (0); case KAUTH_RESULT_DEFER: default: - /* Effectively the same as !delete_child_denied */ - KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : ""); + /* Deferred by directory ACL */ + KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp); break; } } - /* if denied by ACL on directory or node, return denial */ - if (delete_denied || delete_child_denied) { - KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp); - return(EACCES); - } - /* - * enforce sticky bit behaviour; the cached_delete_child property will - * be false and the dvap contents valis for sticky bit directories; - * this makes us check the directory each time, but it's unavoidable, - * as sticky bit is an exception to caching. + * From this point, we can't explicitly allow and if we reach the end + * of the function without a denial, then the delete is authorized. */ - if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) { - KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)", - vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid); - return(EACCES); + if (!cached_delete_child) { + if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) { + KAUTH_DEBUG("%p DENIED - denied by posix permisssions", vcp->vp); + return (EACCES); + } + /* + * Cache the authorized action on the vnode if allowed by the + * directory ACL or POSIX permissions. It is correct to cache + * this action even if sticky bit would deny deleting the node. + */ + if (vcp->dvp) { + vnode_cache_authorized_action(vcp->dvp, vcp->ctx, + KAUTH_VNODE_DELETE_CHILD); + } } - /* check the directory */ - if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) { - KAUTH_DEBUG("%p DENIED - denied by posix permisssions", vcp->vp); - return(error); + /* enforce sticky bit behaviour */ + if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) { + KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)", + vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid); + return (EACCES); } /* not denied, must be OK */ - return(0); + return (0); } @@ -6219,9 +7293,8 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r * Check for file immutability. */ static int -vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore) +vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, int ignore) { - mount_t mp; int error; int append; @@ -6230,7 +7303,7 @@ vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, i * * Sockets, fifos and devices require special handling. 
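 *
 * Illustrative aside (not part of this change set): a worked example of the
 * vnode_authorize_delete() precedence above, for a file in a sticky
 * directory such as /tmp (mode 01777):
 *
 *	- an ACE on the file allowing DELETE, or on the directory allowing
 *	  DELETE_CHILD, permits the unlink even for a non-owner, because both
 *	  ACL checks run before the sticky-bit test;
 *	- with no ACLs, POSIX write access to the directory is evaluated
 *	  (unless a DELETE_CHILD grant was already cached), and the sticky
 *	  bit then denies the unlink unless the caller owns either the file
 *	  or the directory.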
*/ - switch(vp->v_type) { + switch(vap->va_type) { case VSOCK: case VFIFO: case VBLK: @@ -6249,7 +7322,6 @@ vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, i if (rights & KAUTH_VNODE_WRITE_RIGHTS) { /* check per-filesystem options if possible */ - mp = vp->v_mount; if (mp != NULL) { /* check for no-EA filesystems */ @@ -6266,7 +7338,7 @@ vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, i * allowable for a UF_APPEND file. */ append = 0; - if (vp->v_type == VDIR) { + if (vap->va_type == VDIR) { if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) append = 1; } else { @@ -6377,8 +7449,9 @@ vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_cont static int -vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action, - uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) +vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata, + kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3) { vfs_context_t ctx; vnode_t cvp = NULLVP; @@ -6454,7 +7527,7 @@ vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action, goto out; } defer: - result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3); + result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3); if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) { KAUTH_DEBUG("%p - caching action = %x", cvp, action); @@ -6469,30 +7542,97 @@ out: return result; } +static int +vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp, + kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny, + int noimmutable, int parent_authorized_for_delete_child) +{ + int result; + + /* + * Check for immutability. + * + * In the deletion case, parent directory immutability vetoes specific + * file rights. + */ + if ((result = vnode_authorize_checkimmutable(mp, vcp->vap, rights, + noimmutable)) != 0) + goto out; + + if ((rights & KAUTH_VNODE_DELETE) && + !parent_authorized_for_delete_child) { + result = vnode_authorize_checkimmutable(mp, vcp->dvap, + KAUTH_VNODE_DELETE_CHILD, 0); + if (result) + goto out; + } + + /* + * Clear rights that have been authorized by reaching this point, bail if nothing left to + * check. + */ + rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE); + if (rights == 0) + goto out; + + /* + * If we're not the superuser, authorize based on file properties; + * note that even if parent_authorized_for_delete_child is TRUE, we + * need to check on the node itself. + */ + if (!is_suser) { + /* process delete rights */ + if ((rights & KAUTH_VNODE_DELETE) && + ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) + goto out; + + /* process remaining rights */ + if ((rights & ~KAUTH_VNODE_DELETE) && + (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) + goto out; + } else { + /* + * Execute is only granted to root if one of the x bits is set. This check only + * makes sense if the posix mode bits are actually supported. 
+ */ + if ((rights & KAUTH_VNODE_EXECUTE) && + (vcp->vap->va_type == VREG) && + VATTR_IS_SUPPORTED(vcp->vap, va_mode) && + !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) { + result = EPERM; + KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode); + goto out; + } + + /* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */ + *found_deny = TRUE; + + KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp); + } +out: + return (result); +} static int -vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action, - uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) +vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, + vnode_t vp, vnode_t dvp, int *errorp) { struct _vnode_authorize_context auth_context; vauth_ctx vcp; - vfs_context_t ctx; - vnode_t vp, dvp; kauth_cred_t cred; kauth_ace_rights_t rights; struct vnode_attr va, dva; int result; - int *errorp; int noimmutable; boolean_t parent_authorized_for_delete_child = FALSE; boolean_t found_deny = FALSE; boolean_t parent_ref= FALSE; + boolean_t is_suser = FALSE; vcp = &auth_context; - ctx = vcp->ctx = (vfs_context_t)arg0; - vp = vcp->vp = (vnode_t)arg1; - dvp = vcp->dvp = (vnode_t)arg2; - errorp = (int *)arg3; + vcp->ctx = ctx; + vcp->vp = vp; + vcp->dvp = dvp; /* * Note that we authorize against the context, not the passed cred * (the same thing anyway) @@ -6552,7 +7692,8 @@ vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *i if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) parent_authorized_for_delete_child = TRUE; } else { - dvp = NULL; + vcp->dvp = NULLVP; + vcp->dvap = NULL; } /* @@ -6585,33 +7726,8 @@ vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *i goto out; /* - * Get vnode attributes and extended security information for the vnode - * and directory if required. - */ - VATTR_WANTED(&va, va_mode); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_gid); - VATTR_WANTED(&va, va_flags); - VATTR_WANTED(&va, va_acl); - if ((result = vnode_getattr(vp, &va, ctx)) != 0) { - KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result); - goto out; - } - if (dvp && parent_authorized_for_delete_child == FALSE) { - VATTR_WANTED(&dva, va_mode); - VATTR_WANTED(&dva, va_uid); - VATTR_WANTED(&dva, va_gid); - VATTR_WANTED(&dva, va_flags); - VATTR_WANTED(&dva, va_acl); - if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) { - KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result); - goto out; - } - } - - /* - * If the vnode is an extended attribute data vnode (eg. a resource fork), *_DATA becomes - * *_EXTATTRIBUTES. + * If the vnode is a namedstream (extended attribute) data vnode (eg. + * a resource fork), *_DATA becomes *_EXTATTRIBUTES. 
*/ if (vnode_isnamedstream(vp)) { if (rights & KAUTH_VNODE_READ_DATA) { @@ -6622,81 +7738,67 @@ vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *i rights &= ~KAUTH_VNODE_WRITE_DATA; rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES; } + + /* + * Point 'vp' to the namedstream's parent for ACL checking + */ + if ((vp->v_parent != NULL) && + (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) { + parent_ref = TRUE; + vcp->vp = vp = vp->v_parent; + } } - /* - * Point 'vp' to the resource fork's parent for ACL checking - */ - if (vnode_isnamedstream(vp) && - (vp->v_parent != NULL) && - (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) { - parent_ref = TRUE; - vcp->vp = vp = vp->v_parent; - if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) - kauth_acl_free(va.va_acl); - VATTR_INIT(&va); - VATTR_WANTED(&va, va_mode); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_gid); - VATTR_WANTED(&va, va_flags); - VATTR_WANTED(&va, va_acl); - if ((result = vnode_getattr(vp, &va, ctx)) != 0) - goto out; + if (vfs_context_issuser(ctx)) { + /* + * if we're not asking for execute permissions or modifications, + * then we're done, this action is authorized. + */ + if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) + goto success; + + is_suser = TRUE; } /* - * Check for immutability. + * Get vnode attributes and extended security information for the vnode + * and directory if required. * - * In the deletion case, parent directory immutability vetoes specific - * file rights. - */ - if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0) - goto out; - if ((rights & KAUTH_VNODE_DELETE) && - parent_authorized_for_delete_child == FALSE && - ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0)) - goto out; - - /* - * Clear rights that have been authorized by reaching this point, bail if nothing left to - * check. + * If we're root we only want mode bits and flags for checking + * execute and immutability. */ - rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE); - if (rights == 0) + VATTR_WANTED(&va, va_mode); + VATTR_WANTED(&va, va_flags); + if (!is_suser) { + VATTR_WANTED(&va, va_uid); + VATTR_WANTED(&va, va_gid); + VATTR_WANTED(&va, va_acl); + } + if ((result = vnode_getattr(vp, &va, ctx)) != 0) { + KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result); goto out; + } + VATTR_WANTED(&va, va_type); + VATTR_RETURN(&va, va_type, vnode_vtype(vp)); - /* - * If we're not the superuser, authorize based on file properties; - * note that even if parent_authorized_for_delete_child is TRUE, we - * need to check on the node itself. - */ - if (!vfs_context_issuser(ctx)) { - /* process delete rights */ - if ((rights & KAUTH_VNODE_DELETE) && - ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) - goto out; - - /* process remaining rights */ - if ((rights & ~KAUTH_VNODE_DELETE) && - (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0) - goto out; - } else { - - /* - * Execute is only granted to root if one of the x bits is set. This check only - * makes sense if the posix mode bits are actually supported. 
- */ - if ((rights & KAUTH_VNODE_EXECUTE) && - (vp->v_type == VREG) && - VATTR_IS_SUPPORTED(&va, va_mode) && - !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) { - result = EPERM; - KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode); + if (vcp->dvp) { + VATTR_WANTED(&dva, va_mode); + VATTR_WANTED(&dva, va_flags); + if (!is_suser) { + VATTR_WANTED(&dva, va_uid); + VATTR_WANTED(&dva, va_gid); + VATTR_WANTED(&dva, va_acl); + } + if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) { + KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result); goto out; } - - KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp); + VATTR_WANTED(&dva, va_type); + VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp)); } + + result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser, + &found_deny, noimmutable, parent_authorized_for_delete_child); out: if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) kauth_acl_free(va.va_acl); @@ -6717,6 +7819,10 @@ out: * deny execute, we can synthesize a global right that allows anyone to * traverse this directory during a pathname lookup without having to * match the credential associated with this cache of rights. + * + * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE + * only if we actually check ACLs which we don't for root. As + * a workaround, the lookup fast path checks for root. */ if (!VATTR_IS_SUPPORTED(&va, va_mode) || ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == @@ -6724,24 +7830,10 @@ out: vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE); } } - if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) { - /* - * parent was successfully and newly authorized for content deletions - * add it to the cache, but only if it doesn't have the sticky - * bit set on it. This same check is done earlier guarding - * fetching of dva, and if we jumped to out without having done - * this, we will have returned already because of a non-zero - * 'result' value. - */ - if (VATTR_IS_SUPPORTED(&dva, va_mode) && - !(dva.va_mode & (S_ISVTX))) { - /* OK to cache delete rights */ - KAUTH_DEBUG("%p - caching DELETE_CHILD rights", dvp); - vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD); - } - } +success: if (parent_ref) vnode_put(vp); + /* * Note that this implies that we will allow requests for no rights, as well as * for rights that we do not recognise. There should be none of these. 
@@ -6750,6 +7842,109 @@ out: return(KAUTH_RESULT_ALLOW); } +int +vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap, + kauth_action_t action, vfs_context_t ctx) +{ + VATTR_INIT(vap); + VATTR_WANTED(vap, va_type); + VATTR_WANTED(vap, va_mode); + VATTR_WANTED(vap, va_flags); + if (dvap) { + VATTR_INIT(dvap); + if (action & KAUTH_VNODE_DELETE) { + VATTR_WANTED(dvap, va_type); + VATTR_WANTED(dvap, va_mode); + VATTR_WANTED(dvap, va_flags); + } + } else if (action & KAUTH_VNODE_DELETE) { + return (EINVAL); + } + + if (!vfs_context_issuser(ctx)) { + VATTR_WANTED(vap, va_uid); + VATTR_WANTED(vap, va_gid); + VATTR_WANTED(vap, va_acl); + if (dvap && (action & KAUTH_VNODE_DELETE)) { + VATTR_WANTED(dvap, va_uid); + VATTR_WANTED(dvap, va_gid); + VATTR_WANTED(dvap, va_acl); + } + } + + return (0); +} + +int +vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp, + kauth_action_t action, vfs_context_t ctx) +{ + struct _vnode_authorize_context auth_context; + vauth_ctx vcp; + kauth_ace_rights_t rights; + int noimmutable; + boolean_t found_deny; + boolean_t is_suser = FALSE; + int result = 0; + + vcp = &auth_context; + vcp->ctx = ctx; + vcp->vp = NULLVP; + vcp->vap = vap; + vcp->dvp = NULLVP; + vcp->dvap = dvap; + vcp->flags = vcp->flags_valid = 0; + + noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0; + rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE); + + /* + * Check for read-only filesystems. + */ + if ((rights & KAUTH_VNODE_WRITE_RIGHTS) && + mp && (mp->mnt_flag & MNT_RDONLY) && + ((vap->va_type == VREG) || (vap->va_type == VDIR) || + (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) || + (rights & KAUTH_VNODE_DELETE_CHILD))) { + result = EROFS; + goto out; + } + + /* + * Check for noexec filesystems. + */ + if ((rights & KAUTH_VNODE_EXECUTE) && + (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) { + result = EACCES; + goto out; + } + + if (vfs_context_issuser(ctx)) { + /* + * if we're not asking for execute permissions or modifications, + * then we're done, this action is authorized. + */ + if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) + goto out; + is_suser = TRUE; + } else { + if (!VATTR_IS_SUPPORTED(vap, va_uid) || + !VATTR_IS_SUPPORTED(vap, va_gid) || + (mp && vfs_extendedsecurity(mp) && !VATTR_IS_SUPPORTED(vap, va_acl))) { + panic("vnode attrs not complete for vnode_attr_authorize\n"); + } + } + + result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser, + &found_deny, noimmutable, FALSE); + + if (result == EPERM) + result = EACCES; +out: + return (result); +} + + int vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx) { @@ -6764,10 +7959,11 @@ static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx) { int error; - int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode; + int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode, inherit_restricted; kauth_cred_t cred; guid_t changer; mount_t dmp; + struct vnode_attr dva; error = 0; @@ -6777,6 +7973,8 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin defaulted_owner = defaulted_group = defaulted_mode = 0; + inherit_restricted = 0; + /* * Require that the filesystem support extended security to apply any. 
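 *
 * Illustrative aside (not part of this change set): the attribute-based
 * entry points added above authorize an operation from prefetched
 * attributes, with no vnode in hand.  Assumed calling sequence;
 * "myfs_record_to_vattr" is a hypothetical helper:
 *
 *	struct vnode_attr va, dva;
 *	kauth_action_t action = KAUTH_VNODE_DELETE;
 *	int error;
 *
 *	if ((error = vnode_attr_authorize_init(&va, &dva, action, ctx)) != 0)
 *		return (error);
 *	// populate every attribute the init call marked as wanted
 *	// (va_type/va_mode/va_flags, plus uid/gid/acl when not superuser)
 *	myfs_record_to_vattr(node, &va);	// hypothetical helper
 *	myfs_record_to_vattr(parent, &dva);	// hypothetical helper
 *	error = vnode_attr_authorize(&va, &dva, mp, action, ctx);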
*/ @@ -6806,6 +8004,16 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin } } + /* + * We need the dvp's va_flags and *may* need the gid of the directory, + * we ask for both here. + */ + VATTR_INIT(&dva); + VATTR_WANTED(&dva, va_gid); + VATTR_WANTED(&dva, va_flags); + if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) + goto out; + /* * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit grouo is set, that * group takes ownership of all new files. @@ -6816,11 +8024,6 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin } else { if (!VATTR_IS_ACTIVE(vap, va_gid)) { /* default group comes from parent object, fallback to current user */ - struct vnode_attr dva; - VATTR_INIT(&dva); - VATTR_WANTED(&dva, va_gid); - if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) - goto out; if (VATTR_IS_SUPPORTED(&dva, va_gid)) { VATTR_SET(vap, va_gid, dva.va_gid); } else { @@ -6832,7 +8035,14 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin if (!VATTR_IS_ACTIVE(vap, va_flags)) VATTR_SET(vap, va_flags, 0); - + + /* Determine if SF_RESTRICTED should be inherited from the parent + * directory. */ + if (VATTR_IS_SUPPORTED(&dva, va_flags) && + (dva.va_flags & SF_RESTRICTED)) { + inherit_restricted = 1; + } + /* default mode is everything, masked with current umask */ if (!VATTR_IS_ACTIVE(vap, va_mode)) { VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask); @@ -6957,6 +8167,12 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin } } out: + if (inherit_restricted) { + /* Apply SF_RESTRICTED to the file if its parent directory was + * restricted. This is done at the end so that root is not + * required if this flag is only set due to inheritance. */ + VATTR_SET(vap, va_flags, (vap->va_flags | SF_RESTRICTED)); + } if (defaulted_fieldsp) { if (defaulted_mode) { *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE; @@ -7077,10 +8293,10 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ * If the size is being set, make sure it's not a directory. */ if (VATTR_IS_ACTIVE(vap, va_data_size)) { - /* size is meaningless on a directory, don't permit this */ - if (vnode_isdir(vp)) { - KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory"); - error = EISDIR; + /* size is only meaningful on regular files, don't permit otherwise */ + if (!vnode_isreg(vp)) { + KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file"); + error = vnode_isdir(vp) ? EISDIR : EINVAL; goto out; } } @@ -7435,19 +8651,33 @@ no_guuid_change: required_action |= KAUTH_VNODE_WRITE_SECURITY; } - /* clear set-uid and set-gid bits as required by Posix */ - if (VATTR_IS_ACTIVE(vap, va_mode)) { - newmode = vap->va_mode; - } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) { - newmode = ova.va_mode; - } else { - KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits"); - newmode = 0; - } - if (newmode & (S_ISUID | S_ISGID)) { - VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID)); - KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode); + } + + /* + * clear set-uid and set-gid bits. POSIX only requires this for + * non-privileged processes but we do it even for root. 
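 *
 * Concretely (illustrative, not part of this change set): a chown of a
 * mode 04755 binary by its non-root owner leaves it at 0755, as does a
 * chown by root that does not also supply a mode; only a root caller that
 * passes an explicit va_mode in the same request (the setattrlist case
 * noted below) keeps the requested mode, set-uid/set-gid bits included.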
+ */ + if (VATTR_IS_ACTIVE(vap, va_mode)) { + newmode = vap->va_mode; + } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) { + newmode = ova.va_mode; + } else { + KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits"); + newmode = 0; + } + + /* chown always clears setuid/gid bits. An exception is made for + * setattrlist executed by a root process to set on a file: + * setattrlist is allowed to set the new mode on the file and change (chown) + * uid/gid. + */ + if (newmode & (S_ISUID | S_ISGID)) { + if (!VATTR_IS_ACTIVE(vap, va_mode) || !has_priv_suser) { + KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", + newmode, newmode & ~(S_ISUID | S_ISGID)); + newmode &= ~(S_ISUID | S_ISGID); } + VATTR_SET(vap, va_mode, newmode); } } @@ -7526,21 +8756,37 @@ vfs_setlocklocal(mount_t mp) } void -vfs_setunmountpreflight(mount_t mp) +vfs_setcompoundopen(mount_t mp) { mount_lock_spin(mp); - mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT; + mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN; mount_unlock(mp); } + void -vfs_setcompoundopen(mount_t mp) +vnode_setswapmount(vnode_t vp) { - mount_lock_spin(mp); - mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN; - mount_unlock(mp); + mount_lock(vp->v_mount); + vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT; + mount_unlock(vp->v_mount); +} + + +int64_t +vnode_getswappin_avail(vnode_t vp) +{ + int64_t max_swappin_avail = 0; + + mount_lock(vp->v_mount); + if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) + max_swappin_avail = vp->v_mount->mnt_max_swappin_available; + mount_unlock(vp->v_mount); + + return (max_swappin_avail); } + void vn_setunionwait(vnode_t vp) { @@ -7572,18 +8818,6 @@ vn_clearunionwait(vnode_t vp, int locked) vnode_unlock(vp); } -/* - * XXX - get "don't trigger mounts" flag for thread; used by autofs. - */ -extern int thread_notrigger(void); - -int -thread_notrigger(void) -{ - struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread()); - return (uth->uu_notrigger); -} - /* * Removes orphaned apple double files during a rmdir * Works by: @@ -7604,8 +8838,9 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * int eofflag, siz = UIO_BUFF_SIZE, nentries = 0; int open_flag = 0, full_erase_flag = 0; char uio_buf[ UIO_SIZEOF(1) ]; - char *rbuf = NULL, *cpos, *cend; - struct nameidata nd_temp; + char *rbuf = NULL; + void *dir_pos; + void *dir_end; struct dirent *dp; errno_t error; @@ -7617,7 +8852,7 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * if (error == EBUSY) *restart_flag = 1; if (error != 0) - goto outsc; + return (error); /* * set up UIO @@ -7658,14 +8893,14 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * /* * Iterate through directory */ - cpos = rbuf; - cend = rbuf + siz; - dp = (struct dirent*) cpos; + dir_pos = (void*) rbuf; + dir_end = (void*) (rbuf + siz); + dp = (struct dirent*) (dir_pos); - if (cpos == cend) + if (dir_pos == dir_end) eofflag = 1; - while ((cpos < cend)) { + while (dir_pos < dir_end) { /* * Check for . and .. 
as well as directories */ @@ -7683,8 +8918,8 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * goto outsc; } } - cpos += dp->d_reclen; - dp = (struct dirent*)cpos; + dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen); + dp = (struct dirent*)dir_pos; } /* @@ -7730,14 +8965,14 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * /* * Iterate through directory */ - cpos = rbuf; - cend = rbuf + siz; - dp = (struct dirent*) cpos; + dir_pos = (void*) rbuf; + dir_end = (void*) (rbuf + siz); + dp = (struct dirent*) dir_pos; - if (cpos == cend) + if (dir_pos == dir_end) eofflag = 1; - while ((cpos < cend)) { + while (dir_pos < dir_end) { /* * Check for . and .. as well as directories */ @@ -7746,19 +8981,18 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.')) ) { - NDINIT(&nd_temp, DELETE, OP_UNLINK, USEDVP, - UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), - ctx); - nd_temp.ni_dvp = vp; - error = unlink1(ctx, &nd_temp, 0); + error = unlink1(ctx, vp, + CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE, + VNODE_REMOVE_SKIP_NAMESPACE_EVENT | + VNODE_REMOVE_NO_AUDIT_PATH); if (error && error != ENOENT) { goto outsc; } } - cpos += dp->d_reclen; - dp = (struct dirent*)cpos; + dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen); + dp = (struct dirent*)dir_pos; } /* @@ -7786,7 +9020,8 @@ outsc: if (open_flag) VNOP_CLOSE(vp, FREAD, ctx); - uio_free(auio); + if (auio) + uio_free(auio); FREE(rbuf, M_TEMP); vnode_resume(vp); @@ -7808,6 +9043,148 @@ lock_vnode_and_post(vnode_t vp, int kevent_num) } } +void panic_print_vnodes(void); + +/* define PANIC_PRINTS_VNODES only if investigation is required. */ +#ifdef PANIC_PRINTS_VNODES + +static const char *__vtype(uint16_t vtype) +{ + switch (vtype) { + case VREG: + return "R"; + case VDIR: + return "D"; + case VBLK: + return "B"; + case VCHR: + return "C"; + case VLNK: + return "L"; + case VSOCK: + return "S"; + case VFIFO: + return "F"; + case VBAD: + return "x"; + case VSTR: + return "T"; + case VCPLX: + return "X"; + default: + return "?"; + } +} + +/* + * build a path from the bottom up + * NOTE: called from the panic path - no alloc'ing of memory and no locks! + */ +static char *__vpath(vnode_t vp, char *str, int len, int depth) +{ + int vnm_len; + const char *src; + char *dst; + + if (len <= 0) + return str; + /* str + len is the start of the string we created */ + if (!vp->v_name) + return str + len; + + /* follow mount vnodes to get the full path */ + if ((vp->v_flag & VROOT)) { + if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) { + return __vpath(vp->v_mount->mnt_vnodecovered, + str, len, depth+1); + } + return str + len; + } + + src = vp->v_name; + vnm_len = strlen(src); + if (vnm_len > len) { + /* truncate the name to fit in the string */ + src += (vnm_len - len); + vnm_len = len; + } + + /* start from the back and copy just characters (no NULLs) */ + + /* this will chop off leaf path (file) names */ + if (depth > 0) { + dst = str + len - vnm_len; + memcpy(dst, src, vnm_len); + len -= vnm_len; + } else { + dst = str + len; + } + + if (vp->v_parent && len > 1) { + /* follow parents up the chain */ + len--; + *(dst-1) = '/'; + return __vpath(vp->v_parent, str, len, depth + 1); + } + + return dst; +} + +extern int kdb_printf(const char *format, ...) 
__printflike(1,2); + +#define SANE_VNODE_PRINT_LIMIT 5000 +void panic_print_vnodes(void) +{ + mount_t mnt; + vnode_t vp; + int nvnodes = 0; + const char *type; + char *nm; + char vname[257]; + + kdb_printf("\n***** VNODES *****\n" + "TYPE UREF ICNT PATH\n"); + + /* NULL-terminate the path name */ + vname[sizeof(vname)-1] = '\0'; + + /* + * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist + */ + TAILQ_FOREACH(mnt, &mountlist, mnt_list) { + + if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) { + kdb_printf("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n", + &mountlist, mnt); + break; + } + + TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) { + + if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) { + kdb_printf("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n", + &mnt->mnt_vnodelist, vp); + break; + } + + if (++nvnodes > SANE_VNODE_PRINT_LIMIT) + return; + type = __vtype(vp->v_type); + nm = __vpath(vp, vname, sizeof(vname)-1, 0); + kdb_printf("%s %0d %0d %s\n", + type, vp->v_usecount, vp->v_iocount, nm); + } + } +} + +#else /* !PANIC_PRINTS_VNODES */ +void panic_print_vnodes(void) +{ + return; +} +#endif + + #ifdef JOE_DEBUG static void record_vp(vnode_t vp, int count) { struct uthread *ut; @@ -8034,26 +9411,6 @@ vnode_resolver_detach(vnode_t vp) OSAddAtomic(-1, &mp->mnt_numtriggers); } -/* - * Pathname operations that don't trigger a mount for trigger vnodes - */ -static const u_int64_t ignorable_pathops_mask = - 1LL << OP_MOUNT | - 1LL << OP_UNMOUNT | - 1LL << OP_STATFS | - 1LL << OP_ACCESS | - 1LL << OP_GETATTR | - 1LL << OP_LISTXATTR; - -int -vfs_istraditionaltrigger(enum path_operation op, const struct componentname *cnp) -{ - if (cnp->cn_flags & ISLASTCN) - return ((1LL << op) & ignorable_pathops_mask) == 0; - else - return (1); -} - __private_extern__ void vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx) @@ -8453,3 +9810,81 @@ out: } #endif /* CONFIG_TRIGGERS */ + +vm_offset_t kdebug_vnode(vnode_t vp) +{ + return VM_KERNEL_ADDRPERM(vp); +} + +static int flush_cache_on_write = 0; +SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, + CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, + "always flush the drive cache on writes to uncached files"); + +int vnode_should_flush_after_write(vnode_t vp, int ioflag) +{ + return (flush_cache_on_write + && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp))); +} + +/* + * sysctl for use by disk I/O tracing tools to get the list of existing + * vnodes' paths + */ + +struct vnode_trace_paths_context { + uint64_t count; + long path[MAXPATHLEN / sizeof (long) + 1]; /* + 1 in case sizeof (long) does not divide MAXPATHLEN */ +}; + +static int vnode_trace_path_callback(struct vnode *vp, void *arg) { + int len, rv; + struct vnode_trace_paths_context *ctx; + + ctx = arg; + + len = sizeof (ctx->path); + rv = vn_getpath(vp, (char *)ctx->path, &len); + /* vn_getpath() NUL-terminates, and len includes the NUL */ + + if (!rv) { + kdebug_lookup_gen_events(ctx->path, len, vp, TRUE); + + if (++(ctx->count) == 1000) { + thread_yield_to_preemption(); + ctx->count = 0; + } + } + + return VNODE_RETURNED; +} + +static int vfs_trace_paths_callback(mount_t mp, void *arg) { + if (mp->mnt_flag & MNT_LOCAL) + vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg); + + return VFS_RETURNED; +} + +static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS { + struct vnode_trace_paths_context ctx; + + (void)oidp; + (void)arg1; + (void)arg2; + (void)req; + + 
	if (!kauth_cred_issuser(kauth_cred_get()))
+		return EPERM;
+
+	if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP))
+		return EINVAL;
+
+	bzero(&ctx, sizeof (struct vnode_trace_paths_context));
+
+	vfs_iterate(0, vfs_trace_paths_callback, &ctx);
+
+	return 0;
+}
+
+SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");
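
A minimal userspace sketch of how a disk I/O tracing tool might drive the trace_paths sysctl registered above. The MIB string "vfs.generic.trace_paths" is inferred from the SYSCTL_PROC(_vfs_generic, ..., "trace_paths") registration and is an assumption, not a documented interface; per the handler, the call fails with EPERM unless issued by root, and with EINVAL unless kdebug tracing of VFS_LOOKUP is already enabled.

/*
 * Userspace sketch (not part of the kernel change above): poke the
 * read-only trace_paths node so the kernel emits a VFS_LOOKUP kdebug
 * entry for the path of every vnode on local mounts.
 *
 * Assumptions: MIB name "vfs.generic.trace_paths"; the process runs as
 * root; kdebug tracing of VFS_LOOKUP has already been turned on by the
 * tracing tool before this call.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/*
	 * No output buffer is needed: the handler ignores the request
	 * buffer and only side-effects the kdebug stream.
	 */
	if (sysctlbyname("vfs.generic.trace_paths", NULL, NULL, NULL, 0) == -1) {
		fprintf(stderr, "trace_paths: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}

Because the node is CTLFLAG_MASKED it does not appear in sysctl -a output, and the callback yields to preemption after every 1000 vnodes it walks, so emitting paths for a large vnode table does not monopolize the CPU.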