/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
+#include <sys/kern_memorystatus.h>
#include <miscfs/fifofs/fifo.h>
#include <string.h>
unsigned int val2);
__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
+extern int system_inshutdown;
+
static void vnode_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
+static void vnode_list_remove_locked(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vnode_reclaim_internal(vnode_t, int, int, int);
static void vnode_dropiocount (vnode_t);
-static errno_t vnode_getiocount(vnode_t vp, int vid, int vflags);
-static int vget_internal(vnode_t, int, int);
+static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);
static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int vnode_reload(vnode_t);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void vnode_iterate_setup(mount_t);
-static int vnode_umount_preflight(mount_t, vnode_t, int);
+int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
+static mount_t vfs_getvfs_locked(fsid_t *);
errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
+#ifdef JOE_DEBUG
+static void record_vp(vnode_t vp, int count);
+#endif
+
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
* place for now... it should be deprecated out of the
* exports and removed eventually.
*/
-unsigned long vnodetarget; /* target for vnreclaim() */
+u_int32_t vnodetarget; /* target for vnreclaim() */
#define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */
/*
vm_size_t oval = val1 - VNODE_FREE_MIN;
vm_size_t nval;
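+	/* nothing to adjust if the vnode target hasn't changed */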
+ if (val1 == val2) {
+ return KERN_SUCCESS;
+ }
+
if(val2 < VNODE_FREE_MIN)
nval = 0;
else
if (vp->v_numoutput > output_target) {
- slpflag &= ~PDROP;
+ slpflag |= PDROP;
- vnode_lock(vp);
+ vnode_lock_spin(vp);
while ((vp->v_numoutput > output_target) && error == 0) {
if (output_target)
ts.tv_sec = (slptimeout/100);
ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000 ;
error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
+
+ vnode_lock_spin(vp);
}
vnode_unlock(vp);
}
}
-static int
+int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
vnode_t vp;
lck_mtx_lock(&mp->mnt_mlock);
}
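+
+/*
+ * take the mount's mutex in spin mode; for short hold times only,
+ * e.g. the reference count updates in mount_ref() and mount_drop()
+ */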
+void
+mount_lock_spin(mount_t mp)
+{
+ lck_mtx_lock_spin(&mp->mnt_mlock);
+}
+
void
mount_unlock(mount_t mp)
{
mount_ref(mount_t mp, int locked)
{
if ( !locked)
- mount_lock(mp);
+ mount_lock_spin(mp);
mp->mnt_count++;
mount_drop(mount_t mp, int locked)
{
if ( !locked)
- mount_lock(mp);
+ mount_lock_spin(mp);
mp->mnt_count--;
{
mount_t mp;
- mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
- bzero((char *)mp, (u_long)sizeof(struct mount));
+ mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
+ bzero((char *)mp, sizeof(struct mount));
/* Initialize the default IO constraints */
mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
mp->mnt_devblocksize = DEV_BSIZE;
mp->mnt_alignmentmask = PAGE_MASK;
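+	/* default I/O queue depth and scale; vfs_init_io_attributes()
+	 * raises them if the device reports a deeper command pool */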
+ mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
+ mp->mnt_ioscale = 1;
mp->mnt_ioflags = 0;
mp->mnt_realrootvp = NULLVP;
mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
dounmount(mp, MNT_FORCE, 0, ctx);
goto fail;
}
-
- /* VFS_ROOT provides reference so flags = 0 */
error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
+ /*
+ * get rid of reference provided by VFS_ROOT
+ */
+ vnode_put(vp);
+
if (error) {
printf("%s() vnode_label() returned %d\n",
__func__, error);
/*
* Lookup a mount point by filesystem identifier.
*/
-extern mount_t vfs_getvfs_locked(fsid_t *);
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	return (mount_list_lookupby_fsid(fsid, 0, 0));
}
-struct mount *
+static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
return(mount_list_lookupby_fsid(fsid, 1, 0));
panic("insmntque: vp not in mount vnode list");
vp->v_lflag &= ~VNAMED_MOUNT;
- mount_lock(lmp);
+ mount_lock_spin(lmp);
mount_drop(lmp, 1);
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) != NULL) {
- mount_lock(mp);
+ mount_lock_spin(mp);
if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
panic("vp already in mount list");
if (mp->mnt_lflag & MNT_LITER)
TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
if (vp->v_lflag & VNAMED_MOUNT)
panic("insmntque: vp already in mount vnode list");
- if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
- panic("insmntque: vp on the free list\n");
vp->v_lflag |= VNAMED_MOUNT;
mount_ref(mp, 1);
mount_unlock(mp);
{
struct vnode *vp;
struct vnode **vpp;
+ struct specinfo *sin = NULL;
int vid = 0;
vpp = &speclisth[SPECHASH(nvp_rdev)];
SPECHASH_UNLOCK();
if (vp) {
+found_alias:
if (vnode_getwithvid(vp,vid)) {
goto loop;
}
* Alias, but not in use, so flush it out.
*/
if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
goto loop;
}
+
}
if (vp == NULL || vp->v_tag != VT_NON) {
-retnullvp:
- MALLOC_ZONE(nvp->v_specinfo, struct specinfo *, sizeof(struct specinfo),
- M_SPECINFO, M_WAITOK);
+ if (sin == NULL) {
+ MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
+ M_SPECINFO, M_WAITOK);
+ }
+
+ nvp->v_specinfo = sin;
bzero(nvp->v_specinfo, sizeof(struct specinfo));
nvp->v_rdev = nvp_rdev;
nvp->v_specflags = 0;
nvp->v_speclastr = -1;
SPECHASH_LOCK();
+
+ /* We dropped the lock, someone could have added */
+ if (vp == NULLVP) {
+ for (vp = *vpp; vp; vp = vp->v_specnext) {
+ if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
+ vid = vp->v_id;
+ SPECHASH_UNLOCK();
+ goto found_alias;
+ }
+ }
+ }
+
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
*vpp = nvp;
- SPECHASH_UNLOCK();
if (vp != NULLVP) {
- nvp->v_flag |= VALIASED;
- vp->v_flag |= VALIASED;
+ nvp->v_specflags |= SI_ALIASED;
+ vp->v_specflags |= SI_ALIASED;
+ SPECHASH_UNLOCK();
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
+ } else {
+ SPECHASH_UNLOCK();
}
+
return (NULLVP);
}
+
+ if (sin) {
+ FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
+ }
+
if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
return(vp);
- else {
- panic("checkalias with VT_NON vp that shouldn't: %x", (unsigned int)vp);
- goto retnullvp;
- }
+
+ panic("checkalias with VT_NON vp that shouldn't: %p", vp);
+
return (vp);
}
* and an error returned to indicate that the vnode is no longer
* usable (possibly having been changed to a new file system type).
*/
-static int
+int
vget_internal(vnode_t vp, int vid, int vflags)
{
int error = 0;
static void
vnode_list_add(vnode_t vp)
{
+#if DIAGNOSTIC
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
+#endif
/*
* if it is already on a list or non zero references return
*/
- if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0))
+ if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
return;
+
vnode_list_lock();
if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
vnode_list_unlock();
}
+
+/*
+ * remove the vnode from appropriate free list.
+ * called with vnode LOCKED and
+ * the list lock held
+ */
+static void
+vnode_list_remove_locked(vnode_t vp)
+{
+ if (VONLIST(vp)) {
+ /*
+ * the v_listflag field is
+ * protected by the vnode_list_lock
+ */
+ if (vp->v_listflag & VLIST_RAGE)
+ VREMRAGE("vnode_list_remove", vp);
+ else if (vp->v_listflag & VLIST_DEAD)
+ VREMDEAD("vnode_list_remove", vp);
+ else
+ VREMFREE("vnode_list_remove", vp);
+ }
+}
+
+
/*
* remove the vnode from appropriate free list.
+ * called with vnode LOCKED
*/
static void
vnode_list_remove(vnode_t vp)
{
+#if DIAGNOSTIC
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
+#endif
/*
* we want to avoid taking the list lock
* in the case where we're not on the free
/*
* however, we're not guaranteed that
* we won't go from the on-list state
- * to the non-on-list state until we
+ * to the not-on-list state until we
* hold the vnode_list_lock... this
- * is due to new_vnode removing vnodes
+ * is due to "new_vnode" removing vnodes
 * from the free list under the list_lock
* w/o the vnode lock... so we need to
* check again whether we're currently
* on the free list
*/
- if (VONLIST(vp)) {
- if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("vnode_list_remove", vp);
- else if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("vnode_list_remove", vp);
- else
- VREMFREE("vnode_list_remove", vp);
+ vnode_list_remove_locked(vp);
- VLISTNONE(vp);
- }
vnode_list_unlock();
}
}
{
if ( !locked)
vnode_lock_spin(vp);
-
+#if DIAGNOSTIC
+ else
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
+#endif
if (--vp->v_usecount < 0)
- panic("vnode_rele_ext: vp %p usecount -ve : %d", vp, vp->v_usecount);
+ panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
if (fmode & FWRITE) {
if (--vp->v_writecount < 0)
- panic("vnode_rele_ext: vp %p writecount -ve : %ld", vp, vp->v_writecount);
+ panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
}
if (fmode & O_EVTONLY) {
if (--vp->v_kusecount < 0)
- panic("vnode_rele_ext: vp %p kusecount -ve : %d", vp, vp->v_kusecount);
+ panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
}
if (vp->v_kusecount > vp->v_usecount)
- panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d)\n",vp, vp->v_kusecount, vp->v_usecount);
+ panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
+
if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
/*
* vnode is still busy... if we're the last
goto defer_reclaim;
}
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
int busy = 0;
int reclaimed = 0;
int retval;
- int vid;
+ unsigned int vid;
mount_lock(mp);
vnode_iterate_setup(mp);
return(retval);
}
- /* iterate over all the vnodes */
- while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
- vp = TAILQ_FIRST(&mp->mnt_workerqueue);
- TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
- TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
- if ( (vp->v_mount != mp) || (vp == skipvp)) {
- continue;
- }
- vid = vp->v_id;
- mount_unlock(mp);
- vnode_lock(vp);
+ /* iterate over all the vnodes */
+ while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
+
+ vp = TAILQ_FIRST(&mp->mnt_workerqueue);
+ TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
+ TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
+
+ if ( (vp->v_mount != mp) || (vp == skipvp)) {
+ continue;
+ }
+ vid = vp->v_id;
+ mount_unlock(mp);
+
+ vnode_lock_spin(vp);
if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
vnode_unlock(vp);
continue;
}
/*
- * If requested, skip over vnodes marked VSWAP.
+ * If requested, skip over vnodes marked VROOT.
*/
if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
vnode_unlock(vp);
*/
if (((vp->v_usecount == 0) ||
((vp->v_usecount - vp->v_kusecount) == 0))) {
+
+ vnode_lock_convert(vp);
	vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
-
vnode_unlock(vp);
+
reclaimed++;
mount_lock(mp);
continue;
* anonymous device. For all other files, just kill them.
*/
if (flags & FORCECLOSE) {
+ vnode_lock_convert(vp);
+
if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
vnode_unlock(vp);
return (0);
}
-long num_recycledvnodes = 0; /* long for OSAddAtomic */
+long num_recycledvnodes = 0;
/*
* Disassociate the underlying file system from a vnode.
* The vnode lock is held on entry.
int need_inactive;
int already_terminating;
int clflags = 0;
+#if NAMEDSTREAMS
+ int is_namedstream;
+#endif
/*
* Check to see if the vnode is in use.
*/
insmntque(vp, (struct mount *)0);
+#if NAMEDSTREAMS
+ is_namedstream = vnode_isnamedstream(vp);
+#endif
+
vnode_unlock(vp);
- OSAddAtomic(1, &num_recycledvnodes);
- /*
- * purge from the name cache as early as possible...
- */
- cache_purge(vp);
+ OSAddAtomicLong(1, &num_recycledvnodes);
if (flags & DOCLOSE)
clflags |= IO_NDELAY;
if (active || need_inactive)
VNOP_INACTIVE(vp, ctx);
+#if NAMEDSTREAMS
+ if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
+ vnode_t pvp = vp->v_parent;
+
+ /* Delete the shadow stream file before we reclaim its vnode */
+ if (vnode_isshadow(vp)) {
+ vnode_relenamedstream(pvp, vp, ctx);
+ }
+
+ /*
+ * Because vclean calls VNOP_INACTIVE prior to calling vnode_relenamedstream, we may not have
+ * torn down and/or deleted the shadow file yet. On HFS, if the shadow file is sufficiently large
+ * and occupies a large number of extents, the deletion will be deferred until VNOP_INACTIVE
+ * and the file treated like an open-unlinked. To rectify this, call VNOP_INACTIVE again
+ * explicitly to force its removal.
+ */
+ if (vnode_isshadow(vp)) {
+ VNOP_INACTIVE(vp, ctx);
+ }
+
+ /*
+ * No more streams associated with the parent. We
+ * have a ref on it, so its identity is stable.
+ * If the parent is on an opaque volume, then we need to know
+ * whether it has associated named streams.
+ */
+ if (vfs_authopaque(pvp->v_mount)) {
+ vnode_lock_spin(pvp);
+ pvp->v_lflag &= ~VL_HASSTREAMS;
+ vnode_unlock(pvp);
+ }
+ }
+#endif
+
/*
* Destroy ubc named reference
* cluster_release is done on this path
panic("vclean: cannot reclaim");
// make sure the name & parent ptrs get cleaned out!
- vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME);
+ vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
vnode_lock(vp);
panic("vnop_revoke");
#endif
- if (vp->v_flag & VALIASED) {
+ if (vnode_isaliased(vp)) {
/*
* If a vgone (or vclean) is already in progress,
- * wait until it is done and return.
+ * return an immediate error
*/
- vnode_lock(vp);
- if (vp->v_lflag & VL_TERMINATE) {
- vnode_unlock(vp);
+ if (vp->v_lflag & VL_TERMINATE)
return(ENOENT);
- }
- vnode_unlock(vp);
+
/*
* Ensure that vp will not be vgone'd while we
* are eliminating its aliases.
*/
SPECHASH_LOCK();
- while (vp->v_flag & VALIASED) {
+ while ((vp->v_specflags & SI_ALIASED)) {
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type || vp == vq)
SPECHASH_LOCK();
break;
}
- vnode_reclaim_internal(vq, 0, 0, 0);
+ vnode_reclaim_internal(vq, 0, 1, 0);
vnode_put(vq);
SPECHASH_LOCK();
break;
int
vnode_recycle(struct vnode *vp)
{
- vnode_lock(vp);
+ vnode_lock_spin(vp);
if (vp->v_iocount || vp->v_usecount) {
vp->v_lflag |= VL_MARKTERM;
vnode_unlock(vp);
return(0);
}
+ vnode_lock_convert(vp);
vnode_reclaim_internal(vp, 1, 0, 0);
+
vnode_unlock(vp);
return (1);
if (vq == NULL)
panic("missing bdev");
}
- if (vp->v_flag & VALIASED) {
+ if (vp->v_specflags & SI_ALIASED) {
vx = NULL;
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
if (vx == NULL)
panic("missing alias");
if (vq == NULL)
- vx->v_flag &= ~VALIASED;
- vp->v_flag &= ~VALIASED;
+ vx->v_specflags &= ~SI_ALIASED;
+ vp->v_specflags &= ~SI_ALIASED;
}
SPECHASH_UNLOCK();
{
int vid;
loop:
- if ((vp->v_flag & VALIASED) == 0)
+ if (!vnode_isaliased(vp))
return (vp->v_usecount - vp->v_kusecount);
count = 0;
/*
* Alias, but not in use, so flush it out.
*/
- vnode_reclaim_internal(vq, 1, 0, 0);
+ vnode_reclaim_internal(vq, 1, 1, 0);
+ vnode_put_locked(vq);
vnode_unlock(vq);
- vnode_put(vq);
goto loop;
}
count += (vq->v_usecount - vq->v_kusecount);
/*
* Print out a description of a vnode.
*/
-#if !CONFIG_NO_PRINTF_STRINGS
static const char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
-#endif
void
vprint(const char *label, struct vnode *vp)
if (label != NULL)
printf("%s: ", label);
- printf("type %s, usecount %d, writecount %ld",
+ printf("type %s, usecount %d, writecount %d",
typename[vp->v_type], vp->v_usecount, vp->v_writecount);
sbuf[0] = '\0';
if (vp->v_flag & VROOT)
strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
if (vp->v_flag & VBWAIT)
strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
- if (vp->v_flag & VALIASED)
+ if (vnode_isaliased(vp))
strlcat(sbuf, "|VALIASED", sizeof(sbuf));
if (sbuf[0] != '\0')
printf(" flags (%s)", &sbuf[1]);
return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
}
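+/* like vn_getpath() above, but may call into the filesystem to build the path */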
+int
+vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
+{
+ return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
+}
int
vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
// them (i.e. a short 8 character name can't have an 8
// character extension).
//
+extern lck_mtx_t *pkg_extensions_lck;
+
__private_extern__ int
-set_package_extensions_table(void *data, int nentries, int maxwidth)
+set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
- char *new_exts;
+ char *new_exts, *old_exts;
int error;
if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
return EINVAL;
}
- MALLOC(new_exts, char *, nentries * maxwidth, M_TEMP, M_WAITOK);
+
+ // allocate one byte extra so we can guarantee null termination
+ MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
+ if (new_exts == NULL) {
+ return ENOMEM;
+ }
- error = copyin(CAST_USER_ADDR_T(data), new_exts, nentries * maxwidth);
+ error = copyin(data, new_exts, nentries * maxwidth);
if (error) {
FREE(new_exts, M_TEMP);
return error;
}
- if (extension_table) {
- FREE(extension_table, M_TEMP);
- }
+ new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
+
+ qsort(new_exts, nentries, maxwidth, extension_cmp);
+
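+	/* swap the new table in under the lock; free the old one after dropping it */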
+ lck_mtx_lock(pkg_extensions_lck);
+
+ old_exts = extension_table;
extension_table = new_exts;
nexts = nentries;
max_ext_width = maxwidth;
- qsort(extension_table, nexts, maxwidth, extension_cmp);
+ lck_mtx_unlock(pkg_extensions_lck);
+
+ if (old_exts) {
+ FREE(old_exts, M_TEMP);
+ }
return 0;
}
// advance over the "."
name_ext++;
+ lck_mtx_lock(pkg_extensions_lck);
+
// now iterate over all the extensions to see if any match
ptr = &extension_table[0];
for(i=0; i < nexts; i++, ptr+=max_ext_width) {
extlen = strlen(ptr);
if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
// aha, a match!
+ lck_mtx_unlock(pkg_extensions_lck);
return 1;
}
}
+ lck_mtx_unlock(pkg_extensions_lck);
+
// if we get here, no extension matched
return 0;
}
return 0;
}
+/*
+ * Determine if a name is inappropriate for a searchfs query.
+ * This list consists of /System currently.
+ */
+
+int vn_searchfs_inappropriate_name(const char *name, int len) {
+ const char *bad_names[] = { "System" };
+ int bad_len[] = { 6 };
+ int i;
+
+ for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
+ if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
+ return 1;
+ }
+ }
+
+ // if we get here, no name matched
+ return 0;
+}
/*
* Top level filesystem related information gathering.
int *username;
u_int usernamelen;
int error;
- struct vfsconf *vfsc;
+ struct vfsconf vfsc;
/* All non VFS_GENERIC and in VFS_GENERIC,
* VFS_MAXTYPENUM, VFS_CONF, VFS_SET_PACKAGE_EXTS
if (namelen < 2)
return (EISDIR); /* overloaded */
if (name[0] != VFS_GENERIC) {
- for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
- if (vfsp->vfc_typenum == name[0])
+
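+	/* take a reference so the filesystem can't be unregistered while
+	 * we call through its vfs_sysctl entry point */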
+ mount_list_lock();
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[0]) {
+ vfsp->vfc_refcount++;
break;
+ }
+ mount_list_unlock();
+
if (vfsp == NULL)
return (ENOTSUP);
/* XXX current context proxy for proc p? */
- return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
+ error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
oldp, oldlenp, newp, newlen,
vfs_context_current()));
+
+ mount_list_lock();
+ vfsp->vfc_refcount--;
+ mount_list_unlock();
+ return error;
}
switch (name[1]) {
case VFS_MAXTYPENUM:
case VFS_CONF:
if (namelen < 3)
return (ENOTDIR); /* overloaded */
+
+ mount_list_lock();
for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
if (vfsp->vfc_typenum == name[2])
break;
- if (vfsp == NULL)
+
+ if (vfsp == NULL) {
+ mount_list_unlock();
return (ENOTSUP);
- vfsc = (struct vfsconf *)vfsp;
- if (proc_is64bit(p)) {
- struct user_vfsconf usr_vfsc;
- usr_vfsc.vfc_vfsops = CAST_USER_ADDR_T(vfsc->vfc_vfsops);
- bcopy(vfsc->vfc_name, usr_vfsc.vfc_name, sizeof(usr_vfsc.vfc_name));
- usr_vfsc.vfc_typenum = vfsc->vfc_typenum;
- usr_vfsc.vfc_refcount = vfsc->vfc_refcount;
- usr_vfsc.vfc_flags = vfsc->vfc_flags;
- usr_vfsc.vfc_mountroot = CAST_USER_ADDR_T(vfsc->vfc_mountroot);
- usr_vfsc.vfc_next = CAST_USER_ADDR_T(vfsc->vfc_next);
- return (sysctl_rdstruct(oldp, oldlenp, newp, &usr_vfsc,
- sizeof(usr_vfsc)));
- }
- else {
- return (sysctl_rdstruct(oldp, oldlenp, newp, vfsc,
- sizeof(struct vfsconf)));
}
+
+ vfsc.vfc_reserved1 = 0;
+ bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
+ vfsc.vfc_typenum = vfsp->vfc_typenum;
+ vfsc.vfc_refcount = vfsp->vfc_refcount;
+ vfsc.vfc_flags = vfsp->vfc_flags;
+ vfsc.vfc_reserved2 = 0;
+ vfsc.vfc_reserved3 = 0;
+
+ mount_list_unlock();
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
+ sizeof(struct vfsconf)));
case VFS_SET_PACKAGE_EXTS:
- return set_package_extensions_table((void *)name[1], name[2], name[3]);
+ return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
}
/*
* We need to get back into the general MIB, so we need to re-prepend
bcopy(name, username + 1, namelen * sizeof(*name));
username[0] = CTL_VFS;
error = userland_sysctl(p, username, usernamelen, oldp,
- oldlenp, 1, newp, newlen, oldlenp);
+ oldlenp, newp, newlen, oldlenp);
FREE(username, M_TEMP);
return (error);
}
error = EBUSY;
goto out;
}
- if (vp->v_flag & VALIASED) {
+ if (vp->v_specflags & SI_ALIASED) {
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type)
continue;
} else if (error == EBUSY) {
/* If EBUSY is returned, the unmount was already in progress */
- printf("unmount of %x failed (", (unsigned int)mp);
+ printf("unmount of %p failed (", mp);
printf("BUSY)\n");
}
mount_list_lock();
{
struct ubc_info *uip;
- vnode_lock(vp);
+ vnode_lock_spin(vp);
vp->v_lflag &= ~VNAMED_UBC;
uip = vp->v_ubcinfo;
vp->v_ubcinfo = UBC_INFO_NULL;
- ubc_info_deallocate(uip);
-
vnode_unlock(vp);
+
+ ubc_info_deallocate(uip);
}
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
int error;
- off_t readblockcnt;
- off_t writeblockcnt;
- off_t readmaxcnt;
- off_t writemaxcnt;
- off_t readsegcnt;
- off_t writesegcnt;
- off_t readsegsize;
- off_t writesegsize;
- off_t alignment;
- u_long blksize;
+ off_t readblockcnt = 0;
+ off_t writeblockcnt = 0;
+ off_t readmaxcnt = 0;
+ off_t writemaxcnt = 0;
+ off_t readsegcnt = 0;
+ off_t writesegcnt = 0;
+ off_t readsegsize = 0;
+ off_t writesegsize = 0;
+ off_t alignment = 0;
+ off_t ioqueue_depth = 0;
+ u_int32_t blksize;
u_int64_t temp;
u_int32_t features;
vfs_context_t ctx = vfs_context_current();
-
+ int isssd = 0;
int isvirtual = 0;
/*
* determine if this mount point exists on the same device as the root
mp->mnt_devblocksize = blksize;
+ /*
+ * set the maximum possible I/O size
+ * this may get clipped to a smaller value
+ * based on which constraints are being advertised
+ * and if those advertised constraints result in a smaller
+ * limit for a given I/O
+ */
+ mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
+ mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
+
if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
if (isvirtual)
mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
}
+ if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
+ if (isssd)
+ mp->mnt_kern_flag |= MNTK_SSD;
+ }
if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
(caddr_t)&features, 0, ctx)))
(caddr_t)&alignment, 0, ctx)))
return (error);
+ if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
+ (caddr_t)&ioqueue_depth, 0, ctx)))
+ return (error);
+
if (readmaxcnt)
- temp = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
- else {
- if (readblockcnt) {
- temp = readblockcnt * blksize;
- temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
- } else
- temp = MAXPHYS;
+ mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
+
+ if (readblockcnt) {
+ temp = readblockcnt * blksize;
+ temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
+
+ if (temp < mp->mnt_maxreadcnt)
+ mp->mnt_maxreadcnt = (u_int32_t)temp;
}
- mp->mnt_maxreadcnt = (u_int32_t)temp;
if (writemaxcnt)
- temp = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
- else {
- if (writeblockcnt) {
- temp = writeblockcnt * blksize;
- temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
- } else
- temp = MAXPHYS;
+ mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
+
+ if (writeblockcnt) {
+ temp = writeblockcnt * blksize;
+ temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
+
+ if (temp < mp->mnt_maxwritecnt)
+ mp->mnt_maxwritecnt = (u_int32_t)temp;
}
- mp->mnt_maxwritecnt = (u_int32_t)temp;
if (readsegcnt) {
temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
- mp->mnt_segreadcnt = (u_int16_t)temp;
+ } else {
+ temp = mp->mnt_maxreadcnt / PAGE_SIZE;
+
+ if (temp > UINT16_MAX)
+ temp = UINT16_MAX;
}
+ mp->mnt_segreadcnt = (u_int16_t)temp;
+
if (writesegcnt) {
temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
- mp->mnt_segwritecnt = (u_int16_t)temp;
+ } else {
+ temp = mp->mnt_maxwritecnt / PAGE_SIZE;
+
+ if (temp > UINT16_MAX)
+ temp = UINT16_MAX;
}
+ mp->mnt_segwritecnt = (u_int16_t)temp;
+
if (readsegsize)
temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
else
temp = 0;
mp->mnt_alignmentmask = temp;
+
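+	/*
+	 * express the advertised queue depth as a multiple of the default
+	 * depth, rounding up (ceiling division)
+	 */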
+ if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
+ temp = ioqueue_depth;
+ else
+ temp = MNT_DEFAULT_IOQUEUE_DEPTH;
+
+ mp->mnt_ioqueue_depth = temp;
+ mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
+
+ if (mp->mnt_ioscale > 1)
+ printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
+
if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
void
vfs_event_init(void)
{
+
klist_init(&fs_klist);
fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
return (ENOMEM);
MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
+ if (fsidlst == NULL) {
+ return (ENOMEM);
+ }
+
error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
&actual);
/*
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
struct sysctl_req *req)
{
- struct vfsidctl vc;
- struct user_vfsidctl user_vc;
+ union union_vfsidctl vc;
struct mount *mp;
struct vfsstatfs *sp;
int *name, flags, namelen;
namelen = arg2;
is_64_bit = proc_is64bit(p);
- if (is_64_bit) {
- error = SYSCTL_IN(req, &user_vc, sizeof(user_vc));
- if (error)
- goto out;
- if (user_vc.vc_vers != VFS_CTL_VERS1) {
- error = EINVAL;
- goto out;
- }
- mp = mount_list_lookupby_fsid(&user_vc.vc_fsid, 0, 1);
- }
- else {
- error = SYSCTL_IN(req, &vc, sizeof(vc));
- if (error)
- goto out;
- if (vc.vc_vers != VFS_CTL_VERS1) {
- error = EINVAL;
- goto out;
- }
- mp = mount_list_lookupby_fsid(&vc.vc_fsid, 0, 1);
+ error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
+ if (error)
+ goto out;
+ if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
+ error = EINVAL;
+ goto out;
}
+ mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
if (mp == NULL) {
error = ENOENT;
goto out;
case VFS_CTL_UMOUNT:
req->newidx = 0;
if (is_64_bit) {
- req->newptr = user_vc.vc_ptr;
- req->newlen = (size_t)user_vc.vc_len;
+ req->newptr = vc.vc64.vc_ptr;
+ req->newlen = (size_t)vc.vc64.vc_len;
}
else {
- req->newptr = CAST_USER_ADDR_T(vc.vc_ptr);
- req->newlen = vc.vc_len;
+ req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
+ req->newlen = vc.vc32.vc_len;
}
error = SYSCTL_IN(req, &flags, sizeof(flags));
if (error)
case VFS_CTL_STATFS:
req->newidx = 0;
if (is_64_bit) {
- req->newptr = user_vc.vc_ptr;
- req->newlen = (size_t)user_vc.vc_len;
+ req->newptr = vc.vc64.vc_ptr;
+ req->newlen = (size_t)vc.vc64.vc_len;
}
else {
- req->newptr = CAST_USER_ADDR_T(vc.vc_ptr);
- req->newlen = vc.vc_len;
+ req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
+ req->newlen = vc.vc32.vc_len;
}
error = SYSCTL_IN(req, &flags, sizeof(flags));
if (error)
break;
sp = &mp->mnt_vfsstat;
- if (((flags & MNT_NOWAIT) == 0 || (flags & MNT_WAIT)) &&
+ if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
(error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
goto out;
if (is_64_bit) {
- struct user_statfs sfs;
+ struct user64_statfs sfs;
bzero(&sfs, sizeof(sfs));
sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
sfs.f_type = mp->mnt_vtable->vfc_typenum;
- sfs.f_bsize = (user_long_t)sp->f_bsize;
- sfs.f_iosize = (user_long_t)sp->f_iosize;
- sfs.f_blocks = (user_long_t)sp->f_blocks;
- sfs.f_bfree = (user_long_t)sp->f_bfree;
- sfs.f_bavail = (user_long_t)sp->f_bavail;
- sfs.f_files = (user_long_t)sp->f_files;
- sfs.f_ffree = (user_long_t)sp->f_ffree;
+ sfs.f_bsize = (user64_long_t)sp->f_bsize;
+ sfs.f_iosize = (user64_long_t)sp->f_iosize;
+ sfs.f_blocks = (user64_long_t)sp->f_blocks;
+ sfs.f_bfree = (user64_long_t)sp->f_bfree;
+ sfs.f_bavail = (user64_long_t)sp->f_bavail;
+ sfs.f_files = (user64_long_t)sp->f_files;
+ sfs.f_ffree = (user64_long_t)sp->f_ffree;
sfs.f_fsid = sp->f_fsid;
sfs.f_owner = sp->f_owner;
error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
}
else {
- struct statfs sfs;
- bzero(&sfs, sizeof(struct statfs));
+ struct user32_statfs sfs;
+ bzero(&sfs, sizeof(sfs));
sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
sfs.f_type = mp->mnt_vtable->vfc_typenum;
* have to fudge the numbers here in that case. We inflate the blocksize in order
* to reflect the filesystem size as best we can.
*/
- if (sp->f_blocks > LONG_MAX) {
+ if (sp->f_blocks > INT_MAX) {
int shift;
/*
* being smaller than f_bsize.
*/
for (shift = 0; shift < 32; shift++) {
- if ((sp->f_blocks >> shift) <= LONG_MAX)
+ if ((sp->f_blocks >> shift) <= INT_MAX)
break;
- if ((sp->f_bsize << (shift + 1)) > LONG_MAX)
+ if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
break;
}
-#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > LONG_MAX) ? LONG_MAX : ((x) >> (s)))
- sfs.f_blocks = (long)__SHIFT_OR_CLIP(sp->f_blocks, shift);
- sfs.f_bfree = (long)__SHIFT_OR_CLIP(sp->f_bfree, shift);
- sfs.f_bavail = (long)__SHIFT_OR_CLIP(sp->f_bavail, shift);
+#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
+ sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
+ sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
+ sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
- sfs.f_bsize = (long)(sp->f_bsize << shift);
+ sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
} else {
- sfs.f_bsize = (long)sp->f_bsize;
- sfs.f_iosize = (long)sp->f_iosize;
- sfs.f_blocks = (long)sp->f_blocks;
- sfs.f_bfree = (long)sp->f_bfree;
- sfs.f_bavail = (long)sp->f_bavail;
+ sfs.f_bsize = (user32_long_t)sp->f_bsize;
+ sfs.f_iosize = (user32_long_t)sp->f_iosize;
+ sfs.f_blocks = (user32_long_t)sp->f_blocks;
+ sfs.f_bfree = (user32_long_t)sp->f_bfree;
+ sfs.f_bavail = (user32_long_t)sp->f_bavail;
}
- sfs.f_files = (long)sp->f_files;
- sfs.f_ffree = (long)sp->f_ffree;
+ sfs.f_files = (user32_long_t)sp->f_files;
+ sfs.f_ffree = (user32_long_t)sp->f_ffree;
sfs.f_fsid = sp->f_fsid;
sfs.f_owner = sp->f_owner;
static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);
-
-struct filterops fs_filtops =
- { 0, filt_fsattach, filt_fsdetach, filt_fsevent };
+struct filterops fs_filtops = {
+ .f_attach = filt_fsattach,
+ .f_detach = filt_fsdetach,
+ .f_event = filt_fsevent,
+};
static int
filt_fsattach(struct knote *kn)
}
if (pid < 0)
- OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), (UInt32 *)&p->p_flag);
+ OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
else
- OSBitOrAtomic(P_NOREMOTEHANG, (UInt32 *)&p->p_flag);
+ OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
proc_rele(p);
return (0);
NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
-long num_reusedvnodes = 0; /* long for OSAddAtomic */
+long num_reusedvnodes = 0;
static int
new_vnode(vnode_t *vpp)
vnode_t vp;
	int retries = 0;				/* retry in case of tablefull */
int force_alloc = 0, walk_count = 0;
- int vpid;
+ unsigned int vpid;
struct timespec ts;
struct timeval current_tv;
+#ifndef __LP64__
struct unsafe_fsnode *l_unsafefs = 0;
+#endif /* __LP64__ */
proc_t curproc = current_proc();
- pid_t current_pid = proc_pid(curproc);
retry:
	microuptime(&current_tv);
if (numvnodes < desiredvnodes || force_alloc) {
numvnodes++;
vnode_list_unlock();
+
MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
bzero((char *)vp, sizeof(*vp));
VLISTNONE(vp); /* avoid double queue removal */
lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
+ klist_init(&vp->v_knotes);
nanouptime(&ts);
vp->v_id = ts.tv_nsec;
vp->v_flag = VSTANDARD;
#if CONFIG_MACF
- mac_vnode_label_init(vp);
+ if (mac_vnode_label_init_needed(vp))
+ mac_vnode_label_init(vp);
#endif /* MAC */
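+	/* freshly allocated vnodes are returned with one iocount held */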
+ vp->v_iocount = 1;
goto done;
}
(current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
- if ( !(vp->v_listflag & VLIST_RAGE) || !(vp->v_flag & VRAGE))
- panic("new_vnode: vp on RAGE list not marked both VLIST_RAGE and VRAGE");
+ if ( !(vp->v_listflag & VLIST_RAGE))
+ panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
- // skip vnodes which have a dependency on this process
- // (i.e. they're vnodes in a disk image and this process
- // is diskimages-helper)
+ // if we're a dependency-capable process, skip vnodes that can
+ // cause recycling deadlocks. (i.e. this process is diskimages
+ // helper and the vnode is in a disk image).
//
- if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) {
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
break;
}
*/
walk_count = 0;
TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
- // skip vnodes which have a dependency on this process
- // (i.e. they're vnodes in a disk image and this process
- // is diskimages-helper)
+ // if we're a dependency-capable process, skip vnodes that can
+ // cause recycling deadlocks. (i.e. this process is diskimages
+ // helper and the vnode is in a disk image)
//
- if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) {
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
break;
}
}
if (vp == NULL) {
- /*
+ /*
* we've reached the system imposed maximum number of vnodes
* but there isn't a single one available
* wait a bit and then retry... if we can't get a vnode
log(LOG_EMERG, "%d desired, %d numvnodes, "
"%d free, %d dead, %d rage\n",
desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
+#if CONFIG_EMBEDDED
+ /*
+ * Running out of vnodes tends to make a system unusable. Start killing
+ * processes that jetsam knows are killable.
+ */
+ if (jetsam_kill_top_proc() < 0) {
+ /*
+ * If jetsam can't find any more processes to kill and there
+ * still aren't any free vnodes, panic. Hopefully we'll get a
+ * panic log to tell us why we ran out.
+ */
+ panic("vnode table is full\n");
+ }
+
+ delay_for_interval(1, 1000 * 1000);
+ goto retry;
+#endif
+
*vpp = NULL;
return (ENFILE);
}
steal_this_vp:
vpid = vp->v_id;
- /*
- * the v_listflag field is
- * protected by the vnode_list_lock
- */
- if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("new_vnode", vp);
- else if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("new_vnode", vp);
- else
- VREMFREE("new_vnode", vp);
- VLISTNONE(vp);
+ vnode_list_remove_locked(vp);
vnode_list_unlock();
+
vnode_lock_spin(vp);
/*
vnode_unlock(vp);
goto retry;
}
- OSAddAtomic(1, &num_reusedvnodes);
+ OSAddAtomicLong(1, &num_reusedvnodes);
/* Checks for anyone racing us for recycle */
if (vp->v_type != VBAD) {
if (vp->v_lflag & VL_DEAD)
- panic("new_vnode: the vnode is VL_DEAD but not VBAD");
+ panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
vnode_lock_convert(vp);
-
(void)vnode_reclaim_internal(vp, 1, 1, 0);
if ((VONLIST(vp)))
- panic("new_vnode: vp on list ");
+ panic("new_vnode(%p): vp on list", vp);
if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
(vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
- panic("new_vnode: free vnode still referenced\n");
+ panic("new_vnode(%p): free vnode still referenced", vp);
if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
- panic("new_vnode: vnode seems to be on mount list ");
+ panic("new_vnode(%p): vnode seems to be on mount list", vp);
if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
- panic("new_vnode: vnode still hooked into the name cache");
+ panic("new_vnode(%p): vnode still hooked into the name cache", vp);
}
+
+#ifndef __LP64__
if (vp->v_unsafefs) {
l_unsafefs = vp->v_unsafefs;
vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
}
+#endif /* __LP64__ */
#if CONFIG_MACF
/*
if (vp->v_lflag & VL_LABELED) {
vnode_lock_convert(vp);
mac_vnode_label_recycle(vp);
+ } else if (mac_vnode_label_init_needed(vp)) {
+ vnode_lock_convert(vp);
+ mac_vnode_label_init(vp);
}
+
#endif /* MAC */
+ vp->v_iocount = 1;
vp->v_lflag = 0;
vp->v_writecount = 0;
vp->v_references = 0;
vnode_unlock(vp);
+#ifndef __LP64__
if (l_unsafefs) {
lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
}
+#endif /* __LP64__ */
+
done:
*vpp = vp;
int
vnode_get_locked(struct vnode *vp)
{
-
+#if DIAGNOSTIC
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
+#endif
if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
return(ENOENT);
}
}
int
-vnode_getwithvid(vnode_t vp, int vid)
+vnode_getwithvid(vnode_t vp, uint32_t vid)
{
return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
}
}
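+/*
+ * returns with an iocount held unconditionally (VNODE_ALWAYS), even if
+ * the vnode is dead, suspended, or being drained
+ */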
+__private_extern__ int
+vnode_getalways(vnode_t vp)
+{
+ return(vget_internal(vp, 0, VNODE_ALWAYS));
+}
+
int
vnode_put(vnode_t vp)
{
{
vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
+#if DIAGNOSTIC
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
+#endif
retry:
if (vp->v_iocount < 1)
panic("vnode_put(%p): iocount < 1", vp);
if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
errno_t
vnode_resume(vnode_t vp)
{
-	vnode_lock_spin(vp);
-
-	if (vp->v_owner == current_thread()) {
+	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
+		vnode_lock_spin(vp);
vp->v_lflag &= ~VL_SUSPENDED;
vp->v_owner = NULL;
vnode_unlock(vp);
- wakeup(&vp->v_iocount);
- } else
- vnode_unlock(vp);
+ wakeup(&vp->v_iocount);
+ }
return(0);
}
#define UNAGE_THRESHHOLD 25
static errno_t
-vnode_getiocount(vnode_t vp, int vid, int vflags)
+vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
int nodead = vflags & VNODE_NODEAD;
int nosusp = vflags & VNODE_NOSUSPEND;
+ int always = vflags & VNODE_ALWAYS;
for (;;) {
/*
(vp->v_owner == current_thread())) {
break;
}
+
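+	        /* VNODE_ALWAYS: take the iocount without waiting for drain/suspend to clear */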
+ if (always != 0)
+ break;
vnode_lock_convert(vp);
if (vp->v_lflag & VL_TERMINATE) {
#ifdef JOE_DEBUG
record_vp(vp, -1);
#endif
- if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
- vnode_lock_convert(vp);
+ if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
wakeup(&vp->v_iocount);
- }
}
vn_clearunionwait(vp, 1);
- if (vnode_drain(vp)) {
- panic("vnode drain failed");
- vnode_unlock(vp);
- return;
- }
+ vnode_drain(vp);
+
isfifo = (vp->v_type == VFIFO);
if (vp->v_type != VBAD)
* once new_vnode drops the list_lock, it will block trying to take
* the vnode lock until we release it... at that point it will evaluate
* whether the v_vid has changed
+ * also need to make sure that the vnode isn't on a list where "new_vnode"
+ * can find it after the v_id has been bumped until we are completely done
+ * with the vnode (i.e. putting it back on a list has to be the very last
+ * thing we do to this vnode... many of the callers of vnode_reclaim_internal
+ * are holding an io_count on the vnode... they need to drop the io_count
+ * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
+ * they are completely done with the vnode
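+ * (the vflush() and vnode_put() paths above follow this rule: they call
+ * vnode_dropiocount() and vnode_list_add() while still holding the vnode
+ * lock, and drop the lock only when done)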
*/
vnode_list_lock();
+
+ vnode_list_remove_locked(vp);
vp->v_id++;
+
vnode_list_unlock();
if (isfifo) {
vp->v_fifoinfo = NULL;
FREE(fip, M_TEMP);
}
-
vp->v_type = VBAD;
if (vp->v_data)
panic("vnode_reclaim_internal: cleaned vnode isn't");
if (vp->v_numoutput)
- panic("vnode_reclaim_internal: Clean vnode has pending I/O's");
+ panic("vnode_reclaim_internal: clean vnode has pending I/O's");
if (UBCINFOEXISTS(vp))
panic("vnode_reclaim_internal: ubcinfo not cleaned");
if (vp->v_parent)
vp->v_lflag &= ~VL_DRAIN;
vp->v_owner = NULL;
+ KNOTE(&vp->v_knotes, NOTE_REVOKE);
+
+ /* Make sure that when we reuse the vnode, no knotes left over */
+ klist_init(&vp->v_knotes);
+
if (vp->v_lflag & VL_TERMWANT) {
vp->v_lflag &= ~VL_TERMWANT;
wakeup(&vp->v_lflag);
}
- if (!reuse && vp->v_usecount == 0) {
+ if (!reuse) {
/*
* make sure we get on the
- * dead list
+ * dead list if appropriate
*/
- vnode_list_remove(vp);
vnode_list_add(vp);
}
if (!locked)
* vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
*/
int
-vnode_create(int flavor, size_t size, void *data, vnode_t *vpp)
+vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
int error;
int insert = 1;
vp->v_op = param->vnfs_vops;
vp->v_type = param->vnfs_vtype;
vp->v_data = param->vnfs_fsnode;
- vp->v_iocount = 1;
if (param->vnfs_markroot)
vp->v_flag |= VROOT;
bzero(fip, sizeof(struct fifoinfo ));
vp->v_fifoinfo = fip;
}
- /* The file systems usually pass the address of the location where
- * where there store the vnode pointer. When we add the vnode in mount
- * point and name cache they are discoverable. So the file system node
- * will have the connection to vnode setup by then
+ /* The file systems must pass the address of the location where
+ * they store the vnode pointer. When we add the vnode into the mount
+ * list and name cache they become discoverable. So the file system node
+ * must have the connection to vnode setup by then
*/
*vpp = vp;
if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
vp->v_flag |= VLOCKLOCAL;
if (insert) {
+ if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
+ panic("insmntque: vp on the free list\n");
/*
* enter in mount vnode list
*/
insmntque(vp, param->vnfs_mp);
}
-#ifdef INTERIM_FSNODE_LOCK
- if (param->vnfs_mp->mnt_vtable->vfc_threadsafe == 0) {
+#ifndef __LP64__
+ if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
vp->v_unsafefs->fsnode_count = 0;
vp->v_unsafefs->fsnodeowner = (void *)NULL;
lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
}
-#endif /* INTERIM_FSNODE_LOCK */
+#endif /* __LP64__ */
}
if (dvp && vnode_ref(dvp) == 0) {
vp->v_parent = dvp;
/*
* enter into name cache
* we've got the info to enter it into the name cache now
+ * cache_enter_create will pick up an extra reference on
+ * the name entered into the string cache
*/
- cache_enter(dvp, vp, cnp);
- }
- vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
+ vp->v_name = cache_enter_create(dvp, vp, cnp);
+ } else
+ vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
+
if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
vp->v_flag |= VISUNION;
}
return(0);
}
-void
+int
mount_list_add(mount_t mp)
{
+ int res;
+
mount_list_lock();
- TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- nummounts++;
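+	/* once shutdown has begun, refuse to add new mounts */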
+ if (system_inshutdown != 0) {
+ res = -1;
+ } else {
+ TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
+ nummounts++;
+ res = 0;
+ }
mount_list_unlock();
+
+ return res;
}
void
mount_list_unlock();
}
-#if CONFIG_VOLFS
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
out:
return(cur_mount);
}
-#endif
-
mount_t
mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
{
struct nameidata nd;
int error;
- u_long ndflags = 0;
+ u_int32_t ndflags = 0;
if (ctx == NULL) { /* XXX technically an error */
ctx = vfs_context_current();
{
struct nameidata nd;
int error;
- u_long ndflags = 0;
+ u_int32_t ndflags = 0;
int lflags = flags;
if (ctx == NULL) { /* XXX technically an error */
vp = *vpp;
#if CONFIG_MACF
if (!(flags & VN_CREATE_NOLABEL)) {
- error = vnode_label(vnode_mount(vp), dvp, vp, cnp,
- VNODE_LABEL_CREATE|VNODE_LABEL_NEEDREF, ctx);
+ error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
if (error)
goto error;
}
return(result);
}
+/*
+ * vauth_node_group
+ *
+ * Description: Ask if a cred is a member of the group owning the vnode object
+ *
+ * Parameters: vap vnode attribute
+ * vap->va_gid group owner of vnode object
+ * cred credential to check
+ * ismember pointer to where to put the answer
+ * idontknow Return this if we can't get an answer
+ *
+ * Returns: 0 Success
+ * idontknow Can't get information
+ * kauth_cred_ismember_gid:?	Error from kauth subsystem
+ */
static int
-vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember)
+vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
{
int error;
int result;
error = 0;
result = 0;
- /* the caller is expected to have asked the filesystem for a group at some point */
+ /*
+ * The caller is expected to have asked the filesystem for a group
+ * at some point prior to calling this function. The answer may
+ * have been that there is no group ownership supported for the
+ * vnode object, in which case we return success with *ismember set to 0.
+ */
if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
+ /*
+ * Credentials which are opted into external group membership
+ * resolution which are not known to the external resolver
+ * will result in an ENOENT error. We translate this into
+ * the appropriate 'idontknow' response for our caller.
+ *
+ * XXX We do not make a distinction here between an ENOENT
+ * XXX arising from a response from the external resolver,
+ * XXX and an ENOENT which is internally generated. This is
+ * XXX a deficiency of the published kauth_cred_ismember_gid()
+ * XXX KPI which cannot be overcome without new KPI. For
+ * XXX all currently known cases, however, this will result
+ * XXX in correct behaviour.
+ */
+ if (error == ENOENT)
+ error = idontknow;
}
- /* we could test the group UUID here if we had a policy for it */
+ /*
+ * XXX We could test the group UUID here if we had a policy for it,
+ * XXX but this is problematic from the perspective of synchronizing
+ * XXX group UUID and POSIX GID ownership of a file and keeping the
+ * XXX values coherent over time. The problem is that the local
+ * XXX system will vend transient group UUIDs for unknown POSIX GID
+ * XXX values, and these are not persistent, whereas storage of values
+ * XXX is persistent. One potential solution to this is a local
+ * XXX (persistent) replica of remote directory entries and vended
+ * XXX local ids in a local directory server (think in terms of a
+ * XXX caching DNS server).
+ */
if (!error)
*ismember = result;
return(result);
}
+
+/*
+ * vauth_file_ingroup
+ *
+ * Description: Ask if a user is a member of the group owning the file
+ *
+ * Parameters: vcp The vnode authorization context that
+ * contains the user and file info
+ * vcp->flags_valid Valid flags
+ * vcp->flags Flags values
+ * vcp->vap File vnode attributes
+ * vcp->ctx VFS Context (for user)
+ * ismember pointer to where to put the answer
+ * idontknow Return this if we can't get an answer
+ *
+ * Returns: 0 Success
+ * vauth_node_group:? Error from vauth_node_group()
+ *
+ * Implicit returns: *ismember 0 The user is not a group member
+ * 1 The user is a group member
+ */
static int
-vauth_file_ingroup(vauth_ctx vcp, int *ismember)
+vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
int error;
+ /* Check for a cached answer first, to avoid the check if possible */
if (vcp->flags_valid & _VAC_IN_GROUP) {
*ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
error = 0;
} else {
- error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember);
+ /* Otherwise, go look for it */
+ error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
if (!error) {
/* cache our result */
return(result);
}
+/*
+ * vauth_dir_ingroup
+ *
+ * Description: Ask if a user is a member of the group owning the directory
+ *
+ * Parameters: vcp The vnode authorization context that
+ * contains the user and directory info
+ * vcp->flags_valid Valid flags
+ * vcp->flags Flags values
+ * vcp->dvap Dir vnode attributes
+ * vcp->ctx VFS Context (for user)
+ * ismember pointer to where to put the answer
+ * idontknow Return this if we can't get an answer
+ *
+ * Returns: 0 Success
+ * vauth_node_group:? Error from vauth_node_group()
+ *
+ * Implicit returns: *ismember 0 The user is not a group member
+ * 1 The user is a group member
+ */
static int
-vauth_dir_ingroup(vauth_ctx vcp, int *ismember)
+vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
int error;
+ /* Check for a cached answer first, to avoid the check if possible */
if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
*ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
error = 0;
} else {
- error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember);
+ /* Otherwise, go look for it */
+ error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
if (!error) {
/* cache our result */
}
/* Check group membership (most expensive) */
- ismember = 0;
+ ismember = 0; /* Default to allow, if the target has no group owner */
+
+ /*
+ * In the case we can't get an answer about the user from the call to
+ * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
+ * the side of caution, rather than simply granting access, or we will
+ * fail to correctly implement exclusion groups, so we set the third
+ * parameter on the basis of the state of 'group_ok'.
+ */
if (on_dir) {
- error = vauth_dir_ingroup(vcp, &ismember);
+ error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
} else {
- error = vauth_file_ingroup(vcp, &ismember);
+ error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
}
if (error)
goto out;
* - Neither the node nor the directory are immutable.
* - The user is not the superuser.
*
- * Deletion is not permitted if the directory is sticky and the caller is not owner of the
- * node or directory.
+ * Deletion is not permitted if the directory is sticky and the caller is
+ * not owner of the node or directory.
+ *
+ * If either the node grants DELETE, or the directory grants DELETE_CHILD,
+ * the node may be deleted. If neither denies the permission, and the
+ * caller has Posix write access to the directory, then the node may be
+ * deleted.
*
- * If either the node grants DELETE, or the directory grants DELETE_CHILD, the node may be
- * deleted. If neither denies the permission, and the caller has Posix write access to the
- * directory, then the node may be deleted.
+ * As an optimization, we cache whether or not delete child is permitted
+ * on directories without the sticky bit set.
*/
-static int
-vnode_authorize_delete(vauth_ctx vcp)
+int
+vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
+/*static*/ int
+vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
struct vnode_attr *vap = vcp->vap;
struct vnode_attr *dvap = vcp->dvap;
/* check the ACL on the directory */
delete_child_denied = 0;
- if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
+ if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
+ errno_t posix_error;
+
eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
eval.ae_acl = &dvap->va_acl->acl_ace[0];
eval.ae_count = dvap->va_acl->acl_entrycount;
eval.ae_options = 0;
if (vauth_dir_owner(vcp))
eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
- if ((error = vauth_dir_ingroup(vcp, &ismember)) != 0)
- return(error);
+ /*
+ * We use ENOENT as a marker to indicate we could not get
+ * information in order to delay evaluation until after we
+ * have the ACL evaluation answer. Previously, we would
+ * always deny the operation at this point.
+ */
+ if ((posix_error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
+ return(posix_error);
if (ismember)
eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
+ /*
+ * If there is no entry, we are going to defer to other
+ * authorization mechanisms.
+ */
error = kauth_acl_evaluate(cred, &eval);
if (error != 0) {
KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
return(error);
}
- if (eval.ae_result == KAUTH_RESULT_DENY)
+ switch(eval.ae_result) {
+ case KAUTH_RESULT_DENY:
delete_child_denied = 1;
- if (eval.ae_result == KAUTH_RESULT_ALLOW) {
+ break;
+ case KAUTH_RESULT_ALLOW:
KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
return(0);
+ case KAUTH_RESULT_DEFER:
+ /*
+ * If we don't have a POSIX answer of "yes", and we
+ * can't get an ACL answer, then we deny it now.
+ */
+ if (posix_error == ENOENT) {
+ delete_child_denied = 1;
+ break;
+ }
+ default:
+ /* Effectively the same as !delete_child_denied */
+ KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
+ break;
}
}
/* check the ACL on the node */
delete_denied = 0;
if (VATTR_IS_NOT(vap, va_acl, NULL)) {
+ errno_t posix_error;
+
eval.ae_requested = KAUTH_VNODE_DELETE;
eval.ae_acl = &vap->va_acl->acl_ace[0];
eval.ae_count = vap->va_acl->acl_entrycount;
eval.ae_options = 0;
if (vauth_file_owner(vcp))
eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
- if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
- return(error);
+ /*
+ * We use ENOENT as a marker to indicate we could not get
+ * information in order to delay evaluation until after we
+ * have the ACL evaluation answer. Previously, we would
+ * always deny the operation at this point.
+ */
+ if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
+ return(posix_error);
if (ismember)
eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
return(error);
}
- if (eval.ae_result == KAUTH_RESULT_DENY)
+
+ switch(eval.ae_result) {
+ case KAUTH_RESULT_DENY:
delete_denied = 1;
- if (eval.ae_result == KAUTH_RESULT_ALLOW) {
+ break;
+ case KAUTH_RESULT_ALLOW:
KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
return(0);
+ case KAUTH_RESULT_DEFER:
+ /*
+ * If we don't have a POSIX answer of "yes", and we
+ * can't get an ACL answer, then we deny it now.
+ */
+ if (posix_error == ENOENT) {
+ delete_denied = 1;
+ }
+ default:
+			/* Effectively the same as !delete_denied */
+ KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
+ break;
}
}
/* if denied by ACL on directory or node, return denial */
if (delete_denied || delete_child_denied) {
- KAUTH_DEBUG("%p ALLOWED - denied by ACL", vcp->vp);
+ KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
return(EACCES);
}
- /* enforce sticky bit behaviour */
- if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
+	/*
+	 * Enforce sticky bit behaviour; the cached_delete_child property will
+	 * be false and the dvap contents valid for sticky bit directories;
+	 * this means we check the directory each time, but that is
+	 * unavoidable, as the sticky bit is an exception to caching.
+	 */
+ if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
return(EACCES);
}
/* check the directory */
- if ((error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
+ if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
KAUTH_DEBUG("%p ALLOWED - granted by posix permisssions", vcp->vp);
return(error);
}
/* if we have an ACL, evaluate it */
if (VATTR_IS_NOT(vap, va_acl, NULL)) {
+ errno_t posix_error;
+
eval.ae_requested = acl_rights;
eval.ae_acl = &vap->va_acl->acl_ace[0];
eval.ae_count = vap->va_acl->acl_entrycount;
eval.ae_options = 0;
if (vauth_file_owner(vcp))
eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
- if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
- return(error);
+		/*
+		 * We use ENOENT as a marker for "we could not determine
+		 * group membership", so that the decision can be deferred
+		 * until after the ACL evaluation answer is in.  Previously,
+		 * we would always deny the operation at this point.
+		 */
+ if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
+ return(posix_error);
if (ismember)
eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
return(error);
}
- if (eval.ae_result == KAUTH_RESULT_DENY) {
+ switch(eval.ae_result) {
+ case KAUTH_RESULT_DENY:
KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
- return(EACCES); /* deny, deny, counter-allege */
- }
- if (eval.ae_result == KAUTH_RESULT_ALLOW) {
+ return(EACCES); /* deny, deny, counter-allege */
+ case KAUTH_RESULT_ALLOW:
KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
return(0);
+ case KAUTH_RESULT_DEFER:
+ /*
+ * If we don't have a POSIX answer of "yes", and we
+ * can't get an ACL answer, then we deny it now.
+ */
+ if (posix_error == ENOENT) {
+ KAUTH_DEBUG("%p DENIED(DEFERRED) - by ACL", vcp->vp);
+ return(EACCES); /* deny, deny, counter-allege */
+ }
+			/* FALLTHROUGH */
+		default:
+			/* Residual rights are evaluated below */
+			KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
+ break;
}
+
*found_deny = eval.ae_found_deny;
/* fall through and evaluate residual rights */
}
}
- /* check for file immutability */
+ /*
+ * check for file immutability. first, check if the requested rights are
+ * allowable for a UF_APPEND file.
+ */
append = 0;
if (vp->v_type == VDIR) {
- if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY)) == rights)
+ if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
append = 1;
} else {
- if ((rights & KAUTH_VNODE_APPEND_DATA) == rights)
+ if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
append = 1;
}
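+	/*
+	 * Note that "(rights & mask) == rights" above is a subset test:
+	 * append is set only when every requested right is one that a
+	 * UF_APPEND node can still permit.
+	 */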
if ((error = vnode_immutable(vap, append, ignore)) != 0) {
vfs_context_t ctx;
vnode_t cvp = NULLVP;
vnode_t vp, dvp;
- int result;
+ int result = KAUTH_RESULT_DENY;
+ int parent_iocount = 0;
+	int parent_action; /* In case we need to use the named stream's data fork for cached rights */
ctx = (vfs_context_t)arg0;
vp = (vnode_t)arg1;
*/
if (dvp && vp)
goto defer;
- if (dvp)
+ if (dvp) {
cvp = dvp;
- else
- cvp = vp;
+ } else {
+ /*
+ * For named streams on local-authorization volumes, rights are cached on the parent;
+ * authorization is determined by looking at the parent's properties anyway, so storing
+ * on the parent means that we don't recompute for the named stream and that if
+ * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
+ * stream to flush its cache separately. If we miss in the cache, then we authorize
+ * as if there were no cached rights (passing the named stream vnode and desired rights to
+ * vnode_authorize_callback_int()).
+ *
+ * On an opaquely authorized volume, we don't know the relationship between the
+ * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
+ * on such a volume are authorized directly (rather than using the parent) and have their
+ * own caches. When a named stream vnode is created, we mark the parent as having a named
+ * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
+ * find the stream and flush its cache.
+ */
+ if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
+ cvp = vp->v_parent;
+ if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
+ parent_iocount = 1;
+ } else {
+ cvp = NULL;
+ goto defer; /* If we can't use the parent, take the slow path */
+ }
+
+ /* Have to translate some actions */
+ parent_action = action;
+ if (parent_action & KAUTH_VNODE_READ_DATA) {
+ parent_action &= ~KAUTH_VNODE_READ_DATA;
+ parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
+ }
+ if (parent_action & KAUTH_VNODE_WRITE_DATA) {
+ parent_action &= ~KAUTH_VNODE_WRITE_DATA;
+ parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
+ }
- if (vnode_cache_is_authorized(cvp, ctx, action) == TRUE)
- return KAUTH_RESULT_ALLOW;
+ } else {
+ cvp = vp;
+ }
+ }
+
+ if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
+ result = KAUTH_RESULT_ALLOW;
+ goto out;
+ }
defer:
result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
vnode_cache_authorized_action(cvp, ctx, action);
+out:
+ if (parent_iocount) {
+ vnode_put(cvp);
+ }
+
return result;
}
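+
+/*
+ * A minimal sketch of the named-stream action translation above, pulled
+ * out as a hypothetical helper (illustrative only, not part of this
+ * change):
+ *
+ *	static kauth_action_t
+ *	xlate_namedstream_action(kauth_action_t action)
+ *	{
+ *		if (action & KAUTH_VNODE_READ_DATA) {
+ *			action &= ~KAUTH_VNODE_READ_DATA;
+ *			action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
+ *		}
+ *		if (action & KAUTH_VNODE_WRITE_DATA) {
+ *			action &= ~KAUTH_VNODE_WRITE_DATA;
+ *			action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
+ *		}
+ *		return (action);
+ *	}
+ *
+ * From the data fork's point of view, reading or writing a named stream
+ * is reading or writing one of its extended attributes.
+ */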
int result;
int *errorp;
int noimmutable;
- boolean_t parent_authorized_for_delete = FALSE;
+ boolean_t parent_authorized_for_delete_child = FALSE;
boolean_t found_deny = FALSE;
boolean_t parent_ref= FALSE;
* can skip a whole bunch of work... we will still have to
* authorize that this specific child can be removed
*/
- if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE) == TRUE)
- parent_authorized_for_delete = TRUE;
+ if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
+ parent_authorized_for_delete_child = TRUE;
} else {
dvp = NULL;
}
KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
goto out;
}
- if (dvp && parent_authorized_for_delete == FALSE) {
+ if (dvp && parent_authorized_for_delete_child == FALSE) {
VATTR_WANTED(&dva, va_mode);
VATTR_WANTED(&dva, va_uid);
VATTR_WANTED(&dva, va_gid);
* If the vnode is an extended attribute data vnode (eg. a resource fork), *_DATA becomes
* *_EXTATTRIBUTES.
*/
- if (S_ISXATTR(va.va_mode) || vnode_isnamedstream(vp)) {
+ if (vnode_isnamedstream(vp)) {
if (rights & KAUTH_VNODE_READ_DATA) {
rights &= ~KAUTH_VNODE_READ_DATA;
rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
goto out;
if ((rights & KAUTH_VNODE_DELETE) &&
- parent_authorized_for_delete == FALSE &&
+ parent_authorized_for_delete_child == FALSE &&
((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
goto out;
goto out;
/*
- * If we're not the superuser, authorize based on file properties.
+ * If we're not the superuser, authorize based on file properties;
+ * note that even if parent_authorized_for_delete_child is TRUE, we
+ * need to check on the node itself.
*/
if (!vfs_context_issuser(ctx)) {
/* process delete rights */
if ((rights & KAUTH_VNODE_DELETE) &&
- parent_authorized_for_delete == FALSE &&
- ((result = vnode_authorize_delete(vcp)) != 0))
+ ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
goto out;
/* process remaining rights */
vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
}
}
- if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete == FALSE) {
+ if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
/*
- * parent was successfully and newly authorized for deletions
- * add it to the cache
+		 * The parent was newly and successfully authorized for content
+		 * deletions; add it to the cache, but only if the sticky bit
+		 * is not set on it.  The same sticky-bit check guards the
+		 * earlier fetch of dva; if we had jumped to 'out' without
+		 * doing it, we would already have returned with a non-zero
+		 * 'result' value.
*/
- vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE);
+ if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
+ !(dva.va_mode & (S_ISVTX))) {
+ /* OK to cache delete rights */
+ vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
+ }
}
if (parent_ref)
vnode_put(vp);
*/
if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
/* if the owner UUID is not actually changing ... */
- if (VATTR_IS_SUPPORTED(&ova, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
- goto no_uuuid_change;
+ if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
+ if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
+ goto no_uuuid_change;
+
+ /*
+ * If the current owner UUID is a null GUID, check
+ * it against the UUID corresponding to the owner UID.
+ */
+ if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
+ VATTR_IS_SUPPORTED(&ova, va_uid)) {
+ guid_t uid_guid;
+
+ if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
+ kauth_guid_equal(&vap->va_uuuid, &uid_guid))
+ goto no_uuuid_change;
+ }
+ }
/*
* The owner UUID cannot be set by a non-superuser to anything other than
- * their own.
+ * their own or a null GUID (to "unset" the owner UUID).
+ * Note that file systems must be prepared to handle the
+ * null UUID case in a manner appropriate for that file
+ * system.
*/
if (!has_priv_suser) {
if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
/* XXX ENOENT here - no UUID - should perhaps become EPERM */
goto out;
}
- if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
- KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us");
+ if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
+ !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
+ KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
error = EPERM;
goto out;
}
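+		/*
+		 * A usage sketch (hypothetical caller, assuming the standard
+		 * vnode_setattr() KPI): with this change a non-superuser can
+		 * "unset" the owner UUID via
+		 *
+		 *	struct vnode_attr va;
+		 *
+		 *	VATTR_INIT(&va);
+		 *	VATTR_SET(&va, va_uuuid, kauth_null_guid);
+		 *	error = vnode_setattr(vp, &va, ctx);
+		 *
+		 * where previously anything but the caller's own GUID drew
+		 * EPERM.
+		 */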
*/
if (VATTR_IS_ACTIVE(vap, va_guuid)) {
/* if the group UUID is not actually changing ... */
- if (VATTR_IS_SUPPORTED(&ova, va_guuid) && kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
- goto no_guuid_change;
+ if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
+ if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
+ goto no_guuid_change;
+
+ /*
+			 * If the current group UUID is a null GUID, check
+			 * it against the UUID corresponding to the GID.
+ */
+ if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
+ VATTR_IS_SUPPORTED(&ova, va_gid)) {
+ guid_t gid_guid;
+
+ if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
+ kauth_guid_equal(&vap->va_guuid, &gid_guid))
+ goto no_guuid_change;
+ }
+ }
/*
* The group UUID cannot be set by a non-superuser to anything other than
- * one of which they are a member.
+ * one of which they are a member or a null GUID (to "unset"
+ * the group UUID).
+ * Note that file systems must be prepared to handle the
+ * null UUID case in a manner appropriate for that file
+ * system.
*/
if (!has_priv_suser) {
- if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
+ if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
+ ismember = 1;
+ else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
goto out;
}
if (!ismember) {
- KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
+ KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
error = EPERM;
goto out;
}
mount_unlock(mp);
}
+void
+vfs_setunmountpreflight(mount_t mp)
+{
+ mount_lock_spin(mp);
+ mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
+ mount_unlock(mp);
+}
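+
+/*
+ * Usage sketch (assumed caller, not shown in this diff): a file system
+ * that wants the preflight walk performed on unmount would call
+ *
+ *	vfs_setunmountpreflight(mp);
+ *
+ * e.g. at mount time; the flag is presumably consulted by the unmount
+ * path's vnode_umount_preflight() walk.  The routine only sets a bit
+ * under the mount lock, so the spin variant of the lock suffices.
+ */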
+
void
vn_setunionwait(vnode_t vp)
{
void
vn_checkunionwait(vnode_t vp)
{
- vnode_lock(vp);
+ vnode_lock_spin(vp);
while ((vp->v_flag & VISUNION) == VISUNION)
msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
vnode_unlock(vp);
vn_clearunionwait(vnode_t vp, int locked)
{
if (!locked)
- vnode_lock(vp);
+ vnode_lock_spin(vp);
if((vp->v_flag & VISUNION) == VISUNION) {
vp->v_flag &= ~VISUNION;
wakeup((caddr_t)&vp->v_flag);
cpos += dp->d_reclen;
dp = (struct dirent*)cpos;
}
+
+		/*
+		 * Workaround for HFS/NFS setting eofflag before end of file.
+		 */
+		if (vp->v_tag == VT_HFS && nentries > 2)
+			eofflag = 0;
+
+ if (vp->v_tag == VT_NFS) {
+ if (eofflag && !full_erase_flag) {
+ full_erase_flag = 1;
+ eofflag = 0;
+ uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
+			} else if (!eofflag && full_erase_flag)
+ full_erase_flag = 0;
+ }
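+		/*
+		 * Note that the NFS case restarts the scan exactly once: the
+		 * first time eofflag comes back set, full_erase_flag is
+		 * raised, eofflag is cleared and the uio rewound, so the
+		 * directory is walked again from offset 0; the enclosing
+		 * do/while terminates only when eofflag is returned with
+		 * full_erase_flag already set.
+		 */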
} while (!eofflag);
/*
- * If we've made it here all the files in the dir are AppleDouble
+	 * If we've made it here, all the files in the dir are ._ (AppleDouble) files.
* We can delete the files even though the node is suspended
* because we are the owner of the file.
*/
uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
eofflag = 0;
+ full_erase_flag = 0;
do {
siz = UIO_BUFF_SIZE;
!((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
(dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
) {
+
NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
nd_temp.ni_dvp = vp;
error = unlink1(ctx, &nd_temp, 0);
- if(error && error != ENOENT)
+ if (error && error != ENOENT) {
goto outsc;
+ }
}
cpos += dp->d_reclen;
dp = (struct dirent*)cpos;
}
-#ifdef JOE_DEBUG
+void
+lock_vnode_and_post(vnode_t vp, int kevent_num)
+{
+ /* Only take the lock if there's something there! */
+ if (vp->v_knotes.slh_first != NULL) {
+ vnode_lock(vp);
+ KNOTE(&vp->v_knotes, kevent_num);
+ vnode_unlock(vp);
+ }
+}
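+
+/*
+ * Usage sketch (hypothetical caller): e.g.
+ *
+ *	lock_vnode_and_post(vp, NOTE_WRITE);
+ *
+ * posts NOTE_WRITE to any knotes attached to vp, taking the vnode lock
+ * only when the knote list is non-empty.
+ */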
-record_vp(vnode_t vp, int count) {
+
+#ifdef JOE_DEBUG
+static void record_vp(vnode_t vp, int count) {
struct uthread *ut;
int i;