X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..6d2010ae8f7a6078e10b361c6962983bab233e0f:/bsd/vfs/vfs_cache.c diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index 1c2c9230c..3096d1294 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,6 +64,12 @@ * * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. + */ #include #include #include @@ -74,6 +80,11 @@ #include #include #include +#include + +#if CONFIG_MACF +#include +#endif /* * Name caching works as follows: @@ -102,24 +113,64 @@ u_long nchash; /* size of hash table - 1 */ long numcache; /* number of cache entries allocated */ int desiredNodes; int desiredNegNodes; +int ncs_negtotal; +int nc_disabled = 0; TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */ TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */ + + +#if COLLECT_STATS + struct nchstats nchstats; /* cache effectiveness statistics */ +#define NCHSTAT(v) { \ + nchstats.v++; \ +} +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock() + +#else + +#define NCHSTAT(v) +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared() + +#endif + + /* vars for name cache list lock */ lck_grp_t * namecache_lck_grp; lck_grp_attr_t * namecache_lck_grp_attr; lck_attr_t * namecache_lck_attr; -lck_mtx_t * namecache_mtx_lock; + +lck_grp_t * strcache_lck_grp; +lck_grp_attr_t * strcache_lck_grp_attr; +lck_attr_t * strcache_lck_attr; + +lck_rw_t * namecache_rw_lock; +lck_rw_t * strtable_rw_lock; + +#define NUM_STRCACHE_LOCKS 1024 + +lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS]; + static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp); -static int remove_name_locked(const char *); -static char *add_name_locked(const char *, size_t, u_int, u_int); -static void init_string_table(void); +static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int); +static void init_string_table(void) __attribute__((section("__TEXT, initcode"))); static void cache_delete(struct namecache *, int); -static void dump_string_table(void); +static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname); + +#ifdef DUMP_STRING_TABLE +/* + * Internal dump function used for debugging + */ +void dump_string_table(void); +#endif /* DUMP_STRING_TABLE */ -static void init_crc32(void); +static void init_crc32(void) __attribute__((section("__TEXT, initcode"))); static unsigned int crc32tab[256]; @@ -128,112 +179,320 @@ static unsigned int crc32tab[256]; -// -// This function builds the path to a filename in "buff". The -// length of the buffer *INCLUDING* the trailing zero byte is -// returned in outlen. NOTE: the length includes the trailing -// zero byte and thus the length is one greater than what strlen -// would return. This is important and lots of code elsewhere -// in the kernel assumes this behavior. 
-// +/* + * This function builds the path to a filename in "buff". The + * length of the buffer *INCLUDING* the trailing zero byte is + * returned in outlen. NOTE: the length includes the trailing + * zero byte and thus the length is one greater than what strlen + * would return. This is important and lots of code elsewhere + * in the kernel assumes this behavior. + * + * This function can call vnop in file system if the parent vnode + * does not exist or when called for hardlinks via volfs path. + * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present + * in the name cache and does not enter the file system. + * + * passed in vp must have a valid io_count reference + */ int -build_path(vnode_t first_vp, char *buff, int buflen, int *outlen) +build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx) { - vnode_t vp = first_vp; - char *end, *str; - int len, ret=0, counter=0; + vnode_t vp, tvp; + vnode_t vp_with_iocount; + vnode_t proc_root_dir_vp; + char *end; + const char *str; + int len; + int ret = 0; + int fixhardlink; + + if (first_vp == NULLVP) + return (EINVAL); + + /* + * Grab the process fd so we can evaluate fd_rdir. + */ + if (vfs_context_proc(ctx)->p_fd) + proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir; + else + proc_root_dir_vp = NULL; + + vp_with_iocount = NULLVP; +again: + vp = first_vp; end = &buff[buflen-1]; *end = '\0'; /* - * if this is the root dir of a file system... + * holding the NAME_CACHE_LOCK in shared mode is + * sufficient to stabilize both the vp->v_parent chain + * and the 'vp->v_mount->mnt_vnodecovered' chain + * + * if we need to drop this lock, we must first grab the v_id + * from the vnode we're currently working with... if that + * vnode doesn't already have an io_count reference (the vp + * passed in comes with one), we must grab a reference + * after we drop the NAME_CACHE_LOCK via vnode_getwithvid... + * deadlocks may result if you call vnode_get while holding + * the NAME_CACHE_LOCK... we lazily release the reference + * we pick up the next time we encounter a need to drop + * the NAME_CACHE_LOCK or before we return from this routine */ - if (vp && (vp->v_flag & VROOT) && vp->v_mount) { - /* - * then if it's the root fs, just put in a '/' and get out of here - */ - if (vp->v_mount->mnt_flag & MNT_ROOTFS) { + NAME_CACHE_LOCK_SHARED(); + + /* + * Check if this is the root of a file system. + */ + while (vp && vp->v_flag & VROOT) { + if (vp->v_mount == NULL) { + ret = EINVAL; + goto out_unlock; + } + if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) { + /* + * It's the root of the root file system, so it's + * just "/". + */ *--end = '/'; - goto out; + + goto out_unlock; } else { - /* - * else just use the covered vnode to get the mount path - */ vp = vp->v_mount->mnt_vnodecovered; } } - name_cache_lock(); - while (vp && vp->v_parent != vp) { - /* - * the maximum depth of a file system hierarchy is MAXPATHLEN/2 - * (with single-char names separated by slashes). we panic if - * we've ever looped more than that. - */ - if (counter++ > MAXPATHLEN/2) { - panic("build_path: vnode parent chain is too long! 
vp 0x%x\n", vp); - } - str = vp->v_name; - - if (str == NULL) { - if (vp->v_parent != NULL) { - ret = EINVAL; - } - break; - } - len = strlen(str); + while ((vp != NULLVP) && (vp->v_parent != vp)) { + int vid; /* - * check that there's enough space (make sure to include space for the '/') + * For hardlinks the v_name may be stale, so if its OK + * to enter a file system, ask the file system for the + * name and parent (below). */ - if ((end - buff) < (len + 1)) { - ret = ENOSPC; - break; + fixhardlink = (vp->v_flag & VISHARDLINK) && + (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) && + !(flags & BUILDPATH_NO_FS_ENTER); + + if (!fixhardlink) { + str = vp->v_name; + + if (str == NULL || *str == '\0') { + if (vp->v_parent != NULL) + ret = EINVAL; + else + ret = ENOENT; + goto out_unlock; + } + len = strlen(str); + /* + * Check that there's enough space (including space for the '/') + */ + if ((end - buff) < (len + 1)) { + ret = ENOSPC; + goto out_unlock; + } + /* + * Copy the name backwards. + */ + str += len; + + for (; len > 0; len--) + *--end = *--str; + /* + * Add a path separator. + */ + *--end = '/'; } - /* - * copy it backwards - */ - str += len; - for (; len > 0; len--) { - *--end = *--str; - } /* - * put in the path separator + * Walk up the parent chain. */ - *--end = '/'; + if (((vp->v_parent != NULLVP) && !fixhardlink) || + (flags & BUILDPATH_NO_FS_ENTER)) { + /* + * In this if () block we are not allowed to enter the filesystem + * to conclusively get the most accurate parent identifier. + * As a result, if 'vp' does not identify '/' and it + * does not have a valid v_parent, then error out + * and disallow further path construction + */ + if ((vp->v_parent == NULLVP) && (rootvnode != vp)) { + /* Only '/' is allowed to have a NULL parent pointer */ + ret = EINVAL; - /* - * walk up the chain (as long as we're not the root) - */ - if (vp == first_vp && (vp->v_flag & VROOT)) { - if (vp->v_mount && vp->v_mount->mnt_vnodecovered) { - vp = vp->v_mount->mnt_vnodecovered->v_parent; - } else { - vp = NULLVP; + /* The code below will exit early if 'tvp = vp' == NULL */ } + + vp = vp->v_parent; + + /* + * if the vnode we have in hand isn't a directory and it + * has a v_parent, then we started with the resource fork + * so skip up to avoid getting a duplicate copy of the + * file name in the path. + */ + if (vp && !vnode_isdir(vp) && vp->v_parent) + vp = vp->v_parent; } else { - vp = vp->v_parent; + /* + * No parent, go get it if supported. + */ + struct vnode_attr va; + vnode_t dvp; + + /* + * Make sure file system supports obtaining a path from id. + */ + if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) { + ret = ENOENT; + goto out_unlock; + } + vid = vp->v_id; + + NAME_CACHE_UNLOCK(); + + if (vp != first_vp && vp != vp_with_iocount) { + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + vp_with_iocount = NULLVP; + } + if (vnode_getwithvid(vp, vid)) + goto again; + vp_with_iocount = vp; + } + VATTR_INIT(&va); + VATTR_WANTED(&va, va_parentid); + + if (fixhardlink) { + VATTR_WANTED(&va, va_name); + MALLOC_ZONE(va.va_name, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + } else { + va.va_name = NULL; + } + /* + * Ask the file system for its parent id and for its name (optional). 
+ */ + ret = vnode_getattr(vp, &va, ctx); + + if (fixhardlink) { + if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) { + str = va.va_name; + vnode_update_identity(vp, NULL, str, strlen(str), 0, VNODE_UPDATE_NAME); + } else if (vp->v_name) { + str = vp->v_name; + ret = 0; + } else { + ret = ENOENT; + goto bad_news; + } + len = strlen(str); + + /* + * Check that there's enough space. + */ + if ((end - buff) < (len + 1)) { + ret = ENOSPC; + } else { + /* Copy the name backwards. */ + str += len; + + for (; len > 0; len--) { + *--end = *--str; + } + /* + * Add a path separator. + */ + *--end = '/'; + } +bad_news: + FREE_ZONE(va.va_name, MAXPATHLEN, M_NAMEI); + } + if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) { + ret = ENOENT; + goto out; + } + /* + * Ask the file system for the parent vnode. + */ + if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) + goto out; + + if (!fixhardlink && (vp->v_parent != dvp)) + vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT); + + if (vp_with_iocount) + vnode_put(vp_with_iocount); + vp = dvp; + vp_with_iocount = vp; + + NAME_CACHE_LOCK_SHARED(); + + /* + * if the vnode we have in hand isn't a directory and it + * has a v_parent, then we started with the resource fork + * so skip up to avoid getting a duplicate copy of the + * file name in the path. + */ + if (vp && !vnode_isdir(vp) && vp->v_parent) + vp = vp->v_parent; } /* - * check if we're crossing a mount point and - * switch the vp if we are. + * When a mount point is crossed switch the vp. + * Continue until we find the root or we find + * a vnode that's not the root of a mounted + * file system. */ - if (vp && (vp->v_flag & VROOT) && vp->v_mount) { - vp = vp->v_mount->mnt_vnodecovered; + tvp = vp; + + while (tvp) { + if (tvp == proc_root_dir_vp) + goto out_unlock; /* encountered the root */ + + if (!(tvp->v_flag & VROOT) || !tvp->v_mount) + break; /* not the root of a mounted FS */ + tvp = tvp->v_mount->mnt_vnodecovered; + } + if (tvp == NULLVP) + goto out_unlock; + vp = tvp; + + if (vp && (flags & BUILDPATH_CHECKACCESS)) { + vid = vp->v_id; + + NAME_CACHE_UNLOCK(); + + if (vp != first_vp && vp != vp_with_iocount) { + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + vp_with_iocount = NULLVP; + } + if (vnode_getwithvid(vp, vid)) + goto again; + vp_with_iocount = vp; + } + if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) + goto out; /* no peeking */ + + NAME_CACHE_LOCK_SHARED(); } } - name_cache_unlock(); +out_unlock: + NAME_CACHE_UNLOCK(); out: + if (vp_with_iocount) + vnode_put(vp_with_iocount); /* - * slide it down to the beginning of the buffer + * Slide the name down to the beginning of the buffer. 
*/ memmove(buff, end, &buff[buflen] - end); - - *outlen = &buff[buflen] - end; // length includes the trailing zero byte + + /* + * length includes the trailing zero byte + */ + *outlen = &buff[buflen] - end; - return ret; + return (ret); } @@ -248,7 +507,7 @@ vnode_getparent(vnode_t vp) vnode_t pvp = NULLVP; int pvid; - name_cache_lock(); + NAME_CACHE_LOCK_SHARED(); /* * v_parent is stable behind the name_cache lock * however, the only thing we can really guarantee @@ -259,38 +518,33 @@ vnode_getparent(vnode_t vp) if ( (pvp = vp->v_parent) != NULLVP ) { pvid = pvp->v_id; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); if (vnode_getwithvid(pvp, pvid) != 0) pvp = NULL; } else - name_cache_unlock(); - + NAME_CACHE_UNLOCK(); return (pvp); } -char * +const char * vnode_getname(vnode_t vp) { - char *name = NULL; - - name_cache_lock(); + const char *name = NULL; + + NAME_CACHE_LOCK_SHARED(); if (vp->v_name) - name = add_name_locked(vp->v_name, strlen(vp->v_name), 0, 0); - name_cache_unlock(); + name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0); + NAME_CACHE_UNLOCK(); return (name); } void -vnode_putname(char *name) +vnode_putname(const char *name) { - name_cache_lock(); - - remove_name_locked(name); - - name_cache_unlock(); + vfs_removename(name); } @@ -308,52 +562,115 @@ vnode_putname(char *name) * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp */ void -vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int name_hashval, int flags) +vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags) { struct namecache *ncp; vnode_t old_parentvp = NULLVP; - +#if NAMEDSTREAMS + int isstream = (vp->v_flag & VISNAMEDSTREAM); + int kusecountbumped = 0; +#endif + kauth_cred_t tcred = NULL; + const char *vname = NULL; + const char *tname = NULL; if (flags & VNODE_UPDATE_PARENT) { - if (dvp && vnode_ref(dvp) != 0) - dvp = NULLVP; - } else + if (dvp && vnode_ref(dvp) != 0) { + dvp = NULLVP; + } +#if NAMEDSTREAMS + /* Don't count a stream's parent ref during unmounts */ + if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) { + vnode_lock_spin(dvp); + ++dvp->v_kusecount; + kusecountbumped = 1; + vnode_unlock(dvp); + } +#endif + } else { dvp = NULLVP; - name_cache_lock(); + } + if ( (flags & VNODE_UPDATE_NAME) ) { + if (name != vp->v_name) { + if (name && *name) { + if (name_len == 0) + name_len = strlen(name); + tname = vfs_addname(name, name_len, name_hashval, 0); + } + } else + flags &= ~VNODE_UPDATE_NAME; + } + if ( (flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME)) ) { + + NAME_CACHE_LOCK(); + + if ( (flags & VNODE_UPDATE_PURGE) ) { - if ( (flags & VNODE_UPDATE_NAME) && (name != vp->v_name) ) { - if (vp->v_name != NULL) { - remove_name_locked(vp->v_name); - vp->v_name = NULL; + if (vp->v_parent) + vp->v_parent->v_nc_generation++; + + while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) + cache_delete(ncp, 1); + + while ( (ncp = LIST_FIRST(&vp->v_ncchildren)) ) + cache_delete(ncp, 1); + + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = NOCRED; + vp->v_authorized_actions = 0; } - if (name && *name) { - if (name_len == 0) - name_len = strlen(name); - vp->v_name = add_name_locked(name, name_len, name_hashval, 0); + if ( (flags & VNODE_UPDATE_NAME) ) { + vname = vp->v_name; + vp->v_name = tname; } - } - if (flags & VNODE_UPDATE_PARENT) { - if (dvp != vp && dvp != 
vp->v_parent) { - old_parentvp = vp->v_parent; - vp->v_parent = dvp; - dvp = NULLVP; + if (flags & VNODE_UPDATE_PARENT) { + if (dvp != vp && dvp != vp->v_parent) { + old_parentvp = vp->v_parent; + vp->v_parent = dvp; + dvp = NULLVP; - if (old_parentvp) - flags |= VNODE_UPDATE_CACHE; + if (old_parentvp) + flags |= VNODE_UPDATE_CACHE; + } } - } - if (flags & VNODE_UPDATE_CACHE) { - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) - cache_delete(ncp, 1); - } - name_cache_unlock(); + if (flags & VNODE_UPDATE_CACHE) { + while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) + cache_delete(ncp, 1); + } + NAME_CACHE_UNLOCK(); - if (dvp != NULLVP) + if (vname != NULL) + vfs_removename(vname); + + if (IS_VALID_CRED(tcred)) + kauth_cred_unref(&tcred); + } + if (dvp != NULLVP) { +#if NAMEDSTREAMS + /* Back-out the ref we took if we lost a race for vp->v_parent. */ + if (kusecountbumped) { + vnode_lock_spin(dvp); + if (dvp->v_kusecount > 0) + --dvp->v_kusecount; + vnode_unlock(dvp); + } +#endif vnode_rele(dvp); - + } if (old_parentvp) { struct uthread *ut; +#if NAMEDSTREAMS + if (isstream) { + vnode_lock_spin(old_parentvp); + if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) + --old_parentvp->v_kusecount; + vnode_unlock(old_parentvp); + } +#endif ut = get_bsdthread_info(current_thread()); /* @@ -368,8 +685,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam while ( (vp = old_parentvp) != NULLVP ) { - vnode_lock(vp); - + vnode_lock_spin(vp); vnode_rele_internal(vp, 0, 0, 1); /* @@ -383,7 +699,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam * we'll sit in this loop until we run into * a parent in this chain that is not in this state * - * make our check and the node_rele atomic + * make our check and the vnode_rele atomic * with respect to the current vnode we're working on * by holding the vnode lock * if vnode_rele deferred the vnode_reclaim and has put @@ -398,11 +714,17 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam * pull the parent pointer now so that when we do the * vnode_reclaim for each of the vnodes in the uu_vreclaims * list, we won't recurse back through here + * + * need to do a convert here in case vnode_rele_internal + * returns with the lock held in the spin mode... it + * can drop and retake the lock under certain circumstances */ - name_cache_lock(); + vnode_lock_convert(vp); + + NAME_CACHE_LOCK(); old_parentvp = vp->v_parent; vp->v_parent = NULLVP; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } else { /* * we're done... we ran into a vnode that isn't @@ -436,9 +758,9 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam * so that HFS can post-process the lookup. Also, volfs will call * VNOP_GETATTR2 to determine the parent, instead of using v_parent. 
*/ -void vnode_set_hard_link(vnode_t vp) +void vnode_setmultipath(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); /* * In theory, we're changing the vnode's identity as far as the @@ -457,137 +779,211 @@ void vnode_set_hard_link(vnode_t vp) } + +/* + * backwards compatibility + */ void vnode_uncache_credentials(vnode_t vp) { - kauth_cred_t ucred = NULL; + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); +} - if (vp->v_cred) { - vnode_lock(vp); - ucred = vp->v_cred; - vp->v_cred = NULL; +/* + * use the exclusive form of NAME_CACHE_LOCK to protect the update of the + * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions + * we use this lock so that we can look at the v_cred and v_authorized_actions + * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path', + * which is the super-hot path... if we are updating the authorized actions for this + * vnode, we are already in the super-slow and far less frequented path so its not + * that bad that we take the lock exclusive for this case... of course we strive + * to hold it for the minimum amount of time possible + */ + +void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action) +{ + kauth_cred_t tcred = NOCRED; - vnode_unlock(vp); + NAME_CACHE_LOCK(); - if (ucred) - kauth_cred_rele(ucred); + vp->v_authorized_actions &= ~action; + + if (action == KAUTH_INVALIDATE_CACHED_RIGHTS && + IS_VALID_CRED(vp->v_cred)) { + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = NOCRED; } + NAME_CACHE_UNLOCK(); + + if (tcred != NOCRED) + kauth_cred_unref(&tcred); } -void vnode_cache_credentials(vnode_t vp, vfs_context_t context) +extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */ + +boolean_t +vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action) { - kauth_cred_t ucred; - kauth_cred_t tcred = NOCRED; - struct timeval tv; + kauth_cred_t ucred; + boolean_t retval = FALSE; - ucred = vfs_context_ucred(context); + /* Boot argument to defeat rights caching */ + if (bootarg_vnode_cache_defeat) + return FALSE; - if (vp->v_cred != ucred || (vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE)) { - vnode_lock(vp); + if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { + /* + * a TTL is enabled on the rights cache... handle it here + * a TTL of 0 indicates that no rights should be cached + */ + if (vp->v_mount->mnt_authcache_ttl) { + if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) { + /* + * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones), + * we will only allow a SEARCH right on a directory to be cached... 
+ * that cached right always has a default TTL associated with it + */ + if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) + vp = NULLVP; + } + if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) { + vnode_uncache_authorized_action(vp, vp->v_authorized_actions); + vp = NULLVP; + } + } else + vp = NULLVP; + } + if (vp != NULLVP) { + ucred = vfs_context_ucred(ctx); - microuptime(&tv); - vp->v_cred_timestamp = tv.tv_sec; + NAME_CACHE_LOCK_SHARED(); - if (vp->v_cred != ucred) { - kauth_cred_ref(ucred); - - tcred = vp->v_cred; - vp->v_cred = ucred; - } - vnode_unlock(vp); - - if (tcred) - kauth_cred_rele(tcred); + if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) + retval = TRUE; + + NAME_CACHE_UNLOCK(); } + return retval; } -/* reverse_lookup - lookup by walking back up the parent chain while leveraging - * use of the name cache lock in order to protect our starting vnode. - * NOTE - assumes you already have search access to starting point. - * returns 0 when we have reached the root, current working dir, or chroot root - * - */ -int -reverse_lookup(vnode_t start_vp, vnode_t *lookup_vpp, struct filedesc *fdp, vfs_context_t context, int *dp_authorized) + +void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action) { - int vid, done = 0; - int auth_opaque = 0; - vnode_t dp = start_vp; - vnode_t vp = NULLVP; - kauth_cred_t ucred; - struct timeval tv; + kauth_cred_t tcred = NOCRED; + kauth_cred_t ucred; + struct timeval tv; + boolean_t ttl_active = FALSE; - ucred = vfs_context_ucred(context); - *lookup_vpp = start_vp; + ucred = vfs_context_ucred(ctx); - name_cache_lock(); + if (!IS_VALID_CRED(ucred) || action == 0) + return; + + if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { + /* + * a TTL is enabled on the rights cache... handle it here + * a TTL of 0 indicates that no rights should be cached + */ + if (vp->v_mount->mnt_authcache_ttl == 0) + return; + + if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) { + /* + * only cache SEARCH action for filesystems marked + * MNTK_AUTH_OPAQUE on VDIRs... + * the lookup_path code will time these out + */ + if ( (action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR ) + return; + } + ttl_active = TRUE; - if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) { - auth_opaque = 1; microuptime(&tv); } - for (;;) { - *dp_authorized = 0; + NAME_CACHE_LOCK(); - if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED)) - break; - if (dp->v_cred != ucred) - break; - /* - * indicate that we're allowed to traverse this directory... - * even if we bail for some reason, this information is valid and is used - * to avoid doing a vnode_authorize + if (vp->v_cred != ucred) { + kauth_cred_ref(ucred); + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held */ - *dp_authorized = 1; + tcred = vp->v_cred; + vp->v_cred = ucred; + vp->v_authorized_actions = 0; + } + if (ttl_active == TRUE && vp->v_authorized_actions == 0) { + /* + * only reset the timestamnp on the + * first authorization cached after the previous + * timer has expired or we're switching creds... 
+ * 'vnode_cache_is_authorized' will clear the + * authorized actions if the TTL is active and + * it has expired + */ + vp->v_cred_timestamp = tv.tv_sec; + } + vp->v_authorized_actions |= action; - if ((dp->v_flag & VROOT) != 0 || /* Hit "/" */ - (dp == fdp->fd_cdir) || /* Hit process's working directory */ - (dp == fdp->fd_rdir)) { /* Hit process chroot()-ed root */ - done = 1; - break; - } + NAME_CACHE_UNLOCK(); - if ( (vp = dp->v_parent) == NULLVP) - break; + if (IS_VALID_CRED(tcred)) + kauth_cred_unref(&tcred); +} - dp = vp; - *lookup_vpp = dp; - } /* for (;;) */ - vid = dp->v_id; - - name_cache_unlock(); - - if (done == 0 && dp != start_vp) { - if (vnode_getwithvid(dp, vid) != 0) { - *lookup_vpp = start_vp; - } - } +boolean_t vnode_cache_is_stale(vnode_t vp) +{ + struct timeval tv; + boolean_t retval; + + microuptime(&tv); - return((done == 1) ? 0 : -1); + if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) + retval = TRUE; + else + retval = FALSE; + + return retval; } + + +/* + * Returns: 0 Success + * ERECYCLE vnode was recycled from underneath us. Force lookup to be re-driven from namei. + * This errno value should not be seen by anyone outside of the kernel. + */ int -cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, vfs_context_t context, int *trailing_slash, int *dp_authorized) +cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, + vfs_context_t ctx, int *dp_authorized, vnode_t last_dp) { char *cp; /* pointer into pathname argument */ - int vid, vvid; - int auth_opaque = 0; + int vid; + int vvid = 0; /* protected by vp != NULLVP */ vnode_t vp = NULLVP; vnode_t tdp = NULLVP; kauth_cred_t ucred; - struct timeval tv; + boolean_t ttl_enabled = FALSE; + struct timeval tv; + mount_t mp; unsigned int hash; + int error = 0; - ucred = vfs_context_ucred(context); - *trailing_slash = 0; +#if CONFIG_TRIGGERS + vnode_t trigger_vp; +#endif /* CONFIG_TRIGGERS */ - name_cache_lock(); + ucred = vfs_context_ucred(ctx); + ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH); + NAME_CACHE_LOCK_SHARED(); - if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) { - auth_opaque = 1; + if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { + ttl_enabled = TRUE; microuptime(&tv); } for (;;) { @@ -602,7 +998,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, cp = cnp->cn_nameptr; while (*cp && (*cp != '/')) { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; } /* * the crc generator can legitimately generate @@ -629,7 +1025,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, ndp->ni_pathlen--; if (*cp == '\0') { - *trailing_slash = 1; + ndp->ni_flag |= NAMEI_TRAILINGSLASH; *ndp->ni_next = '\0'; } } @@ -644,12 +1040,56 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, cnp->cn_flags |= ISDOTDOT; *dp_authorized = 0; +#if NAMEDRSRCFORK + /* + * Process a request for a file's resource fork. + * + * Consume the _PATH_RSRCFORKSPEC suffix and tag the path. + */ + if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) && + (cp[1] == '.' && cp[2] == '.') && + bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) { + /* Skip volfs file systems that don't support native streams. 
*/ + if ((dp->v_mount != NULL) && + (dp->v_mount->mnt_flag & MNT_DOVOLFS) && + (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { + goto skiprsrcfork; + } + cnp->cn_flags |= CN_WANTSRSRCFORK; + cnp->cn_flags |= ISLASTCN; + ndp->ni_next[0] = '\0'; + ndp->ni_pathlen = 1; + } +skiprsrcfork: +#endif - if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED)) +#if CONFIG_MACF + + /* + * Name cache provides authorization caching (see below) + * that will short circuit MAC checks in lookup(). + * We must perform MAC check here. On denial + * dp_authorized will remain 0 and second check will + * be perfomed in lookup(). + */ + if (!(cnp->cn_flags & DONOTAUTH)) { + error = mac_vnode_check_lookup(ctx, dp, cnp); + if (error) { + NAME_CACHE_UNLOCK(); + goto errorout; + } + } +#endif /* MAC */ + if (ttl_enabled && ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl)) break; - if (dp->v_cred != ucred) + /* + * NAME_CACHE_LOCK holds these fields stable + */ + if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) && + !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE)) break; + /* * indicate that we're allowed to traverse this directory... * even if we fail the cache lookup or decide to bail for @@ -659,18 +1099,28 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, *dp_authorized = 1; if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) { - if (cnp->cn_nameiop != LOOKUP) - break; - if (cnp->cn_flags & (LOCKPARENT | NOCACHE)) - break; + if (cnp->cn_nameiop != LOOKUP) + break; + if (cnp->cn_flags & LOCKPARENT) + break; + if (cnp->cn_flags & NOCACHE) + break; if (cnp->cn_flags & ISDOTDOT) { + /* + * Force directory hardlinks to go to + * file system for ".." requests. + */ + if (dp && (dp->v_flag & VISHARDLINK)) { + break; + } /* * Quit here only if we can't use * the parent directory pointer or * don't have one. Otherwise, we'll * use it below. */ - if ((dp->v_flag & VROOT) || + if ((dp->v_flag & VROOT) || + dp == ndp->ni_rootdir || dp->v_parent == NULLVP) break; } @@ -682,13 +1132,20 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, */ if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') vp = dp; - else if (cnp->cn_flags & ISDOTDOT) + else if ( (cnp->cn_flags & ISDOTDOT) ) vp = dp->v_parent; else { if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) break; - } + if ( (vp->v_flag & VISHARDLINK) ) { + /* + * The file system wants a VNOP_LOOKUP on this vnode + */ + vp = NULL; + break; + } + } if ( (cnp->cn_flags & ISLASTCN) ) break; @@ -697,8 +1154,27 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, vp = NULL; break; } - if (vp->v_mountedhere && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) - break; + + if ( (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) { + + if (mp->mnt_realrootvp == NULLVP || mp->mnt_generation != mount_generation || + mp->mnt_realrootvp_vid != mp->mnt_realrootvp->v_id) + break; + vp = mp->mnt_realrootvp; + } + +#if CONFIG_TRIGGERS + /* + * After traversing all mountpoints stacked here, if we have a + * trigger in hand, resolve it. Note that we don't need to + * leave the fast path if the mount has already happened. 
+ */ + if ((vp->v_resolve != NULL) && + (vp->v_resolve->vr_resolve_func != NULL)) { + break; + } +#endif /* CONFIG_TRIGGERS */ + dp = vp; vp = NULLVP; @@ -714,8 +1190,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, vvid = vp->v_id; vid = dp->v_id; - name_cache_unlock(); - + NAME_CACHE_UNLOCK(); if ((vp != NULLVP) && (vp->v_type != VLNK) && ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) { @@ -732,34 +1207,45 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, dp = NULLVP; } else { need_dp: - /* + /* * return the last directory we looked at - * with an io reference held + * with an io reference held. If it was the one passed + * in as a result of the last iteration of VNOP_LOOKUP, + * it should already hold an io ref. No need to increase ref. */ - if (dp == ndp->ni_usedvp) { - /* - * if this vnode matches the one passed in via USEDVP - * than this context already holds an io_count... just - * use vnode_get to get an extra ref for lookup to play - * with... can't use the getwithvid variant here because - * it will block behind a vnode_drain which would result - * in a deadlock (since we already own an io_count that the - * vnode_drain is waiting on)... vnode_get grabs the io_count - * immediately w/o waiting... it always succeeds - */ - vnode_get(dp); - } else if ( (vnode_getwithvid(dp, vid)) ) { - /* - * failure indicates the vnode - * changed identity or is being - * TERMINATED... in either case - * punt this lookup - */ - return (ENOENT); + if (last_dp != dp){ + + if (dp == ndp->ni_usedvp) { + /* + * if this vnode matches the one passed in via USEDVP + * than this context already holds an io_count... just + * use vnode_get to get an extra ref for lookup to play + * with... can't use the getwithvid variant here because + * it will block behind a vnode_drain which would result + * in a deadlock (since we already own an io_count that the + * vnode_drain is waiting on)... vnode_get grabs the io_count + * immediately w/o waiting... it always succeeds + */ + vnode_get(dp); + } else if ( (vnode_getwithvid_drainok(dp, vid)) ) { + /* + * failure indicates the vnode + * changed identity or is being + * TERMINATED... in either case + * punt this lookup. + * + * don't necessarily return ENOENT, though, because + * we really want to go back to disk and make sure it's + * there or not if someone else is changing this + * vnode. + */ + error = ERECYCLE; + goto errorout; + } } } if (vp != NULLVP) { - if ( (vnode_getwithvid(vp, vvid)) ) { + if ( (vnode_getwithvid_drainok(vp, vvid)) ) { vp = NULLVP; /* @@ -776,45 +1262,72 @@ need_dp: } } } + ndp->ni_dvp = dp; ndp->ni_vp = vp; - return (0); +#if CONFIG_TRIGGERS + trigger_vp = vp ? vp : dp; + if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) { + error = vnode_trigger_resolve(trigger_vp, ndp, ctx); + if (error) { + if (vp) + vnode_put(vp); + if (dp) + vnode_put(dp); + goto errorout; + } + } +#endif /* CONFIG_TRIGGERS */ + +errorout: + /* + * If we came into cache_lookup_path after an iteration of the lookup loop that + * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with a io ref + * on it. It is now the job of cache_lookup_path to drop the ref on this vnode + * when it is no longer needed. 
If we get to this point, and last_dp is not NULL + * and it is ALSO not the dvp we want to return to caller of this function, it MUST be + * the case that we got to a subsequent path component and this previous vnode is + * no longer needed. We can then drop the io ref on it. + */ + if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)){ + vnode_put(last_dp); + } + + //initialized to 0, should be the same if no error cases occurred. + return error; } static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp) { - register struct namecache *ncp; - register struct nchashhead *ncpp; - register long namelen = cnp->cn_namelen; - char *nameptr = cnp->cn_nameptr; + struct namecache *ncp; + struct nchashhead *ncpp; + long namelen = cnp->cn_namelen; unsigned int hashval = (cnp->cn_hash & NCHASHMASK); - vnode_t vp; + if (nc_disabled) { + return NULL; + } + ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) + if (memcmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) break; } } - if (ncp == 0) + if (ncp == 0) { /* * We failed to find an entry */ + NCHSTAT(ncs_miss); return (NULL); - - vp = ncp->nc_vp; - if (vp && (vp->v_flag & VISHARDLINK)) { - /* - * The file system wants a VNOP_LOOKUP on this vnode - */ - vp = NULL; } - - return (vp); + NCHSTAT(ncs_goodhits); + + return (ncp->nc_vp); } @@ -829,11 +1342,11 @@ hash_string(const char *cp, int len) if (len) { while (len--) { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; } } else { while (*cp != '\0') { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; } } /* @@ -863,55 +1376,69 @@ hash_string(const char *cp, int len) */ int -cache_lookup(dvp, vpp, cnp) - struct vnode *dvp; - struct vnode **vpp; - struct componentname *cnp; +cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { - register struct namecache *ncp; - register struct nchashhead *ncpp; - register long namelen = cnp->cn_namelen; - char *nameptr = cnp->cn_nameptr; - unsigned int hashval = (cnp->cn_hash & NCHASHMASK); + struct namecache *ncp; + struct nchashhead *ncpp; + long namelen = cnp->cn_namelen; + unsigned int hashval; + boolean_t have_exclusive = FALSE; uint32_t vid; vnode_t vp; - name_cache_lock(); + if (cnp->cn_hash == 0) + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + hashval = (cnp->cn_hash & NCHASHMASK); + if (nc_disabled) { + return 0; + } + + NAME_CACHE_LOCK_SHARED(); + +relook: ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) + if (memcmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) break; } } /* We failed to find an entry */ if (ncp == 0) { - nchstats.ncs_miss++; - name_cache_unlock(); + NCHSTAT(ncs_miss); + NAME_CACHE_UNLOCK(); return (0); } /* We don't want to have an entry, so dump it */ if ((cnp->cn_flags & MAKEENTRY) == 0) { - nchstats.ncs_badhits++; - cache_delete(ncp, 1); - name_cache_unlock(); - return (0); + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); + cache_delete(ncp, 1); + NAME_CACHE_UNLOCK(); + return (0); + } + NAME_CACHE_UNLOCK(); + NAME_CACHE_LOCK(); + 
have_exclusive = TRUE; + goto relook; } vp = ncp->nc_vp; /* We found a "positive" match, return the vnode */ if (vp) { - nchstats.ncs_goodhits++; + NCHSTAT(ncs_goodhits); vid = vp->v_id; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); if (vnode_getwithvid(vp, vid)) { - name_cache_lock(); - nchstats.ncs_badvid++; - name_cache_unlock(); +#if COLLECT_STATS + NAME_CACHE_LOCK(); + NCHSTAT(ncs_badvid); + NAME_CACHE_UNLOCK(); +#endif return (0); } *vpp = vp; @@ -920,49 +1447,124 @@ cache_lookup(dvp, vpp, cnp) /* We found a negative match, and want to create it, so purge */ if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) { - nchstats.ncs_badhits++; - cache_delete(ncp, 1); - name_cache_unlock(); - return (0); + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); + cache_delete(ncp, 1); + NAME_CACHE_UNLOCK(); + return (0); + } + NAME_CACHE_UNLOCK(); + NAME_CACHE_LOCK(); + have_exclusive = TRUE; + goto relook; } /* * We found a "negative" match, ENOENT notifies client of this match. * The nc_whiteout field records whether this is a whiteout. */ - nchstats.ncs_neghits++; + NCHSTAT(ncs_neghits); if (ncp->nc_whiteout) cnp->cn_flags |= ISWHITEOUT; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); return (ENOENT); } +const char * +cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp) +{ + const char *strname; + + if (cnp->cn_hash == 0) + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + + /* + * grab 2 references on the string entered + * one for the cache_enter_locked to consume + * and the second to be consumed by v_name (vnode_create call point) + */ + strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0); + + NAME_CACHE_LOCK(); + + cache_enter_locked(dvp, vp, cnp, strname); + + NAME_CACHE_UNLOCK(); + + return (strname); +} + + +/* + * Add an entry to the cache... + * but first check to see if the directory + * that this entry is to be associated with has + * had any cache_purges applied since we took + * our identity snapshot... this check needs to + * be done behind the name cache lock + */ +void +cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen) +{ + + if (cnp->cn_hash == 0) + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + + NAME_CACHE_LOCK(); + + if (dvp->v_nc_generation == gen) + (void)cache_enter_locked(dvp, vp, cnp, NULL); + + NAME_CACHE_UNLOCK(); +} + + /* * Add an entry to the cache. */ void -cache_enter(dvp, vp, cnp) - struct vnode *dvp; - struct vnode *vp; - struct componentname *cnp; +cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) { - register struct namecache *ncp, *negp; - register struct nchashhead *ncpp; + const char *strname; if (cnp->cn_hash == 0) cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); - name_cache_lock(); + /* + * grab 1 reference on the string entered + * for the cache_enter_locked to consume + */ + strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0); + + NAME_CACHE_LOCK(); + + cache_enter_locked(dvp, vp, cnp, strname); + + NAME_CACHE_UNLOCK(); +} + + +static void +cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname) +{ + struct namecache *ncp, *negp; + struct nchashhead *ncpp; + + if (nc_disabled) + return; - /* if the entry is for -ve caching vp is null */ + /* + * if the entry is for -ve caching vp is null + */ if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) { /* * someone beat us to the punch.. 
* this vnode is already in the cache */ - name_cache_unlock(); - return; + if (strname != NULL) + vfs_removename(strname); + return; } /* * We allocate a new entry if we are less than the maximum @@ -975,7 +1577,7 @@ cache_enter(dvp, vp, cnp) /* * Allocate one more entry */ - ncp = (struct namecache *)_MALLOC_ZONE((u_long)sizeof *ncp, M_CACHE, M_WAITOK); + ncp = (struct namecache *)_MALLOC_ZONE(sizeof(*ncp), M_CACHE, M_WAITOK); numcache++; } else { /* @@ -989,11 +1591,11 @@ cache_enter(dvp, vp, cnp) * still in use... we need to * delete it before re-using it */ - nchstats.ncs_stolen++; + NCHSTAT(ncs_stolen); cache_delete(ncp, 0); } } - nchstats.ncs_enters++; + NCHSTAT(ncs_enters); /* * Fill in cache info, if vp is NULL this is a "negative" cache entry. @@ -1002,8 +1604,11 @@ cache_enter(dvp, vp, cnp) ncp->nc_dvp = dvp; ncp->nc_hashval = cnp->cn_hash; ncp->nc_whiteout = FALSE; - ncp->nc_name = add_name_locked(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); + if (strname == NULL) + ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0); + else + ncp->nc_name = strname; /* * make us the newest entry in the cache * i.e. we'll be the last to be stolen @@ -1013,7 +1618,7 @@ cache_enter(dvp, vp, cnp) ncpp = NCHHASH(dvp, cnp->cn_hash); #if DIAGNOSTIC { - register struct namecache *p; + struct namecache *p; for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) if (p == ncp) @@ -1041,17 +1646,15 @@ cache_enter(dvp, vp, cnp) if (cnp->cn_flags & ISWHITEOUT) ncp->nc_whiteout = TRUE; - nchstats.ncs_negtotal++; + ncs_negtotal++; - if (nchstats.ncs_negtotal > desiredNegNodes) { + if (ncs_negtotal > desiredNegNodes) { /* * if we've reached our desired limit * of negative cache entries, delete * the oldest */ negp = TAILQ_FIRST(&neghead); - TAILQ_REMOVE(&neghead, negp, nc_un.nc_negentry); - cache_delete(negp, 1); } } @@ -1060,8 +1663,6 @@ cache_enter(dvp, vp, cnp) * are children of dvp */ LIST_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child); - - name_cache_unlock(); } @@ -1101,6 +1702,8 @@ static void init_crc32(void) void nchinit(void) { + int i; + desiredNegNodes = (desiredvnodes / 10); desiredNodes = desiredvnodes + desiredNegNodes; @@ -1109,39 +1712,55 @@ nchinit(void) init_crc32(); - nchashtbl = hashinit(MAX(4096, (2 *desiredNodes)), M_CACHE, &nchash); + nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 *desiredNodes)), M_CACHE, &nchash); nchashmask = nchash; nchash++; init_string_table(); - /* Allocate mount list lock group attribute and group */ + /* Allocate name cache lock group attribute and group */ namecache_lck_grp_attr= lck_grp_attr_alloc_init(); - lck_grp_attr_setstat(namecache_lck_grp_attr); namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr); - /* Allocate mount list lock attribute */ + /* Allocate name cache lock attribute */ namecache_lck_attr = lck_attr_alloc_init(); - //lck_attr_setdebug(namecache_lck_attr); - /* Allocate mount list lock */ - namecache_mtx_lock = lck_mtx_alloc_init(namecache_lck_grp, namecache_lck_attr); + /* Allocate name cache lock */ + namecache_rw_lock = lck_rw_alloc_init(namecache_lck_grp, namecache_lck_attr); + + + /* Allocate string cache lock group attribute and group */ + strcache_lck_grp_attr= lck_grp_attr_alloc_init(); + + strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr); + + /* Allocate string cache lock attribute */ + strcache_lck_attr = lck_attr_alloc_init(); + + /* Allocate string cache lock */ + strtable_rw_lock = lck_rw_alloc_init(strcache_lck_grp, 
strcache_lck_attr); + for (i = 0; i < NUM_STRCACHE_LOCKS; i++) + lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr); +} +void +name_cache_lock_shared(void) +{ + lck_rw_lock_shared(namecache_rw_lock); } void name_cache_lock(void) { - lck_mtx_lock(namecache_mtx_lock); + lck_rw_lock_exclusive(namecache_rw_lock); } void name_cache_unlock(void) { - lck_mtx_unlock(namecache_mtx_lock); - + lck_rw_done(namecache_rw_lock); } @@ -1160,7 +1779,7 @@ resize_namecache(u_int newsize) dNodes = newsize + dNegNodes; // we don't support shrinking yet - if (dNodes < desiredNodes) { + if (dNodes <= desiredNodes) { return 0; } new_table = hashinit(2 * dNodes, M_CACHE, &nchashmask); @@ -1170,7 +1789,7 @@ resize_namecache(u_int newsize) return ENOMEM; } - name_cache_lock(); + NAME_CACHE_LOCK(); // do the switch! old_table = nchashtbl; nchashtbl = new_table; @@ -1198,7 +1817,7 @@ resize_namecache(u_int newsize) desiredNodes = dNodes; desiredNegNodes = dNegNodes; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); FREE(old_table, M_CACHE); return 0; @@ -1207,13 +1826,13 @@ resize_namecache(u_int newsize) static void cache_delete(struct namecache *ncp, int age_entry) { - nchstats.ncs_deletes++; + NCHSTAT(ncs_deletes); if (ncp->nc_vp) { LIST_REMOVE(ncp, nc_un.nc_link); } else { TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry); - nchstats.ncs_negtotal--; + ncs_negtotal--; } LIST_REMOVE(ncp, nc_child); @@ -1234,7 +1853,7 @@ cache_delete(struct namecache *ncp, int age_entry) TAILQ_REMOVE(&nchead, ncp, nc_entry); TAILQ_INSERT_HEAD(&nchead, ncp, nc_entry); } - remove_name_locked(ncp->nc_name); + vfs_removename(ncp->nc_name); ncp->nc_name = NULL; } @@ -1247,11 +1866,18 @@ void cache_purge(vnode_t vp) { struct namecache *ncp; + kauth_cred_t tcred = NULL; - if ((LIST_FIRST(&vp->v_nclinks) == NULL) && (LIST_FIRST(&vp->v_ncchildren) == NULL)) + if ((LIST_FIRST(&vp->v_nclinks) == NULL) && + (LIST_FIRST(&vp->v_ncchildren) == NULL) && + (vp->v_cred == NOCRED) && + (vp->v_parent == NULLVP)) return; - name_cache_lock(); + NAME_CACHE_LOCK(); + + if (vp->v_parent) + vp->v_parent->v_nc_generation++; while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) cache_delete(ncp, 1); @@ -1259,7 +1885,17 @@ cache_purge(vnode_t vp) while ( (ncp = LIST_FIRST(&vp->v_ncchildren)) ) cache_delete(ncp, 1); - name_cache_unlock(); + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = NOCRED; + vp->v_authorized_actions = 0; + + NAME_CACHE_UNLOCK(); + + if (IS_VALID_CRED(tcred)) + kauth_cred_unref(&tcred); } /* @@ -1272,15 +1908,15 @@ cache_purge(vnode_t vp) void cache_purge_negatives(vnode_t vp) { - struct namecache *ncp; + struct namecache *ncp, *next_ncp; - name_cache_lock(); + NAME_CACHE_LOCK(); - LIST_FOREACH(ncp, &vp->v_ncchildren, nc_child) + LIST_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) if (ncp->nc_vp == NULL) cache_delete(ncp , 1); - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } /* @@ -1290,13 +1926,12 @@ cache_purge_negatives(vnode_t vp) * entries at the same time. 
*/ void -cache_purgevfs(mp) - struct mount *mp; +cache_purgevfs(struct mount *mp) { struct nchashhead *ncpp; struct namecache *ncp; - name_cache_lock(); + NAME_CACHE_LOCK(); /* Scan hash tables for applicable entries */ for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) { restart: @@ -1307,7 +1942,7 @@ restart: } } } - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } @@ -1317,195 +1952,226 @@ restart: // static LIST_HEAD(stringhead, string_t) *string_ref_table; static u_long string_table_mask; -static uint32_t max_chain_len=0; -static struct stringhead *long_chain_head=NULL; static uint32_t filled_buckets=0; -static uint32_t num_dups=0; -static uint32_t nstrings=0; + typedef struct string_t { LIST_ENTRY(string_t) hash_chain; - unsigned char *str; + const char *str; uint32_t refcount; } string_t; - -static int +static void resize_string_ref_table(void) { - struct stringhead *new_table; - struct stringhead *old_table; - struct stringhead *old_head, *head; - string_t *entry, *next; - uint32_t i, hashval; - u_long new_mask, old_mask; - - new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); - if (new_table == NULL) { - return ENOMEM; - } + struct stringhead *new_table; + struct stringhead *old_table; + struct stringhead *old_head, *head; + string_t *entry, *next; + uint32_t i, hashval; + u_long new_mask, old_mask; - // do the switch! - old_table = string_ref_table; - string_ref_table = new_table; - old_mask = string_table_mask; - string_table_mask = new_mask; + /* + * need to hold the table lock exclusively + * in order to grow the table... need to recheck + * the need to resize again after we've taken + * the lock exclusively in case some other thread + * beat us to the punch + */ + lck_rw_lock_exclusive(strtable_rw_lock); - printf("resize: max chain len %d, new table size %d\n", - max_chain_len, new_mask + 1); - max_chain_len = 0; - long_chain_head = NULL; - filled_buckets = 0; + if (4 * filled_buckets < ((string_table_mask + 1) * 3)) { + lck_rw_done(strtable_rw_lock); + return; + } + new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); - // walk the old table and insert all the entries into - // the new table - // - for(i=0; i <= old_mask; i++) { - old_head = &old_table[i]; - for (entry=old_head->lh_first; entry != NULL; entry=next) { - hashval = hash_string(entry->str, 0); - head = &string_ref_table[hashval & string_table_mask]; - if (head->lh_first == NULL) { - filled_buckets++; - } + if (new_table == NULL) { + printf("failed to resize the hash table.\n"); + lck_rw_done(strtable_rw_lock); + return; + } + + // do the switch! 
+ old_table = string_ref_table; + string_ref_table = new_table; + old_mask = string_table_mask; + string_table_mask = new_mask; + filled_buckets = 0; - next = entry->hash_chain.le_next; - LIST_INSERT_HEAD(head, entry, hash_chain); + // walk the old table and insert all the entries into + // the new table + // + for (i = 0; i <= old_mask; i++) { + old_head = &old_table[i]; + for (entry = old_head->lh_first; entry != NULL; entry = next) { + hashval = hash_string((const char *)entry->str, 0); + head = &string_ref_table[hashval & string_table_mask]; + if (head->lh_first == NULL) { + filled_buckets++; + } + next = entry->hash_chain.le_next; + LIST_INSERT_HEAD(head, entry, hash_chain); + } } - } - - FREE(old_table, M_CACHE); + lck_rw_done(strtable_rw_lock); - return 0; + FREE(old_table, M_CACHE); } static void init_string_table(void) { - string_ref_table = hashinit(4096, M_CACHE, &string_table_mask); + string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask); } -char * -vfs_addname(const char *name, size_t len, u_int hashval, u_int flags) +const char * +vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags) { - char * ptr; - - name_cache_lock(); - ptr = add_name_locked(name, len, hashval, flags); - name_cache_unlock(); - - return(ptr); + return (add_name_internal(name, len, hashval, FALSE, flags)); } -static char * -add_name_locked(const char *name, size_t len, u_int hashval, __unused u_int flags) + +static const char * +add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags) { - struct stringhead *head; - string_t *entry; - uint32_t chain_len = 0; + struct stringhead *head; + string_t *entry; + uint32_t chain_len = 0; + uint32_t hash_index; + uint32_t lock_index; + char *ptr; - // - // If the table gets more than 3/4 full, resize it - // - if (4*filled_buckets >= ((string_table_mask + 1) * 3)) { - if (resize_string_ref_table() != 0) { - printf("failed to resize the hash table.\n"); - } - } - if (hashval == 0) { - hashval = hash_string(name, 0); - } - - head = &string_ref_table[hashval & string_table_mask]; - for (entry=head->lh_first; entry != NULL; chain_len++, entry=entry->hash_chain.le_next) { - if (memcmp(entry->str, name, len) == 0 && entry->str[len] == '\0') { - entry->refcount++; - num_dups++; - break; + /* + * if the length already accounts for the null-byte, then + * subtract one so later on we don't index past the end + * of the string. + */ + if (len > 0 && name[len-1] == '\0') { + len--; + } + if (hashval == 0) { + hashval = hash_string(name, len); } - } - if (entry == NULL) { - // it wasn't already there so add it. - MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK); + /* + * take this lock 'shared' to keep the hash stable + * if someone else decides to grow the pool they + * will take this lock exclusively + */ + lck_rw_lock_shared(strtable_rw_lock); - // have to get "head" again because we could have blocked - // in malloc and thus head could have changed. 
- // - head = &string_ref_table[hashval & string_table_mask]; - if (head->lh_first == NULL) { - filled_buckets++; + /* + * If the table gets more than 3/4 full, resize it + */ + if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) { + lck_rw_done(strtable_rw_lock); + + resize_string_ref_table(); + + lck_rw_lock_shared(strtable_rw_lock); } + hash_index = hashval & string_table_mask; + lock_index = hash_index % NUM_STRCACHE_LOCKS; + + head = &string_ref_table[hash_index]; - entry->str = (char *)((char *)entry + sizeof(string_t)); - strncpy(entry->str, name, len); - entry->str[len] = '\0'; - entry->refcount = 1; - LIST_INSERT_HEAD(head, entry, hash_chain); + lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]); - if (chain_len > max_chain_len) { - max_chain_len = chain_len; - long_chain_head = head; + for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) { + if (memcmp(entry->str, name, len) == 0 && entry->str[len] == 0) { + entry->refcount++; + break; + } } + if (entry == NULL) { + lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]); + /* + * it wasn't already there so add it. + */ + MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK); - nstrings++; - } + if (head->lh_first == NULL) { + OSAddAtomic(1, &filled_buckets); + } + ptr = (char *)((char *)entry + sizeof(string_t)); + strncpy(ptr, name, len); + ptr[len] = '\0'; + entry->str = ptr; + entry->refcount = 1; + LIST_INSERT_HEAD(head, entry, hash_chain); + } + if (need_extra_ref == TRUE) + entry->refcount++; - return entry->str; + lck_mtx_unlock(&strcache_mtx_locks[lock_index]); + lck_rw_done(strtable_rw_lock); + + return (const char *)entry->str; } + int vfs_removename(const char *nameref) { - int i; + struct stringhead *head; + string_t *entry; + uint32_t hashval; + uint32_t hash_index; + uint32_t lock_index; + int retval = ENOENT; - name_cache_lock(); - i = remove_name_locked(nameref); - name_cache_unlock(); + hashval = hash_string(nameref, 0); - return(i); - -} + /* + * take this lock 'shared' to keep the hash stable + * if someone else decides to grow the pool they + * will take this lock exclusively + */ + lck_rw_lock_shared(strtable_rw_lock); + /* + * must compute the head behind the table lock + * since the size and location of the table + * can change on the fly + */ + hash_index = hashval & string_table_mask; + lock_index = hash_index % NUM_STRCACHE_LOCKS; + head = &string_ref_table[hash_index]; -static int -remove_name_locked(const char *nameref) -{ - struct stringhead *head; - string_t *entry; - uint32_t hashval; - char * ptr; - - hashval = hash_string(nameref, 0); - head = &string_ref_table[hashval & string_table_mask]; - for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { - if (entry->str == (unsigned char *)nameref) { - entry->refcount--; - if (entry->refcount == 0) { - LIST_REMOVE(entry, hash_chain); - if (head->lh_first == NULL) { - filled_buckets--; - } - ptr = entry->str; - entry->str = NULL; - nstrings--; + lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]); - FREE(entry, M_TEMP); - } else { - num_dups--; - } + for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) { + if (entry->str == nameref) { + entry->refcount--; - return 0; + if (entry->refcount == 0) { + LIST_REMOVE(entry, hash_chain); + + if (head->lh_first == NULL) { + OSAddAtomic(-1, &filled_buckets); + } + } else { + entry = NULL; + } + retval = 0; + break; + } } - } + lck_mtx_unlock(&strcache_mtx_locks[lock_index]); + lck_rw_done(strtable_rw_lock); - 
return ENOENT; + if (entry != NULL) + FREE(entry, M_TEMP); + + return retval; } +#ifdef DUMP_STRING_TABLE void dump_string_table(void) { @@ -1513,12 +2179,14 @@ dump_string_table(void) string_t *entry; u_long i; - name_cache_lock(); + lck_rw_lock_shared(strtable_rw_lock); + for (i = 0; i <= string_table_mask; i++) { head = &string_ref_table[i]; for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { printf("%6d - %s\n", entry->refcount, entry->str); } } - name_cache_unlock(); + lck_rw_done(strtable_rw_lock); } +#endif /* DUMP_STRING_TABLE */
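
The v_id/vnode_getwithvid() protocol that build_path() and cache_lookup_path() lean on above is easy to get wrong, so here is a minimal userspace sketch of the idea: snapshot the identity generation while the shared name cache lock is (conceptually) held, drop the lock, and only take a usable reference if the generation still matches, restarting the walk otherwise. All names below are invented stand-ins; the kernel's vnode_getwithvid() additionally copes with vnodes being drained or terminated.

    #include <stdio.h>

    struct fake_vnode {
            int v_id;       /* bumped each time the vnode is recycled */
            int io_count;   /* usable reference count */
    };

    static int
    fake_getwithvid(struct fake_vnode *vp, int vid)
    {
            if (vp->v_id != vid)
                    return -1;      /* recycled underneath us: caller goes to "again:" */
            vp->io_count++;
            return 0;
    }

    int
    main(void)
    {
            struct fake_vnode vn = { .v_id = 7, .io_count = 1 };
            int vid = vn.v_id;      /* snapshot under the (conceptual) name cache lock */

            vn.v_id = 8;            /* simulate a recycle after the lock is dropped */

            if (fake_getwithvid(&vn, vid) != 0)
                    printf("identity changed; restart the walk\n");
            return 0;
    }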
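The per-vnode rights cache consulted in cache_lookup_path() and vnode_cache_is_authorized() reduces to one test: same credential, requested action bits already cached, and (when the mount enables a TTL) a fresh timestamp. A sketch under simplified assumptions; the field names and the bit value chosen for KAUTH_VNODE_SEARCH are illustrative stand-ins, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    #define KAUTH_VNODE_SEARCH 0x04         /* illustrative bit value */

    struct rights_cache {
            const void *cred;               /* stands in for vp->v_cred */
            unsigned    actions;            /* stands in for vp->v_authorized_actions */
            long        stamp;              /* stands in for vp->v_cred_timestamp */
    };

    static bool
    is_authorized(const struct rights_cache *rc, const void *cred,
        unsigned action, long now, long ttl)
    {
            if (ttl != 0 && (now - rc->stamp) > ttl)
                    return false;           /* stale: force re-authorization */
            return rc->cred == cred && (rc->actions & action) == action;
    }

    int
    main(void)
    {
            int me;
            struct rights_cache rc = { &me, KAUTH_VNODE_SEARCH, 100 };

            printf("%d\n", is_authorized(&rc, &me, KAUTH_VNODE_SEARCH, 101, 30)); /* 1: fresh hit */
            printf("%d\n", is_authorized(&rc, &me, KAUTH_VNODE_SEARCH, 200, 30)); /* 0: TTL expired */
            return 0;
    }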
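The change to hash_string() (and to its inline copy in cache_lookup_path()) replaces hash ^= crc32tab[...] with hash = crc32tab[...] ^ (hash << 8), the textbook byte-at-a-time, MSB-first CRC step; the old form never shifted the low-order state bits back toward the top byte, so they influenced nothing. A self-contained version, assuming the conventional CRC-32 polynomial 0x04c11db7 (init_crc32() itself lies outside the hunks shown):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t crc32tab[256];

    /* MSB-first table generation; 0x04c11db7 is an assumption for illustration. */
    static void
    init_crc32_sketch(void)
    {
            uint32_t c;
            int i, k;

            for (i = 0; i < 256; i++) {
                    c = (uint32_t)i << 24;
                    for (k = 0; k < 8; k++)
                            c = (c & 0x80000000) ? (c << 1) ^ 0x04c11db7 : (c << 1);
                    crc32tab[i] = c;
            }
    }

    /* Mirrors the updated kernel loop: hash = crc32tab[(hash >> 24) ^ *cp++] ^ hash << 8 */
    static uint32_t
    hash_string_sketch(const char *cp)
    {
            uint32_t hash = 0;

            while (*cp != '\0')
                    hash = crc32tab[(hash >> 24) ^ (unsigned char)*cp++] ^ (hash << 8);
            return hash;
    }

    int
    main(void)
    {
            init_crc32_sketch();
            printf("0x%08x\n", hash_string_sketch("vfs_cache.c"));
            return 0;
    }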
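cache_enter_locked() caps negative entries by deleting the oldest whenever ncs_negtotal exceeds desiredNegNodes; note that the hunk also drops the explicit TAILQ_REMOVE that used to precede cache_delete(), since cache_delete() already unlinks negative entries from neghead. A small sketch of the aging scheme (the limit of 4 is for demonstration only):

    #include <sys/queue.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct negentry {
            TAILQ_ENTRY(negentry) link;
            int id;
    };

    static TAILQ_HEAD(, negentry) neghead = TAILQ_HEAD_INITIALIZER(neghead);
    static int negtotal;
    static const int desired_neg = 4;       /* stands in for desiredNegNodes */

    static void
    enter_negative(int id)
    {
            struct negentry *np = malloc(sizeof(*np));

            np->id = id;
            TAILQ_INSERT_TAIL(&neghead, np, link);  /* newest at the tail */

            if (++negtotal > desired_neg) {
                    struct negentry *oldest = TAILQ_FIRST(&neghead);

                    TAILQ_REMOVE(&neghead, oldest, link);
                    free(oldest);
                    negtotal--;
            }
    }

    int
    main(void)
    {
            struct negentry *np;
            int i;

            for (i = 1; i <= 6; i++)
                    enter_negative(i);

            TAILQ_FOREACH(np, &neghead, link)
                    printf("%d ", np->id);          /* prints: 3 4 5 6 */
            printf("\n");
            return 0;
    }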
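Finally, the string table: vfs_addname()/vfs_removename() intern names so that equal strings share one refcounted allocation, compared by pointer identity on removal, and cache_enter_create() takes two references up front (one consumed by the cache entry, one kept for v_name). A minimal single-list stand-in for the kernel's hashed, per-bucket-locked table:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct istr {
            struct istr *next;
            unsigned     refcount;
            char         str[];
    };

    static struct istr *head;

    static const char *
    addname(const char *name)
    {
            struct istr *e;

            for (e = head; e != NULL; e = e->next) {
                    if (strcmp(e->str, name) == 0) {
                            e->refcount++;          /* existing entry: bump the ref */
                            return e->str;
                    }
            }
            e = malloc(sizeof(*e) + strlen(name) + 1);
            strcpy(e->str, name);
            e->refcount = 1;
            e->next = head;
            head = e;
            return e->str;
    }

    static void
    removename(const char *str)
    {
            struct istr *e, **pp;

            for (pp = &head; (e = *pp) != NULL; pp = &e->next) {
                    if (e->str == str) {            /* pointer identity, as in the kernel */
                            if (--e->refcount == 0) {
                                    *pp = e->next;
                                    free(e);
                            }
                            return;
                    }
            }
    }

    int
    main(void)
    {
            const char *a = addname("vfs_cache.c");
            const char *b = addname("vfs_cache.c"); /* second ref, same pointer */

            printf("%s %s\n", a, a == b ? "(shared)" : "(distinct)");
            removename(b);
            removename(a);
            return 0;
    }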