X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/378393581903b274cb7a4d18e0d978071a6b592d..HEAD:/bsd/vfs/vfs_cache.c diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index 8cb282de6..8f75a2736 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. - * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. - * - * @APPLE_LICENSE_HEADER_END@ + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ /* @@ -58,16 +64,29 @@ * * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. + */ #include #include #include #include #include +#include #include #include -#include +#include #include #include +#include +#include + +#if CONFIG_MACF +#include +#endif /* * Name caching works as follows: @@ -90,28 +109,70 @@ * Structures associated with name cacheing. 
*/ -LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ -u_long nchashmask; -u_long nchash; /* size of hash table - 1 */ -long numcache; /* number of cache entries allocated */ -int desiredNodes; -int desiredNegNodes; -TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */ -TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */ -struct nchstats nchstats; /* cache effectiveness statistics */ +ZONE_DECLARE(namecache_zone, "namecache", sizeof(struct namecache), ZC_NONE); + +LIST_HEAD(nchashhead, namecache) * nchashtbl; /* Hash Table */ +u_long nchashmask; +u_long nchash; /* size of hash table - 1 */ +long numcache; /* number of cache entries allocated */ +int desiredNodes; +int desiredNegNodes; +int ncs_negtotal; +int nc_disabled = 0; +TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */ +TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */ + + +#if COLLECT_STATS + +struct nchstats nchstats; /* cache effectiveness statistics */ + +#define NCHSTAT(v) { \ + nchstats.v++; \ +} +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock() + +#else + +#define NCHSTAT(v) +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared() + +#endif + /* vars for name cache list lock */ -lck_grp_t * namecache_lck_grp; -lck_grp_attr_t * namecache_lck_grp_attr; -lck_attr_t * namecache_lck_attr; -lck_mtx_t * namecache_mtx_lock; +static LCK_GRP_DECLARE(namecache_lck_grp, "Name Cache"); +static LCK_RW_DECLARE(namecache_rw_lock, &namecache_lck_grp); + +static LCK_GRP_DECLARE(strcache_lck_grp, "String Cache"); +static LCK_ATTR_DECLARE(strcache_lck_attr, 0, 0); +LCK_RW_DECLARE_ATTR(strtable_rw_lock, &strcache_lck_grp, &strcache_lck_attr); + +static LCK_GRP_DECLARE(rootvnode_lck_grp, "rootvnode"); +LCK_RW_DECLARE(rootvnode_rw_lock, &rootvnode_lck_grp); + +#define NUM_STRCACHE_LOCKS 1024 + +lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS]; + static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp); -static int remove_name_locked(const char *); -static char *add_name_locked(const char *, size_t, u_int, u_int); +static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int); static void init_string_table(void); static void cache_delete(struct namecache *, int); -static void dump_string_table(void); +static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname); +static void cache_purge_locked(vnode_t vp, kauth_cred_t *credp); + +#ifdef DUMP_STRING_TABLE +/* + * Internal dump function used for debugging + */ +void dump_string_table(void); +#endif /* DUMP_STRING_TABLE */ static void init_crc32(void); static unsigned int crc32tab[256]; @@ -120,116 +181,648 @@ static unsigned int crc32tab[256]; #define NCHHASH(dvp, hash_val) \ (&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask]) +/* + * This function tries to check if a directory vp is a subdirectory of dvp + * only from valid v_parent pointers. It is called with the name cache lock + * held and does not drop the lock anytime inside the function. + * + * It returns a boolean that indicates whether or not it was able to + * successfully infer the parent/descendent relationship via the v_parent + * pointers, or if it could not infer such relationship and that the decision + * must be delegated to the owning filesystem. 
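The NCHHASH() macro above picks a hash chain by XORing the directory's vnode id with the component-name hash and masking with nchashmask; because the mask is the table size minus one, the table must be a power of two. A minimal user-space sketch of the same bucket math, with made-up stand-in values:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy illustration of the NCHHASH bucket selection:
 * bucket = (dir_id ^ name_hash) & mask, where mask = table_size - 1.
 * All values here are hypothetical stand-ins, not real kernel state.
 */
int
main(void)
{
    uint32_t nchashmask = 4096 - 1;     /* power-of-two table size minus 1 */
    uint32_t dir_vnode_id = 0x1234;     /* stand-in for dvp->v_id */
    uint32_t name_hash = 0x9e3779b9;    /* stand-in for cnp->cn_hash */

    uint32_t bucket = (dir_vnode_id ^ name_hash) & nchashmask;
    printf("bucket index: %u\n", bucket);
    return 0;
}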
+ * + * If it does not defer the decision, i.e. it was successfuly able to determine + * the parent/descendent relationship, *is_subdir tells the caller if vp is a + * subdirectory of dvp. + * + * If the decision is deferred, *next_vp is where it stopped i.e. *next_vp + * is the vnode whose parent is to be determined from the filesystem. + * *is_subdir, in this case, is not indicative of anything and should be + * ignored. + * + * The return value and output args should be used as follows : + * + * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp); + * if (!defer) { + * if (*is_subdir) + * vp is subdirectory; + * else + * vp is not a subdirectory; + * } else { + * if (*next_vp) + * check this vnode's parent from the filesystem + * else + * error (likely because of forced unmount). + * } + * + */ +static boolean_t +cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir, + vnode_t *next_vp) +{ + vnode_t tvp = vp; + int defer = FALSE; + + *is_subdir = FALSE; + *next_vp = NULLVP; + while (1) { + mount_t tmp; + + if (tvp == dvp) { + *is_subdir = TRUE; + break; + } else if (tvp == rootvnode) { + /* *is_subdir = FALSE */ + break; + } + tmp = tvp->v_mount; + while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered && + tvp != dvp && tvp != rootvnode) { + tvp = tmp->mnt_vnodecovered; + tmp = tvp->v_mount; + } -// -// This function builds the path to a filename in "buff". The -// length of the buffer *INCLUDING* the trailing zero byte is -// returned in outlen. NOTE: the length includes the trailing -// zero byte and thus the length is one greater than what strlen -// would return. This is important and lots of code elsewhere -// in the kernel assumes this behavior. -// + /* + * If dvp is not at the top of a mount "stack" then + * vp is not a subdirectory of dvp either. + */ + if (tvp == dvp || tvp == rootvnode) { + /* *is_subdir = FALSE */ + break; + } + + if (!tmp) { + defer = TRUE; + *next_vp = NULLVP; + break; + } + + if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) { + defer = TRUE; + *next_vp = tvp; + break; + } + + tvp = tvp->v_parent; + } + + return defer; +} + +/* maximum times retry from potentially transient errors in vnode_issubdir */ +#define MAX_ERROR_RETRY 3 + +/* + * This function checks if a given directory (vp) is a subdirectory of dvp. + * It walks backwards from vp and if it hits dvp in its parent chain, + * it is a subdirectory. If it encounters the root directory, it is not + * a subdirectory. + * + * This function returns an error if it is unsuccessful and 0 on success. + * + * On entry (and exit) vp has an iocount and if this function has to take + * any iocounts on other vnodes in the parent chain traversal, it releases them. + */ int -build_path(vnode_t first_vp, char *buff, int buflen, int *outlen) +vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) { - vnode_t vp = first_vp; - char *end, *str; - int len, ret=0, counter=0; + vnode_t start_vp, tvp; + vnode_t vp_with_iocount; + int error = 0; + char dotdotbuf[] = ".."; + int error_retry_count = 0; /* retry count for potentially transient + * errors */ + + *is_subdir = FALSE; + tvp = start_vp = vp; + /* + * Anytime we acquire an iocount in this function, we save the vnode + * in this variable and release it before exiting. 
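A user-space sketch of the same parent-pointer walk over a toy tree; the types and names are illustrative stand-ins, and a NULL parent plays the role of a missing v_parent (or a hardlink), forcing the caller to defer to the filesystem:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a vnode: only the parent pointer matters here. */
struct node {
    struct node *parent;    /* NULL when unknown (forces a "defer") */
    bool is_root;
};

/*
 * Returns true if the relationship could be decided from parent pointers
 * alone; *is_subdir is then valid. Returns false to "defer" (the kernel
 * would ask the filesystem), leaving *next at the node whose parent is
 * unknown.
 */
static bool
check_issubdir(struct node *vp, struct node *dvp, bool *is_subdir, struct node **next)
{
    struct node *tvp = vp;

    *is_subdir = false;
    *next = NULL;
    for (;;) {
        if (tvp == dvp) {
            *is_subdir = true;
            return true;
        }
        if (tvp->is_root) {
            return true;            /* hit "/": not a subdirectory */
        }
        if (tvp->parent == NULL) {
            *next = tvp;            /* parent unknown: defer */
            return false;
        }
        tvp = tvp->parent;
    }
}

int
main(void)
{
    struct node root = { NULL, true };
    struct node a = { &root, false };
    struct node b = { &a, false };
    bool is_subdir;
    struct node *next;

    if (check_issubdir(&b, &a, &is_subdir, &next)) {
        printf("decided: %s\n", is_subdir ? "subdir" : "not a subdir");
    } else {
        printf("defer to filesystem\n");
    }
    return 0;
}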
+ */ + vp_with_iocount = NULLVP; + + while (1) { + boolean_t defer; + vnode_t pvp; + uint32_t vid; + struct componentname cn; + boolean_t is_subdir_locked = FALSE; + + if (tvp == dvp) { + *is_subdir = TRUE; + break; + } else if (tvp == rootvnode) { + /* *is_subdir = FALSE */ + break; + } + + NAME_CACHE_LOCK_SHARED(); + + defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked, + &tvp); + + if (defer && tvp) { + vid = vnode_vid(tvp); + } + + NAME_CACHE_UNLOCK(); - end = &buff[buflen-1]; + if (!defer) { + *is_subdir = is_subdir_locked; + break; + } + + if (!tvp) { + if (error_retry_count++ < MAX_ERROR_RETRY) { + tvp = vp; + continue; + } + error = ENOENT; + break; + } + + if (tvp != start_vp) { + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + vp_with_iocount = NULLVP; + } + + error = vnode_getwithvid(tvp, vid); + if (error) { + if (error_retry_count++ < MAX_ERROR_RETRY) { + tvp = vp; + error = 0; + continue; + } + break; + } + + vp_with_iocount = tvp; + } + + bzero(&cn, sizeof(cn)); + cn.cn_nameiop = LOOKUP; + cn.cn_flags = ISLASTCN | ISDOTDOT; + cn.cn_context = ctx; + cn.cn_pnbuf = &dotdotbuf[0]; + cn.cn_pnlen = sizeof(dotdotbuf); + cn.cn_nameptr = cn.cn_pnbuf; + cn.cn_namelen = 2; + + pvp = NULLVP; + if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) { + break; + } + + if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) { + (void)vnode_update_identity(tvp, pvp, NULL, 0, 0, + VNODE_UPDATE_PARENT); + } + + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + } + + vp_with_iocount = tvp = pvp; + } + + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + } + + return error; +} + +/* + * This function builds the path in "buff" from the supplied vnode. + * The length of the buffer *INCLUDING* the trailing zero byte is + * returned in outlen. NOTE: the length includes the trailing zero + * byte and thus the length is one greater than what strlen would + * return. This is important and lots of code elsewhere in the kernel + * assumes this behavior. + * + * This function can call vnop in file system if the parent vnode + * does not exist or when called for hardlinks via volfs path. + * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present + * in the name cache and does not enter the file system. + * + * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when + * we encounter ENOENT during path reconstruction. ENOENT means that + * one of the parents moved while we were building the path. The + * caller can special handle this case by calling build_path again. + * + * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return path + * that is relative to the nearest mount point, i.e. do not + * cross over mount points during building the path. + * + * passed in vp must have a valid io_count reference + * + * If parent vnode is non-NULL it also must have an io count. This + * allows build_path_with_parent to be safely called for operations + * unlink, rmdir and rename that already have io counts on the target + * and the directory. In this way build_path_with_parent does not have + * to try and obtain an additional io count on the parent. Taking an + * io count ont the parent can lead to dead lock if a forced unmount + * occures at the right moment. For a fuller explaination on how this + * can occur see the comment for vn_getpath_with_parent. 
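A user-space analogue of the ".." walk that vnode_issubdir performs with VNOP_LOOKUP: climb via openat(2) until ".." resolves to the same (st_dev, st_ino) pair, which marks the root. This only illustrates the walking idea, not the kernel's locking or iocount management:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
    int fd = open(".", O_RDONLY | O_DIRECTORY);
    int depth = 0;

    while (fd >= 0) {
        struct stat cur, up;
        int pfd = openat(fd, "..", O_RDONLY | O_DIRECTORY);

        if (pfd < 0) {
            break;
        }
        if (fstat(fd, &cur) < 0 || fstat(pfd, &up) < 0) {
            close(pfd);
            break;
        }
        if (cur.st_dev == up.st_dev && cur.st_ino == up.st_ino) {
            close(pfd);             /* ".." == self: reached "/" */
            break;
        }
        close(fd);
        fd = pfd;                   /* step up one directory */
        depth++;
    }
    if (fd >= 0) {
        close(fd);
    }
    printf("directories between cwd and /: %d\n", depth);
    return 0;
}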
+ * + */ +int +build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen, + int *outlen, size_t *mntpt_outlen, int flags, vfs_context_t ctx) +{ + vnode_t vp, tvp; + vnode_t vp_with_iocount; + vnode_t proc_root_dir_vp; + char *end; + char *mntpt_end; + const char *str; + unsigned int len; + int ret = 0; + int fixhardlink; + + if (first_vp == NULLVP) { + return EINVAL; + } + + if (buflen <= 1) { + return ENOSPC; + } + + /* + * Grab the process fd so we can evaluate fd_rdir. + */ + if (vfs_context_proc(ctx)->p_fd && !(flags & BUILDPATH_NO_PROCROOT)) { + proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir; + } else { + proc_root_dir_vp = NULL; + } + + vp_with_iocount = NULLVP; +again: + vp = first_vp; + + end = &buff[buflen - 1]; *end = '\0'; + mntpt_end = NULL; /* - * if this is the root dir of a file system... + * Catch a special corner case here: chroot to /full/path/to/dir, chdir to + * it, then open it. Without this check, the path to it will be + * /full/path/to/dir instead of "/". */ - if (vp && (vp->v_flag & VROOT) && vp->v_mount) { - /* - * then if it's the root fs, just put in a '/' and get out of here - */ - if (vp->v_mount->mnt_flag & MNT_ROOTFS) { - *--end = '/'; - goto out; + if (proc_root_dir_vp == first_vp) { + *--end = '/'; + goto out; + } + + /* + * holding the NAME_CACHE_LOCK in shared mode is + * sufficient to stabilize both the vp->v_parent chain + * and the 'vp->v_mount->mnt_vnodecovered' chain + * + * if we need to drop this lock, we must first grab the v_id + * from the vnode we're currently working with... if that + * vnode doesn't already have an io_count reference (the vp + * passed in comes with one), we must grab a reference + * after we drop the NAME_CACHE_LOCK via vnode_getwithvid... + * deadlocks may result if you call vnode_get while holding + * the NAME_CACHE_LOCK... we lazily release the reference + * we pick up the next time we encounter a need to drop + * the NAME_CACHE_LOCK or before we return from this routine + */ + NAME_CACHE_LOCK_SHARED(); + +#if CONFIG_FIRMLINKS + if (!(flags & BUILDPATH_NO_FIRMLINK) && + (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) { + vp = vp->v_fmlink; + } +#endif + + /* + * Check if this is the root of a file system. + */ + while (vp && vp->v_flag & VROOT) { + if (vp->v_mount == NULL) { + ret = EINVAL; + goto out_unlock; + } + if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) { + /* + * It's the root of the root file system, so it's + * just "/". + */ + *--end = '/'; + + goto out_unlock; } else { - /* - * else just use the covered vnode to get the mount path + /* + * This the root of the volume and the caller does not + * want to cross mount points. Therefore just return + * '/' as the relative path. */ - vp = vp->v_mount->mnt_vnodecovered; +#if CONFIG_FIRMLINKS + if (!(flags & BUILDPATH_NO_FIRMLINK) && + (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) { + vp = vp->v_fmlink; + } else +#endif + if (flags & BUILDPATH_VOLUME_RELATIVE) { + *--end = '/'; + goto out_unlock; + } else { + vp = vp->v_mount->mnt_vnodecovered; + if (!mntpt_end && vp) { + mntpt_end = end; + } + } } } - name_cache_lock(); - while (vp && vp->v_parent != vp) { - /* - * the maximum depth of a file system hierarchy is MAXPATHLEN/2 - * (with single-char names separated by slashes). we panic if - * we've ever looped more than that. 
+ while ((vp != NULLVP) && (vp->v_parent != vp)) { + int vid; + + /* + * For hardlinks the v_name may be stale, so if its OK + * to enter a file system, ask the file system for the + * name and parent (below). */ - if (counter++ > MAXPATHLEN/2) { - panic("build_path: vnode parent chain is too long! vp 0x%x\n", vp); - } - str = vp->v_name; + fixhardlink = (vp->v_flag & VISHARDLINK) && + (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) && + !(flags & BUILDPATH_NO_FS_ENTER); + + if (!fixhardlink) { + str = vp->v_name; + + if (str == NULL || *str == '\0') { + if (vp->v_parent != NULL) { + ret = EINVAL; + } else { + ret = ENOENT; + } + goto out_unlock; + } + len = (unsigned int)strlen(str); + /* + * Check that there's enough space (including space for the '/') + */ + if ((unsigned int)(end - buff) < (len + 1)) { + ret = ENOSPC; + goto out_unlock; + } + /* + * Copy the name backwards. + */ + str += len; - if (str == NULL) { - if (vp->v_parent != NULL) { - ret = EINVAL; + for (; len > 0; len--) { + *--end = *--str; } - break; + /* + * Add a path separator. + */ + *--end = '/'; } - len = strlen(str); /* - * check that there's enough space (make sure to include space for the '/') + * Walk up the parent chain. */ - if ((end - buff) < (len + 1)) { - ret = ENOSPC; - break; + if (((vp->v_parent != NULLVP) && !fixhardlink) || + (flags & BUILDPATH_NO_FS_ENTER)) { + /* + * In this if () block we are not allowed to enter the filesystem + * to conclusively get the most accurate parent identifier. + * As a result, if 'vp' does not identify '/' and it + * does not have a valid v_parent, then error out + * and disallow further path construction + */ + if ((vp->v_parent == NULLVP) && (rootvnode != vp)) { + /* + * Only '/' is allowed to have a NULL parent + * pointer. Upper level callers should ideally + * re-drive name lookup on receiving a ENOENT. + */ + ret = ENOENT; + + /* The code below will exit early if 'tvp = vp' == NULL */ + } + vp = vp->v_parent; + + /* + * if the vnode we have in hand isn't a directory and it + * has a v_parent, then we started with the resource fork + * so skip up to avoid getting a duplicate copy of the + * file name in the path. + */ + if (vp && !vnode_isdir(vp) && vp->v_parent) { + vp = vp->v_parent; + } + } else { + /* + * No parent, go get it if supported. + */ + struct vnode_attr va; + vnode_t dvp; + + /* + * Make sure file system supports obtaining a path from id. + */ + if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) { + ret = ENOENT; + goto out_unlock; + } + vid = vp->v_id; + + NAME_CACHE_UNLOCK(); + + if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) { + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + vp_with_iocount = NULLVP; + } + if (vnode_getwithvid(vp, vid)) { + goto again; + } + vp_with_iocount = vp; + } + VATTR_INIT(&va); + VATTR_WANTED(&va, va_parentid); + + if (fixhardlink) { + VATTR_WANTED(&va, va_name); + va.va_name = zalloc(ZV_NAMEI); + } else { + va.va_name = NULL; + } + /* + * Ask the file system for its parent id and for its name (optional). + */ + ret = vnode_getattr(vp, &va, ctx); + + if (fixhardlink) { + if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) { + str = va.va_name; + vnode_update_identity(vp, NULL, str, (unsigned int)strlen(str), 0, VNODE_UPDATE_NAME); + } else if (vp->v_name) { + str = vp->v_name; + ret = 0; + } else { + ret = ENOENT; + goto bad_news; + } + len = (unsigned int)strlen(str); + + /* + * Check that there's enough space. 
+ */ + if ((unsigned int)(end - buff) < (len + 1)) { + ret = ENOSPC; + } else { + /* Copy the name backwards. */ + str += len; + + for (; len > 0; len--) { + *--end = *--str; + } + /* + * Add a path separator. + */ + *--end = '/'; + } +bad_news: + zfree(ZV_NAMEI, va.va_name); + } + if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) { + ret = ENOENT; + goto out; + } + /* + * Ask the file system for the parent vnode. + */ + if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) { + goto out; + } + + if (!fixhardlink && (vp->v_parent != dvp)) { + vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT); + } + + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + } + vp = dvp; + vp_with_iocount = vp; + + NAME_CACHE_LOCK_SHARED(); + + /* + * if the vnode we have in hand isn't a directory and it + * has a v_parent, then we started with the resource fork + * so skip up to avoid getting a duplicate copy of the + * file name in the path. + */ + if (vp && !vnode_isdir(vp) && vp->v_parent) { + vp = vp->v_parent; + } } - /* - * copy it backwards - */ - str += len; - for (; len > 0; len--) { - *--end = *--str; + if (vp && (flags & BUILDPATH_CHECKACCESS)) { + vid = vp->v_id; + + NAME_CACHE_UNLOCK(); + + if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) { + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + vp_with_iocount = NULLVP; + } + if (vnode_getwithvid(vp, vid)) { + goto again; + } + vp_with_iocount = vp; + } + if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) { + goto out; /* no peeking */ + } + NAME_CACHE_LOCK_SHARED(); } - /* - * put in the path separator - */ - *--end = '/'; /* - * walk up the chain (as long as we're not the root) + * When a mount point is crossed switch the vp. + * Continue until we find the root or we find + * a vnode that's not the root of a mounted + * file system. */ - if (vp == first_vp && (vp->v_flag & VROOT)) { - if (vp->v_mount && vp->v_mount->mnt_vnodecovered) { - vp = vp->v_mount->mnt_vnodecovered->v_parent; + tvp = vp; + + while (tvp) { + if (tvp == proc_root_dir_vp) { + goto out_unlock; /* encountered the root */ + } + +#if CONFIG_FIRMLINKS + if (!(flags & BUILDPATH_NO_FIRMLINK) && + (tvp->v_flag & VFMLINKTARGET) && tvp->v_fmlink) { + tvp = tvp->v_fmlink; + break; + } +#endif + + if (!(tvp->v_flag & VROOT) || !tvp->v_mount) { + break; /* not the root of a mounted FS */ + } + if (flags & BUILDPATH_VOLUME_RELATIVE) { + /* Do not cross over mount points */ + tvp = NULL; } else { - vp = NULLVP; + tvp = tvp->v_mount->mnt_vnodecovered; + if (!mntpt_end && tvp) { + mntpt_end = end; + } } - } else { - vp = vp->v_parent; } - /* - * check if we're crossing a mount point and - * switch the vp if we are. - */ - if (vp && (vp->v_flag & VROOT) && vp->v_mount) { - vp = vp->v_mount->mnt_vnodecovered; + if (tvp == NULLVP) { + goto out_unlock; } + vp = tvp; } - name_cache_unlock(); +out_unlock: + NAME_CACHE_UNLOCK(); out: + if (vp_with_iocount) { + vnode_put(vp_with_iocount); + } /* - * slide it down to the beginning of the buffer + * Slide the name down to the beginning of the buffer. */ memmove(buff, end, &buff[buflen] - end); - - *outlen = &buff[buflen] - end; // length includes the trailing zero byte - + + /* + * length includes the trailing zero byte + */ + *outlen = (int)(&buff[buflen] - end); + if (mntpt_outlen && mntpt_end) { + *mntpt_outlen = (size_t)*outlen - (size_t)(&buff[buflen] - mntpt_end); + } + + /* One of the parents was moved during path reconstruction. 
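The core copy-backwards technique build_path uses, reduced to a user-space sketch over a toy parent chain (memcpy replaces the kernel's byte-by-byte reverse copy; the returned length includes the trailing NUL, matching the convention documented above):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a vnode with a cached name and parent. */
struct node {
    const char *name;
    struct node *parent;    /* NULL at the root */
};

/*
 * Fill the buffer from the end, prepending "/<name>" for each ancestor,
 * then slide the result to the front with memmove, exactly as build_path
 * does. Returns the length including the trailing NUL, or -1 on overflow
 * (the ENOSPC analogue).
 */
static int
toy_build_path(struct node *vp, char *buff, int buflen)
{
    char *end = &buff[buflen - 1];

    *end = '\0';
    while (vp != NULL && vp->parent != NULL) {
        size_t len = strlen(vp->name);

        if ((size_t)(end - buff) < len + 1) {
            return -1;                      /* not enough space */
        }
        end -= len;
        memcpy(end, vp->name, len);         /* copy the name */
        *--end = '/';                       /* add a path separator */
        vp = vp->parent;
    }
    if (end == &buff[buflen - 1]) {
        *--end = '/';                       /* vp itself was the root */
    }
    memmove(buff, end, &buff[buflen] - end);    /* slide to the front */
    return (int)(&buff[buflen] - end);          /* includes the NUL */
}

int
main(void)
{
    struct node root = { "/", NULL };
    struct node usr = { "usr", &root };
    struct node bin = { "bin", &usr };
    char buf[64];
    int len = toy_build_path(&bin, buf, sizeof(buf));

    printf("%s (outlen %d)\n", buf, len);   /* "/usr/bin (outlen 9)" */
    return 0;
}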
+ * The caller is interested in knowing whether any of the + * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN. + */ + if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) { + ret = EAGAIN; + } + return ret; } +int +build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx) +{ + return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, NULL, flags, ctx); +} /* * return NULLVP if vp's parent doesn't @@ -239,10 +832,13 @@ out: vnode_t vnode_getparent(vnode_t vp) { - vnode_t pvp = NULLVP; - int pvid; + vnode_t pvp = NULLVP; + int pvid; + + NAME_CACHE_LOCK_SHARED(); + + pvp = vp->v_parent; - name_cache_lock(); /* * v_parent is stable behind the name_cache lock * however, the only thing we can really guarantee @@ -250,41 +846,86 @@ vnode_getparent(vnode_t vp) * parent of 'vp' at the time we took the name_cache lock... * once we drop the lock, vp could get re-parented */ - if ( (pvp = vp->v_parent) != NULLVP ) { - pvid = pvp->v_id; - - name_cache_unlock(); + if (pvp != NULLVP) { + pvid = pvp->v_id; - if (vnode_getwithvid(pvp, pvid) != 0) - pvp = NULL; - } else - name_cache_unlock(); + NAME_CACHE_UNLOCK(); - return (pvp); + if (vnode_getwithvid(pvp, pvid) != 0) { + pvp = NULL; + } + } else { + NAME_CACHE_UNLOCK(); + } + return pvp; } -char * +const char * vnode_getname(vnode_t vp) { - char *name = NULL; + const char *name = NULL; - name_cache_lock(); - - if (vp->v_name) - name = add_name_locked(vp->v_name, strlen(vp->v_name), 0, 0); - name_cache_unlock(); + NAME_CACHE_LOCK_SHARED(); - return (name); + if (vp->v_name) { + name = vfs_addname(vp->v_name, (unsigned int)strlen(vp->v_name), 0, 0); + } + NAME_CACHE_UNLOCK(); + + return name; } void -vnode_putname(char *name) +vnode_putname(const char *name) { - name_cache_lock(); + vfs_removename(name); +} + +static const char unknown_vnodename[] = "(unknown vnode name)"; - remove_name_locked(name); +const char * +vnode_getname_printable(vnode_t vp) +{ + const char *name = vnode_getname(vp); + if (name != NULL) { + return name; + } - name_cache_unlock(); + switch (vp->v_type) { + case VCHR: + case VBLK: + { + /* + * Create an artificial dev name from + * major and minor device number + */ + char dev_name[64]; + (void) snprintf(dev_name, sizeof(dev_name), + "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b', + major(vp->v_rdev), minor(vp->v_rdev)); + /* + * Add the newly created dev name to the name + * cache to allow easier cleanup. Also, + * vfs_addname allocates memory for the new name + * and returns it. 
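The artificial device-name format used above, as a stand-alone snippet; the dev_t value is a made-up example, and macOS declares major()/minor()/makedev() in <sys/types.h>:

#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
    dev_t rdev = makedev(1, 3);     /* hypothetical character device */
    char dev_name[64];

    /* Same "%c(major, minor)" rendering as vnode_getname_printable. */
    (void)snprintf(dev_name, sizeof(dev_name), "%c(%u, %u)",
        'c', (unsigned)major(rdev), (unsigned)minor(rdev));
    printf("%s\n", dev_name);       /* prints "c(1, 3)" */
    return 0;
}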
+ */ + NAME_CACHE_LOCK_SHARED(); + name = vfs_addname(dev_name, (unsigned int)strlen(dev_name), 0, 0); + NAME_CACHE_UNLOCK(); + return name; + } + default: + return unknown_vnodename; + } +} + +void +vnode_putname_printable(const char *name) +{ + if (name == unknown_vnodename) { + return; + } + vnode_putname(name); } @@ -302,53 +943,147 @@ vnode_putname(char *name) * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp */ void -vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int name_hashval, int flags) +vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags) { - struct namecache *ncp; - vnode_t old_parentvp = NULLVP; - + struct namecache *ncp; + vnode_t old_parentvp = NULLVP; + int isstream = (vp->v_flag & VISNAMEDSTREAM); + int kusecountbumped = 0; + kauth_cred_t tcred = NULL; + const char *vname = NULL; + const char *tname = NULL; + + if (name_len < 0) { + return; + } if (flags & VNODE_UPDATE_PARENT) { - if (dvp && vnode_ref(dvp) != 0) - dvp = NULLVP; - } else - dvp = NULLVP; - name_cache_lock(); - - if ( (flags & VNODE_UPDATE_NAME) && (name != vp->v_name) ) { - if (vp->v_name != NULL) { - remove_name_locked(vp->v_name); - vp->v_name = NULL; + if (dvp && vnode_ref(dvp) != 0) { + dvp = NULLVP; + } + /* Don't count a stream's parent ref during unmounts */ + if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) { + vnode_lock_spin(dvp); + ++dvp->v_kusecount; + kusecountbumped = 1; + vnode_unlock(dvp); } - if (name && *name) { - if (name_len == 0) - name_len = strlen(name); - vp->v_name = add_name_locked(name, name_len, name_hashval, 0); + } else { + dvp = NULLVP; + } + if ((flags & VNODE_UPDATE_NAME)) { + if (name != vp->v_name) { + if (name && *name) { + if (name_len == 0) { + name_len = (int)strlen(name); + } + tname = vfs_addname(name, name_len, name_hashval, 0); + } + } else { + flags &= ~VNODE_UPDATE_NAME; } } - if (flags & VNODE_UPDATE_PARENT) { - if (dvp != vp && dvp != vp->v_parent) { - old_parentvp = vp->v_parent; - vp->v_parent = dvp; - dvp = NULLVP; + if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGEFIRMLINK))) { + NAME_CACHE_LOCK(); + +#if CONFIG_FIRMLINKS + if (flags & VNODE_UPDATE_PURGEFIRMLINK) { + vnode_t old_fvp = vp->v_fmlink; + if (old_fvp) { + vnode_lock_spin(vp); + vp->v_flag &= ~VFMLINKTARGET; + vp->v_fmlink = NULLVP; + vnode_unlock(vp); + NAME_CACHE_UNLOCK(); + + /* + * vnode_rele can result in cascading series of + * usecount releases. The combination of calling + * vnode_recycle and dont_reenter (3rd arg to + * vnode_rele_internal) ensures we don't have + * that issue. 
+ */ + vnode_recycle(old_fvp); + vnode_rele_internal(old_fvp, O_EVTONLY, 1, 0); - if (old_parentvp) - flags |= VNODE_UPDATE_CACHE; + NAME_CACHE_LOCK(); + } + } +#endif + + if ((flags & VNODE_UPDATE_PURGE)) { + if (vp->v_parent) { + vp->v_parent->v_nc_generation++; + } + + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { + cache_delete(ncp, 1); + } + + while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) { + cache_delete(ncp, 1); + } + + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = NOCRED; + vp->v_authorized_actions = 0; + vp->v_cred_timestamp = 0; + } + if ((flags & VNODE_UPDATE_NAME)) { + vname = vp->v_name; + vp->v_name = tname; + } + if (flags & VNODE_UPDATE_PARENT) { + if (dvp != vp && dvp != vp->v_parent) { + old_parentvp = vp->v_parent; + vp->v_parent = dvp; + dvp = NULLVP; + + if (old_parentvp) { + flags |= VNODE_UPDATE_CACHE; + } + } + } + if (flags & VNODE_UPDATE_CACHE) { + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { + cache_delete(ncp, 1); + } + } + NAME_CACHE_UNLOCK(); + + if (vname != NULL) { + vfs_removename(vname); + } + + if (IS_VALID_CRED(tcred)) { + kauth_cred_unref(&tcred); } } - if (flags & VNODE_UPDATE_CACHE) { - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) - cache_delete(ncp, 1); + if (dvp != NULLVP) { + /* Back-out the ref we took if we lost a race for vp->v_parent. */ + if (kusecountbumped) { + vnode_lock_spin(dvp); + if (dvp->v_kusecount > 0) { + --dvp->v_kusecount; + } + vnode_unlock(dvp); + } + vnode_rele(dvp); } - name_cache_unlock(); - - if (dvp != NULLVP) - vnode_rele(dvp); - if (old_parentvp) { - struct uthread *ut; + struct uthread *ut; - ut = get_bsdthread_info(current_thread()); + if (isstream) { + vnode_lock_spin(old_parentvp); + if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) { + --old_parentvp->v_kusecount; + } + vnode_unlock(old_parentvp); + } + ut = get_bsdthread_info(current_thread()); /* * indicated to vnode_rele that it shouldn't do a @@ -360,10 +1095,8 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam ut->uu_defer_reclaims = 1; ut->uu_vreclaims = NULLVP; - while ( (vp = old_parentvp) != NULLVP ) { - - vnode_lock(vp); - + while ((vp = old_parentvp) != NULLVP) { + vnode_lock_spin(vp); vnode_rele_internal(vp, 0, 0, 1); /* @@ -373,44 +1106,50 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam * out the v_parent field... we'll drop the reference * that was held on the next iteration of this loop... * this short circuits a potential deep recursion if we - * have a long chain of parents in this state... + * have a long chain of parents in this state... 
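The "tcred" idiom used above, in generic form: detach the object while the lock is held, but run the potentially heavyweight release only after the lock is dropped. Types and names here are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached_cred;       /* stand-in for vp->v_cred */

static void
release_cred(void *cred)        /* stand-in for kauth_cred_unref() */
{
    free(cred);                 /* possibly expensive teardown */
}

static void
uncache_cred(void)
{
    void *tcred;

    pthread_mutex_lock(&cache_lock);
    tcred = cached_cred;        /* detach while protected */
    cached_cred = NULL;
    pthread_mutex_unlock(&cache_lock);

    if (tcred != NULL) {
        release_cred(tcred);    /* heavy work outside the lock */
    }
}

int
main(void)
{
    cached_cred = malloc(16);
    uncache_cred();
    return 0;
}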
* we'll sit in this loop until we run into * a parent in this chain that is not in this state * - * make our check and the node_rele atomic + * make our check and the vnode_rele atomic * with respect to the current vnode we're working on * by holding the vnode lock * if vnode_rele deferred the vnode_reclaim and has put * this vnode on the list to be reaped by us, than * it has left this vnode with an iocount == 1 */ - if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) && - ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { - /* + if ((vp->v_iocount == 1) && (vp->v_usecount == 0) && + ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { + /* * vnode_rele wanted to do a vnode_reclaim on this vnode * it should be sitting on the head of the uu_vreclaims chain * pull the parent pointer now so that when we do the * vnode_reclaim for each of the vnodes in the uu_vreclaims * list, we won't recurse back through here + * + * need to do a convert here in case vnode_rele_internal + * returns with the lock held in the spin mode... it + * can drop and retake the lock under certain circumstances */ - name_cache_lock(); + vnode_lock_convert(vp); + + NAME_CACHE_LOCK(); old_parentvp = vp->v_parent; vp->v_parent = NULLVP; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } else { - /* + /* * we're done... we ran into a vnode that isn't * being terminated */ - old_parentvp = NULLVP; + old_parentvp = NULLVP; } vnode_unlock(vp); } ut->uu_defer_reclaims = 0; - while ( (vp = ut->uu_vreclaims) != NULLVP) { - ut->uu_vreclaims = vp->v_defer_reclaimlist; - + while ((vp = ut->uu_vreclaims) != NULLVP) { + ut->uu_vreclaims = vp->v_defer_reclaimlist; + /* * vnode_put will drive the vnode_reclaim if * we are still the only reference on this vnode @@ -420,6 +1159,139 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam } } +#if CONFIG_FIRMLINKS +errno_t +vnode_setasfirmlink(vnode_t vp, vnode_t target_vp) +{ + int error = 0; + vnode_t old_target_vp = NULLVP; + vnode_t old_target_vp_v_fmlink = NULLVP; + kauth_cred_t target_vp_cred = NULL; + kauth_cred_t old_target_vp_cred = NULL; + + if (!vp) { + return EINVAL; + } + + if (target_vp) { + if (vp->v_fmlink == target_vp) { /* Will be checked again under the name cache lock */ + return 0; + } + + /* + * Firmlink source and target will take both a usecount + * and kusecount on each other. 
+ */ + if ((error = vnode_ref_ext(target_vp, O_EVTONLY, VNODE_REF_FORCE))) { + return error; + } + + if ((error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE))) { + vnode_rele_ext(target_vp, O_EVTONLY, 1); + return error; + } + } + + NAME_CACHE_LOCK(); + + old_target_vp = vp->v_fmlink; + if (target_vp && (target_vp == old_target_vp)) { + NAME_CACHE_UNLOCK(); + return 0; + } + vp->v_fmlink = target_vp; + + vnode_lock_spin(vp); + vp->v_flag &= ~VFMLINKTARGET; + vnode_unlock(vp); + + if (target_vp) { + target_vp->v_fmlink = vp; + vnode_lock_spin(target_vp); + target_vp->v_flag |= VFMLINKTARGET; + vnode_unlock(target_vp); + cache_purge_locked(vp, &target_vp_cred); + } + + if (old_target_vp) { + old_target_vp_v_fmlink = old_target_vp->v_fmlink; + old_target_vp->v_fmlink = NULLVP; + vnode_lock_spin(old_target_vp); + old_target_vp->v_flag &= ~VFMLINKTARGET; + vnode_unlock(old_target_vp); + cache_purge_locked(vp, &old_target_vp_cred); + } + + NAME_CACHE_UNLOCK(); + + if (target_vp_cred && IS_VALID_CRED(target_vp_cred)) { + kauth_cred_unref(&target_vp_cred); + } + + if (old_target_vp) { + if (old_target_vp_cred && IS_VALID_CRED(old_target_vp_cred)) { + kauth_cred_unref(&old_target_vp_cred); + } + + vnode_rele_ext(old_target_vp, O_EVTONLY, 1); + if (old_target_vp_v_fmlink) { + vnode_rele_ext(old_target_vp_v_fmlink, O_EVTONLY, 1); + } + } + + return 0; +} + +errno_t +vnode_getfirmlink(vnode_t vp, vnode_t *target_vp) +{ + int error; + + if (!vp->v_fmlink) { + return ENODEV; + } + + NAME_CACHE_LOCK_SHARED(); + if (vp->v_fmlink && !(vp->v_flag & VFMLINKTARGET) && + (vnode_get(vp->v_fmlink) == 0)) { + vnode_t tvp = vp->v_fmlink; + + vnode_lock_spin(tvp); + if (tvp->v_lflag & (VL_TERMINATE | VL_DEAD)) { + vnode_unlock(tvp); + NAME_CACHE_UNLOCK(); + vnode_put(tvp); + return ENOENT; + } + if (!(tvp->v_flag & VFMLINKTARGET)) { + panic("firmlink target for vnode %p does not have flag set", vp); + } + vnode_unlock(tvp); + *target_vp = tvp; + error = 0; + } else { + *target_vp = NULLVP; + error = ENODEV; + } + NAME_CACHE_UNLOCK(); + return error; +} + +#else /* CONFIG_FIRMLINKS */ + +errno_t +vnode_setasfirmlink(__unused vnode_t vp, __unused vnode_t src_vp) +{ + return ENOTSUP; +} + +errno_t +vnode_getfirmlink(__unused vnode_t vp, __unused vnode_t *target_vp) +{ + return ENOTSUP; +} + +#endif /* * Mark a vnode as having multiple hard links. HFS makes use of this @@ -430,9 +1302,10 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int nam * so that HFS can post-process the lookup. Also, volfs will call * VNOP_GETATTR2 to determine the parent, instead of using v_parent. */ -void vnode_set_hard_link(vnode_t vp) +void +vnode_setmultipath(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); /* * In theory, we're changing the vnode's identity as far as the @@ -451,162 +1324,252 @@ void vnode_set_hard_link(vnode_t vp) } -void vnode_uncache_credentials(vnode_t vp) + +/* + * backwards compatibility + */ +void +vnode_uncache_credentials(vnode_t vp) +{ + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); +} + + +/* + * use the exclusive form of NAME_CACHE_LOCK to protect the update of the + * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions + * we use this lock so that we can look at the v_cred and v_authorized_actions + * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path', + * which is the super-hot path... 
if we are updating the authorized actions for this + * vnode, we are already in the super-slow and far less frequented path so its not + * that bad that we take the lock exclusive for this case... of course we strive + * to hold it for the minimum amount of time possible + */ + +void +vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action) { - kauth_cred_t ucred = NULL; + kauth_cred_t tcred = NOCRED; - if (vp->v_cred) { - vnode_lock(vp); + NAME_CACHE_LOCK(); - ucred = vp->v_cred; - vp->v_cred = NULL; + vp->v_authorized_actions &= ~action; - vnode_unlock(vp); + if (action == KAUTH_INVALIDATE_CACHED_RIGHTS && + IS_VALID_CRED(vp->v_cred)) { + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = NOCRED; + } + NAME_CACHE_UNLOCK(); - if (ucred) - kauth_cred_rele(ucred); + if (tcred != NOCRED) { + kauth_cred_unref(&tcred); } } -void vnode_cache_credentials(vnode_t vp, vfs_context_t context) +extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */ + +boolean_t +vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action) { - kauth_cred_t ucred; - kauth_cred_t tcred = NOCRED; - struct timeval tv; + kauth_cred_t ucred; + boolean_t retval = FALSE; - ucred = vfs_context_ucred(context); + /* Boot argument to defeat rights caching */ + if (bootarg_vnode_cache_defeat) { + return FALSE; + } - if (vp->v_cred != ucred || (vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE)) { - vnode_lock(vp); + if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { + /* + * a TTL is enabled on the rights cache... handle it here + * a TTL of 0 indicates that no rights should be cached + */ + if (vp->v_mount->mnt_authcache_ttl) { + if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) { + /* + * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones), + * we will only allow a SEARCH right on a directory to be cached... + * that cached right always has a default TTL associated with it + */ + if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) { + vp = NULLVP; + } + } + if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) { + vnode_uncache_authorized_action(vp, vp->v_authorized_actions); + vp = NULLVP; + } + } else { + vp = NULLVP; + } + } + if (vp != NULLVP) { + ucred = vfs_context_ucred(ctx); - microuptime(&tv); - vp->v_cred_timestamp = tv.tv_sec; + NAME_CACHE_LOCK_SHARED(); - if (vp->v_cred != ucred) { - kauth_cred_ref(ucred); - - tcred = vp->v_cred; - vp->v_cred = ucred; + if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) { + retval = TRUE; } - vnode_unlock(vp); - - if (tcred) - kauth_cred_rele(tcred); + + NAME_CACHE_UNLOCK(); } + return retval; } -/* reverse_lookup - lookup by walking back up the parent chain while leveraging - * use of the name cache lock in order to protect our starting vnode. - * NOTE - assumes you already have search access to starting point. 
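The shape of the cached-rights test in vnode_cache_is_authorized: a hit requires the same credential identity and every requested action bit already present in the cached mask, all read under a shared lock. A simplified user-space sketch with hypothetical types:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_vnode {
    const void *cred;               /* identity of who was authorized */
    uint32_t authorized_actions;    /* bitmask of cached rights */
};

static pthread_rwlock_t name_cache_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool
cache_is_authorized(struct toy_vnode *vp, const void *cred, uint32_t action)
{
    bool hit;

    pthread_rwlock_rdlock(&name_cache_lock);    /* shared: the hot path */
    hit = (vp->cred == cred) &&
        ((vp->authorized_actions & action) == action);
    pthread_rwlock_unlock(&name_cache_lock);
    return hit;
}

int
main(void)
{
    static const int me;                        /* stand-in credential */
    struct toy_vnode vp = { &me, 0x1 | 0x2 };

    return cache_is_authorized(&vp, &me, 0x2) ? 0 : 1;
}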
- * returns 0 when we have reached the root, current working dir, or chroot root - * - */ -int -reverse_lookup(vnode_t start_vp, vnode_t *lookup_vpp, struct filedesc *fdp, vfs_context_t context, int *dp_authorized) -{ - int vid, done = 0; - int auth_opaque = 0; - vnode_t dp = start_vp; - vnode_t vp = NULLVP; - kauth_cred_t ucred; - struct timeval tv; - ucred = vfs_context_ucred(context); - *lookup_vpp = start_vp; +void +vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action) +{ + kauth_cred_t tcred = NOCRED; + kauth_cred_t ucred; + struct timeval tv; + boolean_t ttl_active = FALSE; - name_cache_lock(); + ucred = vfs_context_ucred(ctx); - if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) { - auth_opaque = 1; - microuptime(&tv); + if (!IS_VALID_CRED(ucred) || action == 0) { + return; } - for (;;) { - *dp_authorized = 0; - if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED)) - break; - if (dp->v_cred != ucred) - break; + if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { /* - * indicate that we're allowed to traverse this directory... - * even if we bail for some reason, this information is valid and is used - * to avoid doing a vnode_authorize + * a TTL is enabled on the rights cache... handle it here + * a TTL of 0 indicates that no rights should be cached */ - *dp_authorized = 1; - - if ((dp->v_flag & VROOT) != 0 || /* Hit "/" */ - (dp == fdp->fd_cdir) || /* Hit process's working directory */ - (dp == fdp->fd_rdir)) { /* Hit process chroot()-ed root */ - done = 1; - break; + if (vp->v_mount->mnt_authcache_ttl == 0) { + return; } - if ( (vp = dp->v_parent) == NULLVP) - break; + if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) { + /* + * only cache SEARCH action for filesystems marked + * MNTK_AUTH_OPAQUE on VDIRs... + * the lookup_path code will time these out + */ + if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) { + return; + } + } + ttl_active = TRUE; - dp = vp; - *lookup_vpp = dp; - } /* for (;;) */ + microuptime(&tv); + } + NAME_CACHE_LOCK(); - vid = dp->v_id; - - name_cache_unlock(); - - if (done == 0 && dp != start_vp) { - if (vnode_getwithvid(dp, vid) != 0) { - *lookup_vpp = start_vp; - } + if (vp->v_cred != ucred) { + kauth_cred_ref(ucred); + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + tcred = vp->v_cred; + vp->v_cred = ucred; + vp->v_authorized_actions = 0; } + if (ttl_active == TRUE && vp->v_authorized_actions == 0) { + /* + * only reset the timestamnp on the + * first authorization cached after the previous + * timer has expired or we're switching creds... + * 'vnode_cache_is_authorized' will clear the + * authorized actions if the TTL is active and + * it has expired + */ + vp->v_cred_timestamp = (int)tv.tv_sec; + } + vp->v_authorized_actions |= action; - return((done == 1) ? 
0 : -1); + NAME_CACHE_UNLOCK(); + + if (IS_VALID_CRED(tcred)) { + kauth_cred_unref(&tcred); + } } -int -cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, vfs_context_t context, int *trailing_slash, int *dp_authorized) + +boolean_t +vnode_cache_is_stale(vnode_t vp) { - char *cp; /* pointer into pathname argument */ - int vid, vvid; - int auth_opaque = 0; - vnode_t vp = NULLVP; - vnode_t tdp = NULLVP; - kauth_cred_t ucred; - struct timeval tv; - unsigned int hash; + struct timeval tv; + boolean_t retval; + + microuptime(&tv); - ucred = vfs_context_ucred(context); - *trailing_slash = 0; + if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) { + retval = TRUE; + } else { + retval = FALSE; + } + + return retval; +} - name_cache_lock(); - if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) { - auth_opaque = 1; +/* + * Returns: 0 Success + * ERECYCLE vnode was recycled from underneath us. Force lookup to be re-driven from namei. + * This errno value should not be seen by anyone outside of the kernel. + */ +int +cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, + vfs_context_t ctx, int *dp_authorized, vnode_t last_dp) +{ + char *cp; /* pointer into pathname argument */ + int vid; + int vvid = 0; /* protected by vp != NULLVP */ + vnode_t vp = NULLVP; + vnode_t tdp = NULLVP; + kauth_cred_t ucred; + boolean_t ttl_enabled = FALSE; + struct timeval tv; + mount_t mp; + unsigned int hash; + int error = 0; + boolean_t dotdotchecked = FALSE; + +#if CONFIG_TRIGGERS + vnode_t trigger_vp; +#endif /* CONFIG_TRIGGERS */ + + ucred = vfs_context_ucred(ctx); + ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH); + + NAME_CACHE_LOCK_SHARED(); + + if (dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { + ttl_enabled = TRUE; microuptime(&tv); } for (;;) { - /* + /* * Search a directory. * * The cn_hash value is for use by cache_lookup * The last component of the filename is left accessible via * cnp->cn_nameptr for callers that need the name. */ - hash = 0; + hash = 0; cp = cnp->cn_nameptr; while (*cp && (*cp != '/')) { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; } /* * the crc generator can legitimately generate * a 0... however, 0 for us means that we * haven't computed a hash, so use 1 instead */ - if (hash == 0) - hash = 1; + if (hash == 0) { + hash = 1; + } cnp->cn_hash = hash; - cnp->cn_namelen = cp - cnp->cn_nameptr; + cnp->cn_namelen = (int)(cp - cnp->cn_nameptr); ndp->ni_pathlen -= cnp->cn_namelen; ndp->ni_next = cp; @@ -619,11 +1582,11 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, * and non-existing files that won't be directories specially later. */ while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) { - cp++; + cp++; ndp->ni_pathlen--; if (*cp == '\0') { - *trailing_slash = 1; + ndp->ni_flag |= NAMEI_TRAILINGSLASH; *ndp->ni_next = '\0'; } } @@ -631,46 +1594,237 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT); - if (*cp == '\0') - cnp->cn_flags |= ISLASTCN; + if (*cp == '\0') { + cnp->cn_flags |= ISLASTCN; + } - if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') - cnp->cn_flags |= ISDOTDOT; + if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' 
&& cnp->cn_nameptr[0] == '.') { + cnp->cn_flags |= ISDOTDOT; + } *dp_authorized = 0; +#if NAMEDRSRCFORK + /* + * Process a request for a file's resource fork. + * + * Consume the _PATH_RSRCFORKSPEC suffix and tag the path. + */ + if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) && + (cp[1] == '.' && cp[2] == '.') && + bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) { + /* Skip volfs file systems that don't support native streams. */ + if ((dp->v_mount != NULL) && + (dp->v_mount->mnt_flag & MNT_DOVOLFS) && + (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { + goto skiprsrcfork; + } + cnp->cn_flags |= CN_WANTSRSRCFORK; + cnp->cn_flags |= ISLASTCN; + ndp->ni_next[0] = '\0'; + ndp->ni_pathlen = 1; + } +skiprsrcfork: +#endif + +#if CONFIG_MACF + + /* + * Name cache provides authorization caching (see below) + * that will short circuit MAC checks in lookup(). + * We must perform MAC check here. On denial + * dp_authorized will remain 0 and second check will + * be perfomed in lookup(). + */ + if (!(cnp->cn_flags & DONOTAUTH)) { + error = mac_vnode_check_lookup(ctx, dp, cnp); + if (error) { + NAME_CACHE_UNLOCK(); + goto errorout; + } + } +#endif /* MAC */ + if (ttl_enabled && + (dp->v_mount->mnt_authcache_ttl == 0 || + ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl))) { + break; + } + + /* + * NAME_CACHE_LOCK holds these fields stable + * + * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly + * so we make an ugly check for root here. root is always + * allowed and breaking out of here only to find out that is + * authorized by virtue of being root is very very expensive. + * However, the check for not root is valid only for filesystems + * which use local authorization. + * + * XXX: Remove the check for root when we can reliably set + * KAUTH_VNODE_SEARCHBYANYONE as root. + */ + if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) && + !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) && + (ttl_enabled || !vfs_context_issuser(ctx))) { + break; + } + + /* + * indicate that we're allowed to traverse this directory... + * even if we fail the cache lookup or decide to bail for + * some other reason, this information is valid and is used + * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP + */ + *dp_authorized = 1; + + if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) { + /* + * Moving the firmlinks section to be first to catch a corner case: + * When using DOTDOT to get a parent of a firmlink, we want the + * firmlink source to be resolved even if cn_nameiop != LOOKUP. + * This is because lookup() traverses DOTDOT by calling VNOP_LOOKUP + * and has no notion about firmlinks + */ +#if CONFIG_FIRMLINKS + if (cnp->cn_flags & ISDOTDOT && dp->v_fmlink && (dp->v_flag & VFMLINKTARGET)) { + dp = dp->v_fmlink; + } +#endif + if (cnp->cn_nameiop != LOOKUP) { + break; + } + if (cnp->cn_flags & LOCKPARENT) { + break; + } + if (cnp->cn_flags & NOCACHE) { + break; + } + + if (cnp->cn_flags & ISDOTDOT) { + /* + * Force directory hardlinks to go to + * file system for ".." requests. + */ + if ((dp->v_flag & VISHARDLINK)) { + break; + } + /* + * Quit here only if we can't use + * the parent directory pointer or + * don't have one. Otherwise, we'll + * use it below. + */ + if ((dp->v_flag & VROOT) || + dp == ndp->ni_rootdir || + dp->v_parent == NULLVP) { + break; + } + } + } + + if ((cnp->cn_flags & CN_SKIPNAMECACHE)) { + /* + * Force lookup to go to the filesystem with + * all cnp fields set up. 
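The TTL test applied by vnode_cache_is_stale and by the fast-path check above, in user space: compare a monotonic "now" against the timestamp recorded when the rights were cached. ttl_seconds stands in for mnt_authcache_ttl:

#include <stdbool.h>
#include <time.h>

static bool
rights_cache_is_stale(time_t cached_at, time_t ttl_seconds)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);   /* analogue of microuptime() */
    return (now.tv_sec - cached_at) > ttl_seconds;
}

int
main(void)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    /* Rights cached 10 seconds ago with a 5 second TTL are stale. */
    return rights_cache_is_stale(now.tv_sec - 10, 5) ? 0 : 1;
}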
+ */ + break; + } + + /* + * "." and ".." aren't supposed to be cached, so check + * for them before checking the cache. + */ + if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { + vp = dp; + } else if ((cnp->cn_flags & ISDOTDOT)) { + /* + * If this is a chrooted process, we need to check if + * the process is trying to break out of its chrooted + * jail. We do that by trying to determine if dp is + * a subdirectory of ndp->ni_rootdir. If we aren't + * able to determine that by the v_parent pointers, we + * will leave the fast path. + * + * Since this function may see dotdot components + * many times and it has the name cache lock held for + * the entire duration, we optimise this by doing this + * check only once per cache_lookup_path call. + * If dotdotchecked is set, it means we've done this + * check once already and don't need to do it again. + */ + if (!dotdotchecked && (ndp->ni_rootdir != rootvnode)) { + vnode_t tvp = dp; + boolean_t defer = FALSE; + boolean_t is_subdir = FALSE; + + defer = cache_check_vnode_issubdir(tvp, + ndp->ni_rootdir, &is_subdir, &tvp); + + if (defer) { + /* defer to Filesystem */ + break; + } else if (!is_subdir) { + /* + * This process is trying to break out + * of its chrooted jail, so all its + * dotdot accesses will be translated to + * its root directory. + */ + vp = ndp->ni_rootdir; + } else { + /* + * All good, let this dotdot access + * proceed normally + */ + vp = dp->v_parent; + } + dotdotchecked = TRUE; + } else { + vp = dp->v_parent; + } + } else { + if ((vp = cache_lookup_locked(dp, cnp)) == NULLVP) { + break; + } + + if ((vp->v_flag & VISHARDLINK)) { + /* + * The file system wants a VNOP_LOOKUP on this vnode + */ + vp = NULL; + break; + } + } + if ((cnp->cn_flags & ISLASTCN)) { + break; + } + + if (vp->v_type != VDIR) { + if (vp->v_type != VLNK) { + vp = NULL; + } + break; + } - if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED)) - break; + if ((mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) { + vnode_t tmp_vp = mp->mnt_realrootvp; + if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation || + mp->mnt_realrootvp_vid != tmp_vp->v_id) { + break; + } + vp = tmp_vp; + } - if (dp->v_cred != ucred) - break; +#if CONFIG_TRIGGERS /* - * indicate that we're allowed to traverse this directory... - * even if we fail the cache lookup or decide to bail for - * some other reason, this information is valid and is used - * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP + * After traversing all mountpoints stacked here, if we have a + * trigger in hand, resolve it. Note that we don't need to + * leave the fast path if the mount has already happened. 
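A toy model of the chroot protection sketched above: a ".." that would step outside the process root resolves to the root itself. Names are illustrative, and the kernel decides "outside" with cache_check_vnode_issubdir rather than this naive walk:

#include <stdbool.h>
#include <stddef.h>

struct node {
    struct node *parent;
};

static bool
is_under(struct node *vp, struct node *rootdir)
{
    for (; vp != NULL; vp = vp->parent) {
        if (vp == rootdir) {
            return true;
        }
    }
    return false;
}

static struct node *
lookup_dotdot(struct node *dp, struct node *rootdir)
{
    /*
     * Mirror of the cache_lookup_path policy: ".." from the jail root,
     * or from anywhere outside the jail, is clamped to the root so a
     * chrooted process cannot walk out.
     */
    if (dp == rootdir || !is_under(dp, rootdir)) {
        return rootdir;
    }
    return dp->parent;
}

int
main(void)
{
    struct node realroot = { NULL };
    struct node jail = { &realroot };
    struct node inside = { &jail };

    /* ".." from inside stays inside; from the jail root it clamps. */
    return (lookup_dotdot(&inside, &jail) == &jail &&
        lookup_dotdot(&jail, &jail) == &jail) ? 0 : 1;
}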
*/ - *dp_authorized = 1; - - if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) { - if (cnp->cn_nameiop != LOOKUP) - break; - if (cnp->cn_flags & (LOCKPARENT | NOCACHE | ISDOTDOT)) - break; + if (vp->v_resolve) { + break; } - if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) - break; - - if ( (cnp->cn_flags & ISLASTCN) ) - break; +#endif /* CONFIG_TRIGGERS */ - if (vp->v_type != VDIR) { - if (vp->v_type != VLNK) - vp = NULL; - break; - } - if (vp->v_mountedhere && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) - break; dp = vp; vp = NULLVP; @@ -678,21 +1832,21 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, cnp->cn_nameptr = ndp->ni_next + 1; ndp->ni_pathlen--; while (*cnp->cn_nameptr == '/') { - cnp->cn_nameptr++; + cnp->cn_nameptr++; ndp->ni_pathlen--; } } - if (vp != NULLVP) - vvid = vp->v_id; + if (vp != NULLVP) { + vvid = vp->v_id; + } vid = dp->v_id; - - name_cache_unlock(); + NAME_CACHE_UNLOCK(); if ((vp != NULLVP) && (vp->v_type != VLNK) && ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) { - /* - * if we've got a child and it's the last component, and + /* + * if we've got a child and it's the last component, and * the lookup doesn't need to return the parent then we * can skip grabbing an iocount on the parent, since all * we're going to do with it is a vnode_put just before @@ -700,129 +1854,180 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, * we need the parent in case the link happens to be * a relative pathname. */ - tdp = dp; - dp = NULLVP; + tdp = dp; + dp = NULLVP; } else { need_dp: - /* + /* * return the last directory we looked at - * with an io reference held + * with an io reference held. If it was the one passed + * in as a result of the last iteration of VNOP_LOOKUP, + * it should already hold an io ref. No need to increase ref. */ - if (dp == ndp->ni_usedvp) { - /* - * if this vnode matches the one passed in via USEDVP - * than this context already holds an io_count... just - * use vnode_get to get an extra ref for lookup to play - * with... can't use the getwithvid variant here because - * it will block behind a vnode_drain which would result - * in a deadlock (since we already own an io_count that the - * vnode_drain is waiting on)... vnode_get grabs the io_count - * immediately w/o waiting... it always succeeds - */ - vnode_get(dp); - } else if ( (vnode_getwithvid(dp, vid)) ) { - /* - * failure indicates the vnode - * changed identity or is being - * TERMINATED... in either case - * punt this lookup - */ - return (ENOENT); + if (last_dp != dp) { + if (dp == ndp->ni_usedvp) { + /* + * if this vnode matches the one passed in via USEDVP + * than this context already holds an io_count... just + * use vnode_get to get an extra ref for lookup to play + * with... can't use the getwithvid variant here because + * it will block behind a vnode_drain which would result + * in a deadlock (since we already own an io_count that the + * vnode_drain is waiting on)... vnode_get grabs the io_count + * immediately w/o waiting... it always succeeds + */ + vnode_get(dp); + } else if ((error = vnode_getwithvid_drainok(dp, vid))) { + /* + * failure indicates the vnode + * changed identity or is being + * TERMINATED... in either case + * punt this lookup. + * + * don't necessarily return ENOENT, though, because + * we really want to go back to disk and make sure it's + * there or not if someone else is changing this + * vnode. 
That being said, the one case where we do want + * to return ENOENT is when the vnode's mount point is + * in the process of unmounting and we might cause a deadlock + * in our attempt to take an iocount. An ENODEV error return + * is from vnode_get* is an indication this but we change that + * ENOENT for upper layers. + */ + if (error == ENODEV) { + error = ENOENT; + } else { + error = ERECYCLE; + } + goto errorout; + } } } if (vp != NULLVP) { - if ( (vnode_getwithvid(vp, vvid)) ) { - vp = NULLVP; + if ((vnode_getwithvid_drainok(vp, vvid))) { + vp = NULLVP; - /* + /* * can't get reference on the vp we'd like * to return... if we didn't grab a reference * on the directory (due to fast path bypass), * then we need to do it now... we can't return - * with both ni_dvp and ni_vp NULL, and no + * with both ni_dvp and ni_vp NULL, and no * error condition */ if (dp == NULLVP) { - dp = tdp; + dp = tdp; goto need_dp; } } } + ndp->ni_dvp = dp; ndp->ni_vp = vp; - return (0); +#if CONFIG_TRIGGERS + trigger_vp = vp ? vp : dp; + if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) { + error = vnode_trigger_resolve(trigger_vp, ndp, ctx); + if (error) { + if (vp) { + vnode_put(vp); + } + if (dp) { + vnode_put(dp); + } + goto errorout; + } + } +#endif /* CONFIG_TRIGGERS */ + +errorout: + /* + * If we came into cache_lookup_path after an iteration of the lookup loop that + * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with a io ref + * on it. It is now the job of cache_lookup_path to drop the ref on this vnode + * when it is no longer needed. If we get to this point, and last_dp is not NULL + * and it is ALSO not the dvp we want to return to caller of this function, it MUST be + * the case that we got to a subsequent path component and this previous vnode is + * no longer needed. We can then drop the io ref on it. + */ + if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) { + vnode_put(last_dp); + } + + //initialized to 0, should be the same if no error cases occurred. + return error; } static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp) { - register struct namecache *ncp; - register struct nchashhead *ncpp; - register long namelen = cnp->cn_namelen; - char *nameptr = cnp->cn_nameptr; - unsigned int hashval = (cnp->cn_hash & NCHASHMASK); - vnode_t vp; - + struct namecache *ncp; + struct nchashhead *ncpp; + long namelen = cnp->cn_namelen; + unsigned int hashval = cnp->cn_hash; + + if (nc_disabled) { + return NULL; + } + ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { - if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) - break; + if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { + if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) { + break; + } } } - if (ncp == 0) + if (ncp == 0) { /* * We failed to find an entry */ - return (NULL); - - vp = ncp->nc_vp; - if (vp && (vp->v_flag & VISHARDLINK)) { - /* - * The file system wants a VNOP_LOOKUP on this vnode - */ - vp = NULL; + NCHSTAT(ncs_miss); + return NULL; } - - return (vp); + NCHSTAT(ncs_goodhits); + + return ncp->nc_vp; } +unsigned int hash_string(const char *cp, int len); // // Have to take a len argument because we may only need to // hash part of a componentname. 
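A self-contained version of the hash that hash_string below computes, reduced to the NUL-terminated case. The table construction assumes the common MSB-first CRC-32 polynomial 0x04C11DB7; the kernel builds its own table in init_crc32(), which may differ in detail:

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32tab[256];

static void
init_table(void)
{
    for (uint32_t i = 0; i < 256; i++) {
        uint32_t r = i << 24;

        for (int j = 0; j < 8; j++) {
            r = (r & 0x80000000u) ? (r << 1) ^ 0x04C11DB7u : (r << 1);
        }
        crc32tab[i] = r;
    }
}

static uint32_t
toy_hash_string(const char *cp)
{
    uint32_t hash = 0;

    /* Same update step as the kernel loop: table lookup XOR shifted hash. */
    while (*cp != '\0') {
        hash = crc32tab[(hash >> 24) ^ (unsigned char)*cp++] ^ (hash << 8);
    }
    return hash == 0 ? 1 : hash;    /* 0 means "no hash computed" */
}

int
main(void)
{
    init_table();
    printf("0x%08x\n", toy_hash_string("vfs_cache.c"));
    return 0;
}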
// -static unsigned int +unsigned int hash_string(const char *cp, int len) { - unsigned hash = 0; - - if (len) { - while (len--) { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; - } - } else { - while (*cp != '\0') { - hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; - } - } - /* - * the crc generator can legitimately generate - * a 0... however, 0 for us means that we - * haven't computed a hash, so use 1 instead - */ - if (hash == 0) - hash = 1; - return hash; + unsigned hash = 0; + + if (len) { + while (len--) { + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; + } + } else { + while (*cp != '\0') { + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; + } + } + /* + * the crc generator can legitimately generate + * a 0... however, 0 for us means that we + * haven't computed a hash, so use 1 instead + */ + if (hash == 0) { + hash = 1; + } + return hash; } /* - * Lookup an entry in the cache + * Lookup an entry in the cache * - * We don't do this if the segment name is long, simply so the cache + * We don't do this if the segment name is long, simply so the cache * can avoid holding long names (which would either waste space, or * add greatly to the complexity). * @@ -835,106 +2040,199 @@ hash_string(const char *cp, int len) */ int -cache_lookup(dvp, vpp, cnp) - struct vnode *dvp; - struct vnode **vpp; - struct componentname *cnp; +cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { - register struct namecache *ncp; - register struct nchashhead *ncpp; - register long namelen = cnp->cn_namelen; - char *nameptr = cnp->cn_nameptr; - unsigned int hashval = (cnp->cn_hash & NCHASHMASK); + struct namecache *ncp; + struct nchashhead *ncpp; + long namelen = cnp->cn_namelen; + unsigned int hashval; + boolean_t have_exclusive = FALSE; uint32_t vid; - vnode_t vp; + vnode_t vp; + + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } + hashval = cnp->cn_hash; - name_cache_lock(); + if (nc_disabled) { + return 0; + } + + NAME_CACHE_LOCK_SHARED(); +relook: ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { - if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) - break; + if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { + if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) { + break; + } } } /* We failed to find an entry */ if (ncp == 0) { - nchstats.ncs_miss++; - name_cache_unlock(); - return (0); + NCHSTAT(ncs_miss); + NAME_CACHE_UNLOCK(); + return 0; } /* We don't want to have an entry, so dump it */ if ((cnp->cn_flags & MAKEENTRY) == 0) { - nchstats.ncs_badhits++; - cache_delete(ncp, 1); - name_cache_unlock(); - return (0); - } + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); + cache_delete(ncp, 1); + NAME_CACHE_UNLOCK(); + return 0; + } + NAME_CACHE_UNLOCK(); + NAME_CACHE_LOCK(); + have_exclusive = TRUE; + goto relook; + } vp = ncp->nc_vp; /* We found a "positive" match, return the vnode */ - if (vp) { - nchstats.ncs_goodhits++; + if (vp) { + NCHSTAT(ncs_goodhits); vid = vp->v_id; - name_cache_unlock(); + NAME_CACHE_UNLOCK(); if (vnode_getwithvid(vp, vid)) { - name_cache_lock(); - nchstats.ncs_badvid++; - name_cache_unlock(); - return (0); +#if COLLECT_STATS + NAME_CACHE_LOCK(); + NCHSTAT(ncs_badvid); + NAME_CACHE_UNLOCK(); +#endif + return 0; } *vpp = vp; - return (-1); + return -1; } /* We found a negative match, 
and want to create it, so purge */ if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) { - nchstats.ncs_badhits++; - cache_delete(ncp, 1); - name_cache_unlock(); - return (0); + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); + cache_delete(ncp, 1); + NAME_CACHE_UNLOCK(); + return 0; + } + NAME_CACHE_UNLOCK(); + NAME_CACHE_LOCK(); + have_exclusive = TRUE; + goto relook; } /* * We found a "negative" match, ENOENT notifies client of this match. - * The nc_whiteout field records whether this is a whiteout. */ - nchstats.ncs_neghits++; + NCHSTAT(ncs_neghits); + + NAME_CACHE_UNLOCK(); + return ENOENT; +} + +const char * +cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp) +{ + const char *strname; + + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } + + /* + * grab 2 references on the string entered + * one for the cache_enter_locked to consume + * and the second to be consumed by v_name (vnode_create call point) + */ + strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0); + + NAME_CACHE_LOCK(); - if (ncp->nc_whiteout) - cnp->cn_flags |= ISWHITEOUT; - name_cache_unlock(); - return (ENOENT); + cache_enter_locked(dvp, vp, cnp, strname); + + NAME_CACHE_UNLOCK(); + + return strname; +} + + +/* + * Add an entry to the cache... + * but first check to see if the directory + * that this entry is to be associated with has + * had any cache_purges applied since we took + * our identity snapshot... this check needs to + * be done behind the name cache lock + */ +void +cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen) +{ + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } + + NAME_CACHE_LOCK(); + + if (dvp->v_nc_generation == gen) { + (void)cache_enter_locked(dvp, vp, cnp, NULL); + } + + NAME_CACHE_UNLOCK(); } + /* * Add an entry to the cache. */ void -cache_enter(dvp, vp, cnp) - struct vnode *dvp; - struct vnode *vp; - struct componentname *cnp; +cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) { - register struct namecache *ncp, *negp; - register struct nchashhead *ncpp; + const char *strname; + + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } + + /* + * grab 1 reference on the string entered + * for the cache_enter_locked to consume + */ + strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0); + + NAME_CACHE_LOCK(); + + cache_enter_locked(dvp, vp, cnp, strname); + + NAME_CACHE_UNLOCK(); +} - if (cnp->cn_hash == 0) - cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); - name_cache_lock(); +static void +cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname) +{ + struct namecache *ncp, *negp; + struct nchashhead *ncpp; + + if (nc_disabled) { + return; + } - /* if the entry is for -ve caching vp is null */ + /* + * if the entry is for -ve caching vp is null + */ if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) { - /* + /* * someone beat us to the punch.. 
* this vnode is already in the cache */ - name_cache_unlock(); - return; + if (strname != NULL) { + vfs_removename(strname); + } + return; } /* * We allocate a new entry if we are less than the maximum @@ -943,29 +2241,29 @@ cache_enter(dvp, vp, cnp) */ if (numcache < desiredNodes && ((ncp = nchead.tqh_first) == NULL || - ncp->nc_hash.le_prev != 0)) { + ncp->nc_hash.le_prev != 0)) { /* * Allocate one more entry */ - ncp = (struct namecache *)_MALLOC_ZONE((u_long)sizeof *ncp, M_CACHE, M_WAITOK); + ncp = zalloc(namecache_zone); numcache++; } else { /* * reuse an old entry */ - ncp = TAILQ_FIRST(&nchead); + ncp = TAILQ_FIRST(&nchead); TAILQ_REMOVE(&nchead, ncp, nc_entry); if (ncp->nc_hash.le_prev != 0) { - /* - * still in use... we need to - * delete it before re-using it - */ - nchstats.ncs_stolen++; + /* + * still in use... we need to + * delete it before re-using it + */ + NCHSTAT(ncs_stolen); cache_delete(ncp, 0); } } - nchstats.ncs_enters++; + NCHSTAT(ncs_enters); /* * Fill in cache info, if vp is NULL this is a "negative" cache entry. @@ -973,8 +2271,30 @@ cache_enter(dvp, vp, cnp) ncp->nc_vp = vp; ncp->nc_dvp = dvp; ncp->nc_hashval = cnp->cn_hash; - ncp->nc_whiteout = FALSE; - ncp->nc_name = add_name_locked(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); + + if (strname == NULL) { + ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0); + } else { + ncp->nc_name = strname; + } + + // + // If the bytes of the name associated with the vnode differ, + // use the name associated with the vnode since the file system + // may have set that explicitly in the case of a lookup on a + // case-insensitive file system where the case of the looked up + // name differs from what is on disk. For more details, see: + // FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories + // + const char *vn_name = vp ? vp->v_name : NULL; + unsigned int len = vn_name ? (unsigned int)strlen(vn_name) : 0; + if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) { + unsigned int hash = hash_string(vn_name, len); + + vfs_removename(ncp->nc_name); + ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0); + ncp->nc_hashval = hash; + } /* * make us the newest entry in the cache @@ -985,11 +2305,13 @@ cache_enter(dvp, vp, cnp) ncpp = NCHHASH(dvp, cnp->cn_hash); #if DIAGNOSTIC { - register struct namecache *p; + struct namecache *p; - for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) - if (p == ncp) + for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) { + if (p == ncp) { panic("cache_enter: duplicate"); + } + } } #endif /* @@ -998,32 +2320,27 @@ cache_enter(dvp, vp, cnp) LIST_INSERT_HEAD(ncpp, ncp, nc_hash); if (vp) { - /* - * add to the list of name cache entries - * that point at vp - */ + /* + * add to the list of name cache entries + * that point at vp + */ LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link); } else { - /* + /* * this is a negative cache entry (vp == NULL) - * stick it on the negative cache list - * and record the whiteout state + * stick it on the negative cache list. 
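		 *
		 * (illustrative) a later cache_lookup() that hits this entry
		 * reports the known-missing name without consulting the
		 * filesystem:
		 *
		 *	if (cache_lookup(dvp, &vp, cnp) == ENOENT)
		 *		return ENOENT;	// name is cached as non-existent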
*/ - TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry); - - if (cnp->cn_flags & ISWHITEOUT) - ncp->nc_whiteout = TRUE; - nchstats.ncs_negtotal++; - - if (nchstats.ncs_negtotal > desiredNegNodes) { - /* - * if we've reached our desired limit - * of negative cache entries, delete - * the oldest - */ - negp = TAILQ_FIRST(&neghead); - TAILQ_REMOVE(&neghead, negp, nc_un.nc_negentry); + TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry); + ncs_negtotal++; + + if (ncs_negtotal > desiredNegNodes) { + /* + * if we've reached our desired limit + * of negative cache entries, delete + * the oldest + */ + negp = TAILQ_FIRST(&neghead); cache_delete(negp, 1); } } @@ -1031,36 +2348,40 @@ cache_enter(dvp, vp, cnp) * add us to the list of name cache entries that * are children of dvp */ - LIST_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child); - - name_cache_unlock(); + if (vp) { + TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child); + } else { + TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child); + } } /* * Initialize CRC-32 remainder table. */ -static void init_crc32(void) +static void +init_crc32(void) { - /* + /* * the CRC-32 generator polynomial is: * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10 * + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 */ - unsigned int crc32_polynomial = 0x04c11db7; - unsigned int i,j; + unsigned int crc32_polynomial = 0x04c11db7; + unsigned int i, j; /* * pre-calculate the CRC-32 remainder for each possible octet encoding */ - for (i = 0; i < 256; i++) { - unsigned int crc_rem = i << 24; + for (i = 0; i < 256; i++) { + unsigned int crc_rem = i << 24; - for (j = 0; j < 8; j++) { - if (crc_rem & 0x80000000) - crc_rem = (crc_rem << 1) ^ crc32_polynomial; - else - crc_rem = (crc_rem << 1); + for (j = 0; j < 8; j++) { + if (crc_rem & 0x80000000) { + crc_rem = (crc_rem << 1) ^ crc32_polynomial; + } else { + crc_rem = (crc_rem << 1); + } } crc32tab[i] = crc_rem; } @@ -1081,157 +2402,193 @@ nchinit(void) init_crc32(); - nchashtbl = hashinit(MAX(4096, (2 *desiredNodes)), M_CACHE, &nchash); + nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash); nchashmask = nchash; nchash++; init_string_table(); - - /* Allocate mount list lock group attribute and group */ - namecache_lck_grp_attr= lck_grp_attr_alloc_init(); - lck_grp_attr_setstat(namecache_lck_grp_attr); - - namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr); - - /* Allocate mount list lock attribute */ - namecache_lck_attr = lck_attr_alloc_init(); - //lck_attr_setdebug(namecache_lck_attr); - - /* Allocate mount list lock */ - namecache_mtx_lock = lck_mtx_alloc_init(namecache_lck_grp, namecache_lck_attr); + for (int i = 0; i < NUM_STRCACHE_LOCKS; i++) { + lck_mtx_init(&strcache_mtx_locks[i], &strcache_lck_grp, &strcache_lck_attr); + } +} +void +name_cache_lock_shared(void) +{ + lck_rw_lock_shared(&namecache_rw_lock); } void name_cache_lock(void) { - lck_mtx_lock(namecache_mtx_lock); + lck_rw_lock_exclusive(&namecache_rw_lock); } void name_cache_unlock(void) { - lck_mtx_unlock(namecache_mtx_lock); - + lck_rw_done(&namecache_rw_lock); } int -resize_namecache(u_int newsize) +resize_namecache(int newsize) { - struct nchashhead *new_table; - struct nchashhead *old_table; - struct nchashhead *old_head, *head; - struct namecache *entry, *next; - uint32_t i, hashval; - int dNodes, dNegNodes; - u_long new_size, old_size; - - dNegNodes = (newsize / 10); - dNodes = newsize + dNegNodes; - - // we don't support shrinking yet - if (dNodes < desiredNodes) { + struct nchashhead *new_table; + struct 
nchashhead *old_table; + struct nchashhead *old_head, *head; + struct namecache *entry, *next; + uint32_t i, hashval; + int dNodes, dNegNodes, nelements; + u_long new_size, old_size; + + if (newsize < 0) { + return EINVAL; + } + + dNegNodes = (newsize / 10); + dNodes = newsize + dNegNodes; + // we don't support shrinking yet + if (dNodes <= desiredNodes) { + return 0; + } + + if (os_mul_overflow(dNodes, 2, &nelements)) { + return EINVAL; + } + + new_table = hashinit(nelements, M_CACHE, &nchashmask); + new_size = nchashmask + 1; + + if (new_table == NULL) { + return ENOMEM; + } + + NAME_CACHE_LOCK(); + // do the switch! + old_table = nchashtbl; + nchashtbl = new_table; + old_size = nchash; + nchash = new_size; + + // walk the old table and insert all the entries into + // the new table + // + for (i = 0; i < old_size; i++) { + old_head = &old_table[i]; + for (entry = old_head->lh_first; entry != NULL; entry = next) { + // + // XXXdbg - Beware: this assumes that hash_string() does + // the same thing as what happens in + // lookup() over in vfs_lookup.c + hashval = hash_string(entry->nc_name, 0); + entry->nc_hashval = hashval; + head = NCHHASH(entry->nc_dvp, hashval); + + next = entry->nc_hash.le_next; + LIST_INSERT_HEAD(head, entry, nc_hash); + } + } + desiredNodes = dNodes; + desiredNegNodes = dNegNodes; + + NAME_CACHE_UNLOCK(); + FREE(old_table, M_CACHE); + return 0; - } - new_table = hashinit(2 * dNodes, M_CACHE, &nchashmask); - new_size = nchashmask + 1; - - if (new_table == NULL) { - return ENOMEM; - } - - name_cache_lock(); - // do the switch! - old_table = nchashtbl; - nchashtbl = new_table; - old_size = nchash; - nchash = new_size; - - // walk the old table and insert all the entries into - // the new table - // - for(i=0; i < old_size; i++) { - old_head = &old_table[i]; - for (entry=old_head->lh_first; entry != NULL; entry=next) { - // - // XXXdbg - Beware: this assumes that hash_string() does - // the same thing as what happens in - // lookup() over in vfs_lookup.c - hashval = hash_string(entry->nc_name, 0); - entry->nc_hashval = hashval; - head = NCHHASH(entry->nc_dvp, hashval); - - next = entry->nc_hash.le_next; - LIST_INSERT_HEAD(head, entry, nc_hash); - } - } - desiredNodes = dNodes; - desiredNegNodes = dNegNodes; - - name_cache_unlock(); - FREE(old_table, M_CACHE); - - return 0; } static void -cache_delete(struct namecache *ncp, int age_entry) +cache_delete(struct namecache *ncp, int free_entry) { - nchstats.ncs_deletes++; + NCHSTAT(ncs_deletes); - if (ncp->nc_vp) { - LIST_REMOVE(ncp, nc_un.nc_link); + if (ncp->nc_vp) { + LIST_REMOVE(ncp, nc_un.nc_link); } else { - TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry); - nchstats.ncs_negtotal--; + TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry); + ncs_negtotal--; } - LIST_REMOVE(ncp, nc_child); + TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child); LIST_REMOVE(ncp, nc_hash); /* * this field is used to indicate * that the entry is in use and - * must be deleted before it can + * must be deleted before it can * be reused... 
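	 *
	 * (for reference) cache_enter_locked() tests exactly this field
	 * before recycling the head of nchead:
	 *
	 *	if (ncp->nc_hash.le_prev != 0)	// entry still live
	 *		cache_delete(ncp, 0);	// detach it before reuse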
*/ ncp->nc_hash.le_prev = NULL; - if (age_entry) { - /* - * make it the next one available - * for cache_enter's use - */ - TAILQ_REMOVE(&nchead, ncp, nc_entry); - TAILQ_INSERT_HEAD(&nchead, ncp, nc_entry); - } - remove_name_locked(ncp->nc_name); + vfs_removename(ncp->nc_name); ncp->nc_name = NULL; + if (free_entry) { + TAILQ_REMOVE(&nchead, ncp, nc_entry); + zfree(namecache_zone, ncp); + numcache--; + } } /* - * purge the entry associated with the + * purge the entry associated with the * specified vnode from the name cache */ +static void +cache_purge_locked(vnode_t vp, kauth_cred_t *credp) +{ + struct namecache *ncp; + + *credp = NULL; + if ((LIST_FIRST(&vp->v_nclinks) == NULL) && + (TAILQ_FIRST(&vp->v_ncchildren) == NULL) && + (vp->v_cred == NOCRED) && + (vp->v_parent == NULLVP)) { + return; + } + + if (vp->v_parent) { + vp->v_parent->v_nc_generation++; + } + + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { + cache_delete(ncp, 1); + } + + while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) { + cache_delete(ncp, 1); + } + + /* + * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held + */ + *credp = vp->v_cred; + vp->v_cred = NOCRED; + vp->v_authorized_actions = 0; +} + void cache_purge(vnode_t vp) { - struct namecache *ncp; + kauth_cred_t tcred = NULL; - if ((LIST_FIRST(&vp->v_nclinks) == NULL) && (LIST_FIRST(&vp->v_ncchildren) == NULL)) - return; + if ((LIST_FIRST(&vp->v_nclinks) == NULL) && + (TAILQ_FIRST(&vp->v_ncchildren) == NULL) && + (vp->v_cred == NOCRED) && + (vp->v_parent == NULLVP)) { + return; + } - name_cache_lock(); + NAME_CACHE_LOCK(); - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) - cache_delete(ncp, 1); + cache_purge_locked(vp, &tcred); - while ( (ncp = LIST_FIRST(&vp->v_ncchildren)) ) - cache_delete(ncp, 1); + NAME_CACHE_UNLOCK(); - name_cache_unlock(); + if (tcred && IS_VALID_CRED(tcred)) { + kauth_cred_unref(&tcred); + } } /* @@ -1244,15 +2601,19 @@ cache_purge(vnode_t vp) void cache_purge_negatives(vnode_t vp) { - struct namecache *ncp; + struct namecache *ncp, *next_ncp; - name_cache_lock(); + NAME_CACHE_LOCK(); + + TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) { + if (ncp->nc_vp) { + break; + } - LIST_FOREACH(ncp, &vp->v_ncchildren, nc_child) - if (ncp->nc_vp == NULL) - cache_delete(ncp , 1); + cache_delete(ncp, 1); + } - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } /* @@ -1262,16 +2623,15 @@ cache_purge_negatives(vnode_t vp) * entries at the same time. 
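 *
 * (illustrative) the unmount path is the typical caller:
 *
 *	cache_purgevfs(mp);	// drop every entry whose parent dir is on mp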
*/ void -cache_purgevfs(mp) - struct mount *mp; +cache_purgevfs(struct mount *mp) { struct nchashhead *ncpp; struct namecache *ncp; - name_cache_lock(); + NAME_CACHE_LOCK(); /* Scan hash tables for applicable entries */ for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) { -restart: +restart: for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) { if (ncp->nc_dvp->v_mount == mp) { cache_delete(ncp, 0); @@ -1279,7 +2639,7 @@ restart: } } } - name_cache_unlock(); + NAME_CACHE_UNLOCK(); } @@ -1287,210 +2647,248 @@ restart: // // String ref routines // -static LIST_HEAD(stringhead, string_t) *string_ref_table; +static LIST_HEAD(stringhead, string_t) * string_ref_table; static u_long string_table_mask; -static uint32_t max_chain_len=0; -static struct stringhead *long_chain_head=NULL; -static uint32_t filled_buckets=0; -static uint32_t num_dups=0; -static uint32_t nstrings=0; +static uint32_t filled_buckets = 0; + typedef struct string_t { - LIST_ENTRY(string_t) hash_chain; - unsigned char *str; - uint32_t refcount; + LIST_ENTRY(string_t) hash_chain; + const char *str; + uint32_t refcount; } string_t; - -static int +static void resize_string_ref_table(void) { - struct stringhead *new_table; - struct stringhead *old_table; - struct stringhead *old_head, *head; - string_t *entry, *next; - uint32_t i, hashval; - u_long new_mask, old_mask; - - new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); - if (new_table == NULL) { - return ENOMEM; - } - - // do the switch! - old_table = string_ref_table; - string_ref_table = new_table; - old_mask = string_table_mask; - string_table_mask = new_mask; - - printf("resize: max chain len %d, new table size %d\n", - max_chain_len, new_mask + 1); - max_chain_len = 0; - long_chain_head = NULL; - filled_buckets = 0; - - // walk the old table and insert all the entries into - // the new table - // - for(i=0; i <= old_mask; i++) { - old_head = &old_table[i]; - for (entry=old_head->lh_first; entry != NULL; entry=next) { - hashval = hash_string(entry->str, 0); - head = &string_ref_table[hashval & string_table_mask]; - if (head->lh_first == NULL) { - filled_buckets++; - } - - next = entry->hash_chain.le_next; - LIST_INSERT_HEAD(head, entry, hash_chain); - } - } - - FREE(old_table, M_CACHE); - - return 0; + struct stringhead *new_table; + struct stringhead *old_table; + struct stringhead *old_head, *head; + string_t *entry, *next; + uint32_t i, hashval; + u_long new_mask, old_mask; + + /* + * need to hold the table lock exclusively + * in order to grow the table... need to recheck + * the need to resize again after we've taken + * the lock exclusively in case some other thread + * beat us to the punch + */ + lck_rw_lock_exclusive(&strtable_rw_lock); + + if (4 * filled_buckets < ((string_table_mask + 1) * 3)) { + lck_rw_done(&strtable_rw_lock); + return; + } + assert(string_table_mask < INT32_MAX); + new_table = hashinit((int)(string_table_mask + 1) * 2, M_CACHE, &new_mask); + + if (new_table == NULL) { + printf("failed to resize the hash table.\n"); + lck_rw_done(&strtable_rw_lock); + return; + } + + // do the switch! 
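	//
	// note: the pointer and mask swap below is only safe because
	// strtable_rw_lock is held exclusively here: readers compute
	// "hashval & string_table_mask" under the shared lock, so the
	// table and its mask must appear to change together.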
+ old_table = string_ref_table; + string_ref_table = new_table; + old_mask = string_table_mask; + string_table_mask = new_mask; + filled_buckets = 0; + + // walk the old table and insert all the entries into + // the new table + // + for (i = 0; i <= old_mask; i++) { + old_head = &old_table[i]; + for (entry = old_head->lh_first; entry != NULL; entry = next) { + hashval = hash_string((const char *)entry->str, 0); + head = &string_ref_table[hashval & string_table_mask]; + if (head->lh_first == NULL) { + filled_buckets++; + } + next = entry->hash_chain.le_next; + LIST_INSERT_HEAD(head, entry, hash_chain); + } + } + lck_rw_done(&strtable_rw_lock); + + FREE(old_table, M_CACHE); } static void init_string_table(void) { - string_ref_table = hashinit(4096, M_CACHE, &string_table_mask); + string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask); } -char * -vfs_addname(const char *name, size_t len, u_int hashval, u_int flags) +const char * +vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags) { - char * ptr; - - name_cache_lock(); - ptr = add_name_locked(name, len, hashval, flags); - name_cache_unlock(); - - return(ptr); + return add_name_internal(name, len, hashval, FALSE, flags); } -static char * -add_name_locked(const char *name, size_t len, u_int hashval, __unused u_int flags) + +static const char * +add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags) { - struct stringhead *head; - string_t *entry; - uint32_t chain_len = 0; - - // - // If the table gets more than 3/4 full, resize it - // - if (4*filled_buckets >= ((string_table_mask + 1) * 3)) { - if (resize_string_ref_table() != 0) { - printf("failed to resize the hash table.\n"); - } - } - if (hashval == 0) { - hashval = hash_string(name, 0); - } - - head = &string_ref_table[hashval & string_table_mask]; - for (entry=head->lh_first; entry != NULL; chain_len++, entry=entry->hash_chain.le_next) { - if (memcmp(entry->str, name, len) == 0 && entry->str[len] == '\0') { - entry->refcount++; - num_dups++; - break; - } - } - - if (entry == NULL) { - // it wasn't already there so add it. - MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK); - - // have to get "head" again because we could have blocked - // in malloc and thus head could have changed. - // - head = &string_ref_table[hashval & string_table_mask]; - if (head->lh_first == NULL) { - filled_buckets++; + struct stringhead *head; + string_t *entry; + uint32_t chain_len = 0; + uint32_t hash_index; + uint32_t lock_index; + char *ptr; + + if (len > MAXPATHLEN) { + len = MAXPATHLEN; + } + + /* + * if the length already accounts for the null-byte, then + * subtract one so later on we don't index past the end + * of the string. 
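	 *
	 * (illustrative) both of these calls therefore intern the three
	 * bytes "usr":
	 *
	 *	add_name_internal("usr", 4, 0, FALSE, 0);	// len drops to 3
	 *	add_name_internal("usr", 3, 0, FALSE, 0);	// len stays 3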
+ */ + if (len > 0 && name[len - 1] == '\0') { + len--; + } + if (hashval == 0) { + hashval = hash_string(name, len); + } + + /* + * take this lock 'shared' to keep the hash stable + * if someone else decides to grow the pool they + * will take this lock exclusively + */ + lck_rw_lock_shared(&strtable_rw_lock); + + /* + * If the table gets more than 3/4 full, resize it + */ + if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) { + lck_rw_done(&strtable_rw_lock); + + resize_string_ref_table(); + + lck_rw_lock_shared(&strtable_rw_lock); } + hash_index = hashval & string_table_mask; + lock_index = hash_index % NUM_STRCACHE_LOCKS; + + head = &string_ref_table[hash_index]; + + lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]); - entry->str = (char *)((char *)entry + sizeof(string_t)); - strncpy(entry->str, name, len); - entry->str[len] = '\0'; - entry->refcount = 1; - LIST_INSERT_HEAD(head, entry, hash_chain); + for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) { + if (strncmp(entry->str, name, len) == 0 && entry->str[len] == 0) { + entry->refcount++; + break; + } + } + if (entry == NULL) { + lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]); + /* + * it wasn't already there so add it. + */ + entry = kheap_alloc(KHEAP_DEFAULT, sizeof(string_t) + len + 1, Z_WAITOK); - if (chain_len > max_chain_len) { - max_chain_len = chain_len; - long_chain_head = head; + if (head->lh_first == NULL) { + OSAddAtomic(1, &filled_buckets); + } + ptr = (char *)((char *)entry + sizeof(string_t)); + strncpy(ptr, name, len); + ptr[len] = '\0'; + entry->str = ptr; + entry->refcount = 1; + LIST_INSERT_HEAD(head, entry, hash_chain); + } + if (need_extra_ref == TRUE) { + entry->refcount++; } - nstrings++; - } - - return entry->str; + lck_mtx_unlock(&strcache_mtx_locks[lock_index]); + lck_rw_done(&strtable_rw_lock); + + return (const char *)entry->str; } + int vfs_removename(const char *nameref) { - int i; + struct stringhead *head; + string_t *entry; + uint32_t hashval; + uint32_t hash_index; + uint32_t lock_index; + int retval = ENOENT; - name_cache_lock(); - i = remove_name_locked(nameref); - name_cache_unlock(); + hashval = hash_string(nameref, 0); - return(i); - -} + /* + * take this lock 'shared' to keep the hash stable + * if someone else decides to grow the pool they + * will take this lock exclusively + */ + lck_rw_lock_shared(&strtable_rw_lock); + /* + * must compute the head behind the table lock + * since the size and location of the table + * can change on the fly + */ + hash_index = hashval & string_table_mask; + lock_index = hash_index % NUM_STRCACHE_LOCKS; + head = &string_ref_table[hash_index]; -static int -remove_name_locked(const char *nameref) -{ - struct stringhead *head; - string_t *entry; - uint32_t hashval; - char * ptr; - - hashval = hash_string(nameref, 0); - head = &string_ref_table[hashval & string_table_mask]; - for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { - if (entry->str == (unsigned char *)nameref) { - entry->refcount--; - if (entry->refcount == 0) { - LIST_REMOVE(entry, hash_chain); - if (head->lh_first == NULL) { - filled_buckets--; - } - ptr = entry->str; - entry->str = NULL; - nstrings--; + lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]); - FREE(entry, M_TEMP); - } else { - num_dups--; - } + for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) { + if (entry->str == nameref) { + entry->refcount--; - return 0; + if (entry->refcount == 0) { + LIST_REMOVE(entry, hash_chain); + + if 
(head->lh_first == NULL) { + OSAddAtomic(-1, &filled_buckets); + } + } else { + entry = NULL; + } + retval = 0; + break; + } } - } + lck_mtx_unlock(&strcache_mtx_locks[lock_index]); + lck_rw_done(&strtable_rw_lock); + + kheap_free_addr(KHEAP_DEFAULT, entry); - return ENOENT; + return retval; } +#ifdef DUMP_STRING_TABLE void dump_string_table(void) { - struct stringhead *head; - string_t *entry; - u_long i; - - name_cache_lock(); - for (i = 0; i <= string_table_mask; i++) { - head = &string_ref_table[i]; - for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { - printf("%6d - %s\n", entry->refcount, entry->str); - } - } - name_cache_unlock(); + struct stringhead *head; + string_t *entry; + u_long i; + + lck_rw_lock_shared(&strtable_rw_lock); + + for (i = 0; i <= string_table_mask; i++) { + head = &string_ref_table[i]; + for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) { + printf("%6d - %s\n", entry->refcount, entry->str); + } + } + lck_rw_done(&strtable_rw_lock); } +#endif /* DUMP_STRING_TABLE */
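Taken together, vfs_addname() and vfs_removename() give the rest of the VFS
layer a reference-counted string intern table: equal names share a single
heap entry, and the entry is unhashed and freed when its last reference is
dropped. A minimal usage sketch (hypothetical caller, error handling elided):

	const char *a = vfs_addname("example", 7, 0, 0);	/* new entry, refcount 1 */
	const char *b = vfs_addname("example", 7, 0, 0);	/* same entry, refcount 2 */
	assert(a == b);		/* interned: equal strings share storage */
	vfs_removename(b);	/* refcount drops back to 1 */
	vfs_removename(a);	/* refcount 0: entry is unhashed and freed */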