int desiredNodes;       /* NOTE(review): presumably the target size of the name cache — confirm against sysctl setup */
int desiredNegNodes;    /* NOTE(review): presumably the target count of negative entries — confirm */
int ncs_negtotal;       /* NOTE(review): looks like the running count of negative cache entries — confirm */
+int nc_disabled = 0;   /* non-zero short-circuits cache lookup/enter/purge paths (see nc_disabled checks below) */
TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */
TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */
*/
if (((vp->v_parent != NULLVP) && !fixhardlink) ||
(flags & BUILDPATH_NO_FS_ENTER)) {
- vp = vp->v_parent;
+ /*
+ * In this if () block we are not allowed to enter the filesystem
+ * to conclusively get the most accurate parent identifier.
+ * As a result, if 'vp' does not identify '/' and it
+ * does not have a valid v_parent, then error out
+ * and disallow further path construction
+ */
+ if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
+ /* Only '/' is allowed to have a NULL parent pointer */
+ ret = EINVAL;
+
+ /* The code below will exit early if 'tvp = vp' == NULL */
+ }
+ vp = vp->v_parent;
+
/*
* if the vnode we have in hand isn't a directory and it
* has a v_parent, then we started with the resource fork
}
-boolean_t vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
+extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */
+
+boolean_t
+vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
{
kauth_cred_t ucred;
boolean_t retval = FALSE;
+ /* Boot argument to defeat rights caching */
+ if (bootarg_vnode_cache_defeat)
+ return FALSE;
+
if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) {
/*
* a TTL is enabled on the rights cache... handle it here
*/
int
cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
- vfs_context_t ctx, int *trailing_slash, int *dp_authorized, vnode_t last_dp)
+ vfs_context_t ctx, int *dp_authorized, vnode_t last_dp)
{
char *cp; /* pointer into pathname argument */
int vid;
unsigned int hash;
int error = 0;
+#if CONFIG_TRIGGERS
+ vnode_t trigger_vp;
+#endif /* CONFIG_TRIGGERS */
+
ucred = vfs_context_ucred(ctx);
- *trailing_slash = 0;
+ ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
NAME_CACHE_LOCK_SHARED();
ndp->ni_pathlen--;
if (*cp == '\0') {
- *trailing_slash = 1;
+ ndp->ni_flag |= NAMEI_TRAILINGSLASH;
*ndp->ni_next = '\0';
}
}
*dp_authorized = 1;
if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) {
- if (cnp->cn_nameiop != LOOKUP)
- break;
- if (cnp->cn_flags & (LOCKPARENT | NOCACHE))
- break;
+ if (cnp->cn_nameiop != LOOKUP)
+ break;
+ if (cnp->cn_flags & LOCKPARENT)
+ break;
+ if (cnp->cn_flags & NOCACHE)
+ break;
if (cnp->cn_flags & ISDOTDOT) {
/*
* Force directory hardlinks to go to
vp = NULL;
break;
}
+
if ( (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
if (mp->mnt_realrootvp == NULLVP || mp->mnt_generation != mount_generation ||
break;
vp = mp->mnt_realrootvp;
}
+
+#if CONFIG_TRIGGERS
+ /*
+ * After traversing all mountpoints stacked here, if we have a
+ * trigger in hand, resolve it. Note that we don't need to
+ * leave the fast path if the mount has already happened.
+ */
+ if ((vp->v_resolve != NULL) &&
+ (vp->v_resolve->vr_resolve_func != NULL)) {
+ break;
+ }
+#endif /* CONFIG_TRIGGERS */
+
+
dp = vp;
vp = NULLVP;
* immediately w/o waiting... it always succeeds
*/
vnode_get(dp);
- } else if ( (vnode_getwithvid(dp, vid)) ) {
+ } else if ( (vnode_getwithvid_drainok(dp, vid)) ) {
/*
* failure indicates the vnode
* changed identity or is being
}
}
if (vp != NULLVP) {
- if ( (vnode_getwithvid(vp, vvid)) ) {
+ if ( (vnode_getwithvid_drainok(vp, vvid)) ) {
vp = NULLVP;
/*
}
}
}
+
ndp->ni_dvp = dp;
ndp->ni_vp = vp;
+#if CONFIG_TRIGGERS
+ trigger_vp = vp ? vp : dp;
+ if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
+ error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
+ if (error) {
+ if (vp)
+ vnode_put(vp);
+ if (dp)
+ vnode_put(dp);
+ goto errorout;
+ }
+ }
+#endif /* CONFIG_TRIGGERS */
+
errorout:
/*
* If we came into cache_lookup_path after an iteration of the lookup loop that
long namelen = cnp->cn_namelen;
unsigned int hashval = (cnp->cn_hash & NCHASHMASK);
+ if (nc_disabled) {
+ return NULL;
+ }
+
ncpp = NCHHASH(dvp, cnp->cn_hash);
LIST_FOREACH(ncp, ncpp, nc_hash) {
if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
hashval = (cnp->cn_hash & NCHASHMASK);
+ if (nc_disabled) {
+ return 0;
+ }
+
NAME_CACHE_LOCK_SHARED();
relook:
struct namecache *ncp, *negp;
struct nchashhead *ncpp;
+ if (nc_disabled)
+ return;
+
/*
* if the entry is for -ve caching vp is null
*/
struct namecache *ncp;
kauth_cred_t tcred = NULL;
- if ((LIST_FIRST(&vp->v_nclinks) == NULL) && (LIST_FIRST(&vp->v_ncchildren) == NULL) && (vp->v_cred == NOCRED))
+ if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
+ (LIST_FIRST(&vp->v_ncchildren) == NULL) &&
+ (vp->v_cred == NOCRED) &&
+ (vp->v_parent == NULLVP))
return;
NAME_CACHE_LOCK();
uint32_t lock_index;
char *ptr;
- if (hashval == 0) {
- hashval = hash_string(name, 0);
- }
/*
* if the length already accounts for the null-byte, then
* subtract one so later on we don't index past the end
if (len > 0 && name[len-1] == '\0') {
len--;
}
+ if (hashval == 0) {
+ hashval = hash_string(name, len);
+ }
+
/*
* take this lock 'shared' to keep the hash stable
* if someone else decides to grow the pool they