+ if (!wantparent)
+ vnode_put(ndp->ni_dvp);
+
+ if (kdebug_enable)
+ kdebug_lookup(ndp->ni_dvp, cnp);
+ return (0);
+ }
+returned_from_lookup_path:
+ /* We'll always have an iocount on ni_vp when this finishes. */
+ error = lookup_handle_found_vnode(ndp, cnp, rdonly, vbusyflags, &keep_going, nc_generation, wantparent, atroot, ctx);
+ if (error != 0) {
+ goto bad2;
+ }
+
+ if (keep_going) {
+ dp = ndp->ni_vp;
+
+ /* namei() will handle symlinks */
+ if ((dp->v_type == VLNK) &&
+ ((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) {
+ return 0;
+ }
+
+ /*
+ * Otherwise, there's more path to process.
+ * cache_lookup_path is now responsible for dropping io ref on dp
+ * when it is called again in the dirloop. This ensures we hold
+ * a ref on dp until we complete the next round of lookup.
+ */
+ last_dp = dp;
+
+ goto dirloop;
+ }
+
+ return (0);
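+ /*
+ * A vnode was found but a later step failed: drop the iocounts on
+ * both the parent (if one is still held) and the found vnode before
+ * returning the error.
+ */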
+bad2:
+ if (ndp->ni_dvp)
+ vnode_put(ndp->ni_dvp);
+
+ vnode_put(ndp->ni_vp);
+ ndp->ni_vp = NULLVP;
+
+ if (kdebug_enable)
+ kdebug_lookup(dp, cnp);
+ return (error);
+
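+ /*
+ * The lookup failed before a leaf vnode was obtained: drop the
+ * iocount on the directory being searched, if any, and clear ni_vp.
+ */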
+bad:
+ if (dp)
+ vnode_put(dp);
+ ndp->ni_vp = NULLVP;
+
+ if (kdebug_enable)
+ kdebug_lookup(dp, cnp);
+ return (error);
+}
+
+/*
+ * Given a vnode in a union mount, traverse to the equivalent
+ * vnode in the underlying mount.
+ */
+int
+lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx)
+{
+ char *path = NULL, *pp;
+ const char *name, *np;
+ int len;
+ int error = 0;
+ struct nameidata nd;
+ vnode_t vp = dvp;
+
+ *new_dvp = NULL;
+
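+ /*
+ * If this vnode is the root of the union mount, the covered vnode
+ * is the answer; take an iocount on it and return.
+ */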
+ if (vp && vp->v_flag & VROOT) {
+ *new_dvp = vp->v_mount->mnt_vnodecovered;
+ if (vnode_getwithref(*new_dvp))
+ return ENOENT;
+ return 0;
+ }
+
+ path = (char *) kalloc(MAXPATHLEN);
+ if (path == NULL) {
+ error = ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Walk back up to the mountpoint following the
+ * v_parent chain and build a slash-separated path.
+ * Then look up that path starting with the covered vnode.
+ */
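+ /* The path is assembled right to left, starting at the end of the buffer. */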
+ pp = path + (MAXPATHLEN - 1);
+ *pp = '\0';
+
+ while (1) {
+ name = vnode_getname(vp);
+ if (name == NULL) {
+ printf("lookup_traverse_union: null parent name: .%s\n", pp);
+ error = ENOENT;
+ goto done;
+ }
+ len = strlen(name);
+ if ((len + 1) > (pp - path)) { // Enough space for this name?
+ error = ENAMETOOLONG;
+ vnode_putname(name);
+ goto done;
+ }
+ for (np = name + len; len > 0; len--) // Copy name backwards
+ *--pp = *--np;
+ vnode_putname(name);
+ vp = vp->v_parent;
+ if (vp == NULLVP || vp->v_flag & VROOT)
+ break;
+ *--pp = '/';
+ }
+
+ /* Evaluate the path in the underlying mount */
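+ /* USEDVP makes namei() start the lookup at ni_dvp (the covered vnode) instead of the cwd. */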
+ NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(pp), ctx);
+ nd.ni_dvp = dvp->v_mount->mnt_vnodecovered;
+ error = namei(&nd);
+ if (error == 0)
+ *new_dvp = nd.ni_vp;
+ nameidone(&nd);
+done:
+ if (path)
+ kfree(path, MAXPATHLEN);
+ return error;
+}
+
+int
+lookup_validate_creation_path(struct nameidata *ndp)
+{
+ struct componentname *cnp = &ndp->ni_cnd;
+
+ /*
+ * If creating and at the end of the pathname, then we can consider
+ * allowing the file to be created.
+ */
+ if (cnp->cn_flags & RDONLY) {
+ return EROFS;
+ }
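+ /*
+ * A trailing slash on the last component names a directory; refuse
+ * the creation unless the new node will itself be a directory.
+ */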
+ if ((cnp->cn_flags & ISLASTCN) && (ndp->ni_flag & NAMEI_TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) {
+ return ENOENT;
+ }
+
+ return 0;
+}
+
+/*
+ * Modifies only ni_vp. Always returns with ni_vp still valid (iocount held).
+ */
+static int
+lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
+ int vbusyflags, vfs_context_t ctx)
+{
+ mount_t mp;
+ vnode_t tdp;
+ int error = 0;
+ uint32_t depth = 0;
+ vnode_t mounted_on_dp;
+ int current_mount_generation = 0;
+#if CONFIG_TRIGGERS
+ vnode_t triggered_dp = NULLVP;
+ int retry_cnt = 0;
+#define MAX_TRIGGER_RETRIES 1
+#endif
+
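+ /* Nothing to do unless dp is a directory and the caller allows crossing mount points. */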
+ if (dp->v_type != VDIR || cnp->cn_flags & NOCROSSMOUNT)
+ return 0;
+
+ mounted_on_dp = dp;
+#if CONFIG_TRIGGERS
+restart:
+#endif
+ current_mount_generation = mount_generation;
+
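+ /*
+ * Walk down through the stack of file systems mounted on this
+ * directory.  A crossref is taken on each mount so it stays valid
+ * while we busy it and ask for its root vnode.
+ */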
+ while (dp->v_mountedhere) {
+ vnode_lock_spin(dp);
+ if ((mp = dp->v_mountedhere)) {
+ mp->mnt_crossref++;
+ vnode_unlock(dp);
+ } else {
+ vnode_unlock(dp);
+ break;
+ }
+
+ if (ISSET(mp->mnt_lflag, MNT_LFORCE)) {
+ mount_dropcrossref(mp, dp, 0);
+ break; // don't traverse into a forced unmount
+ }
+
+ if (vfs_busy(mp, vbusyflags)) {
+ mount_dropcrossref(mp, dp, 0);
+ if (vbusyflags == LK_NOWAIT) {
+ error = ENOENT;
+ goto out;
+ }
+
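+ /* The mount is dead or an unmount is in progress; re-check v_mountedhere and retry. */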
+ continue;
+ }
+
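+ /* Get the root vnode of the mounted file system; on success tdp is returned with an iocount held. */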
+ error = VFS_ROOT(mp, &tdp, ctx);
+
+ mount_dropcrossref(mp, dp, 0);
+ vfs_unbusy(mp);
+
+ if (error) {
+ goto out;
+ }