+int
+lookup_validate_creation_path(struct nameidata *ndp)
+{
+ struct componentname *cnp = &ndp->ni_cnd;
+
+ /*
+ * If creating and at end of pathname, then can consider
+ * allowing file to be created.
+ */
+ if (cnp->cn_flags & RDONLY) {
+ return EROFS;
+ }
+ if ((cnp->cn_flags & ISLASTCN) && (ndp->ni_flag & NAMEI_TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) {
+ return ENOENT;
+ }
+
+ return 0;
+}
+
/*
 * Walk ni_vp down through any stack of filesystems mounted on it,
 * replacing it with the root vnode of each covering mount in turn,
 * until a vnode that is not a mount point is reached (or NOCROSSMOUNT
 * suppresses the traversal).
 *
 * Modifies only ni_vp. Always returns with ni_vp still valid (iocount held).
 */
int
lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
		int vbusyflags, vfs_context_t ctx)
{
	mount_t mp;
	vnode_t tdp;
	int error = 0;
	uthread_t uth;
	uint32_t depth = 0;		/* number of mount layers crossed */
	int dont_cache_mp = 0;		/* set when a trigger mount may have been suppressed */
	vnode_t mounted_on_dp;		/* the vnode we started on (the covered vnode) */
	int current_mount_generation = 0;

	mounted_on_dp = dp;
	/* Snapshot the global mount generation at entry; stored below if we cache. */
	current_mount_generation = mount_generation;

	while ((dp->v_type == VDIR) && dp->v_mountedhere &&
			((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
#if CONFIG_TRIGGERS
		/*
		 * For a trigger vnode, call its resolver when crossing its mount (if requested)
		 */
		if (dp->v_resolve) {
			/* best-effort: resolver errors are ignored here */
			(void) vnode_trigger_resolve(dp, ndp, ctx);
		}
#endif
		vnode_lock(dp);

		/* Re-check under the vnode lock: v_mountedhere may have changed. */
		if ((dp->v_type == VDIR) && (mp = dp->v_mountedhere)) {

			/* Take a crossref so the mount can't be freed while we busy it. */
			mp->mnt_crossref++;
			vnode_unlock(dp);


			if (vfs_busy(mp, vbusyflags)) {
				mount_dropcrossref(mp, dp, 0);
				if (vbusyflags == LK_NOWAIT) {
					error = ENOENT;
					goto out;
				}

				/* vfs_busy() blocked; the mount may have changed - retry. */
				continue;
			}


			/*
			 * XXX - if this is the last component of the
			 * pathname, and it's either not a lookup operation
			 * or the NOTRIGGER flag is set for the operation,
			 * set a uthread flag to let VFS_ROOT() for autofs
			 * know it shouldn't trigger a mount.
			 */
			uth = (struct uthread *)get_bsdthread_info(current_thread());
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop != LOOKUP ||
			     (cnp->cn_flags & NOTRIGGER))) {
				uth->uu_notrigger = 1;
				/* a trigger may have been suppressed; don't cache below */
				dont_cache_mp = 1;
			}

			error = VFS_ROOT(mp, &tdp, ctx);
			/* XXX - clear the uthread flag */
			uth->uu_notrigger = 0;

			mount_dropcrossref(mp, dp, 0);
			vfs_unbusy(mp);

			if (error) {
				/* dp still holds an iocount, so ni_vp remains valid */
				goto out;
			}

			/* Drop the covered vnode and continue from the mounted root. */
			vnode_put(dp);
			ndp->ni_vp = dp = tdp;
			depth++;

#if CONFIG_TRIGGERS
			/*
			 * Check if root dir is a trigger vnode
			 */
			if (dp->v_resolve) {
				error = vnode_trigger_resolve(dp, ndp, ctx);
				if (error) {
					goto out;
				}
			}
#endif

		} else {
			/* Raced: no longer a mount point - stop traversing. */
			vnode_unlock(dp);
			break;
		}
	}

	/*
	 * If we crossed at least one mount, record the final root vnode
	 * (and its vid / the entry-time generation) on the first covering
	 * mount, for consumers of mnt_realrootvp.  Skipped when a trigger
	 * may have been suppressed above.
	 */
	if (depth && !dont_cache_mp) {
		mp = mounted_on_dp->v_mountedhere;

		if (mp) {
			mount_lock_spin(mp);
			mp->mnt_realrootvp_vid = dp->v_id;
			mp->mnt_realrootvp = dp;
			mp->mnt_generation = current_mount_generation;
			mount_unlock(mp);
		}
	}

	return 0;

out:
	return error;
}
+
/*
 * Expand the symlink in ni_vp: read the link contents, splice the
 * remainder of the original pathname after them, and hand back the
 * directory at which the restarted lookup should begin.
 *
 * Takes ni_vp and ni_dvp non-NULL. Returns with *new_dp set to the location
 * at which to start a lookup with a resolved path, and all other iocounts dropped.
 */
int
lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx)
{
	int error;
	char *cp;		/* pointer into pathname argument */
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];
	int need_newpathbuf;	/* nonzero: read link into a fresh MAXPATHLEN buffer */
	u_int linklen;		/* number of bytes of link content read */
	struct componentname *cnp = &ndp->ni_cnd;
	vnode_t dp;
	char *tmppn;

#ifndef __LP64__
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
		cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}
#endif /* __LP64__ */

	/* Bound symlink expansion so cyclic links terminate with ELOOP. */
	if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
		return ELOOP;
	}
#if CONFIG_MACF
	if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0)
		return error;
#endif /* MAC */
	/*
	 * We can only reuse the existing pathname buffer when the symlink
	 * was the final component (nothing left to splice in after the
	 * link text) and we own the buffer (HASBUF).
	 */
	if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF))
		need_newpathbuf = 1;
	else
		need_newpathbuf = 0;

	if (need_newpathbuf) {
		MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (cp == NULL) {
			return ENOMEM;
		}
	} else {
		cp = cnp->cn_pnbuf;
	}
	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));

	uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);

	/* Read the link contents into cp. */
	error = VNOP_READLINK(ndp->ni_vp, auio, ctx);
	if (error) {
		if (need_newpathbuf)
			FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
		return error;
	}

	/*
	 * Safe to set unsigned with a [larger] signed type here
	 * because 0 <= uio_resid <= MAXPATHLEN and MAXPATHLEN
	 * is only 1024.
	 */
	linklen = MAXPATHLEN - (u_int)uio_resid(auio);
	/* Combined link text + remaining path must still fit in one buffer. */
	if (linklen + ndp->ni_pathlen > MAXPATHLEN) {
		if (need_newpathbuf)
			FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);

		return ENAMETOOLONG;
	}
	if (need_newpathbuf) {
		long len = cnp->cn_pnlen;

		/*
		 * Append the unconsumed remainder of the original path
		 * (ni_pathlen bytes, which covers its NUL terminator)
		 * right after the link text, then swap in the new buffer.
		 */
		tmppn = cnp->cn_pnbuf;
		bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
		cnp->cn_pnbuf = cp;
		cnp->cn_pnlen = MAXPATHLEN;

		/* Free the old buffer if we owned it; otherwise we own one now. */
		if ( (cnp->cn_flags & HASBUF) )
			FREE_ZONE(tmppn, len, M_NAMEI);
		else
			cnp->cn_flags |= HASBUF;
	} else
		/* In-place reuse: terminate the link text we just read. */
		cnp->cn_pnbuf[linklen] = '\0';

	ndp->ni_pathlen += linklen;
	cnp->cn_nameptr = cnp->cn_pnbuf;

	/*
	 * starting point for 'relative'
	 * symbolic link path
	 */
	dp = ndp->ni_dvp;

	/*
	 * get rid of references returned via 'lookup'
	 */
	vnode_put(ndp->ni_vp);
	vnode_put(ndp->ni_dvp);	/* ALWAYS have a dvp for a symlink */

	ndp->ni_vp = NULLVP;
	ndp->ni_dvp = NULLVP;

	/*
	 * Check if symbolic link restarts us at the root
	 */
	if (*(cnp->cn_nameptr) == '/') {
		/* Skip the leading slashes; restart at the process root. */
		while (*(cnp->cn_nameptr) == '/') {
			cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
		if ((dp = ndp->ni_rootdir) == NULLVP) {
			/* No root directory set for this context. */
			return ENOENT;
		}
	}

	*new_dp = dp;

	return 0;
}
+