+ error = EINVAL;
+ }
+out:
+ if (error) {
+ tmppn = cnp->cn_pnbuf;
+ cnp->cn_pnbuf = NULL;
+ cnp->cn_flags &= ~HASBUF;
+ FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI);
+ }
+ return (error);
+}
+
+/*
+ * A fiddled version of m_adj() that ensures null fill to a 4-byte
+ * boundary and only trims off the back end.
+ */
+void
+nfsm_adj(mbuf_t mp, int len, int nul)
+{
+ mbuf_t m, mnext;
+ int count, i, mlen;
+ char *cp;
+
+ /*
+ * Trim from tail. Scan the mbuf chain,
+ * calculating its length and finding the last mbuf.
+ * If the adjustment only affects this mbuf, then just
+ * adjust and return. Otherwise, rescan and truncate
+ * after the remaining size.
+ */
+ count = 0;
+ m = mp;
+ for (;;) {
+ mlen = mbuf_len(m);
+ count += mlen;
+ mnext = mbuf_next(m);
+ if (mnext == NULL)
+ break;
+ m = mnext;
+ }
+ if (mlen > len) {
+ mlen -= len;
+ mbuf_setlen(m, mlen);
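+ /* zero-fill the last 'nul' bytes of the remaining data; XDR pad bytes must be zero */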
+ if (nul > 0) {
+ cp = (caddr_t)mbuf_data(m) + mlen - nul;
+ for (i = 0; i < nul; i++)
+ *cp++ = '\0';
+ }
+ return;
+ }
+ count -= len;
+ if (count < 0)
+ count = 0;
+ /*
+ * Correct length for chain is "count".
+ * Find the mbuf with last data, adjust its length,
+ * and toss data from remaining mbufs on chain.
+ */
+ for (m = mp; m; m = mbuf_next(m)) {
+ mlen = mbuf_len(m);
+ if (mlen >= count) {
+ mlen = count;
+ mbuf_setlen(m, count);
+ if (nul > 0) {
+ cp = (caddr_t)mbuf_data(m) + mlen - nul;
+ for (i = 0; i < nul; i++)
+ *cp++ = '\0';
+ }
+ break;
+ }
+ count -= mlen;
+ }
+ for (m = mbuf_next(m); m; m = mbuf_next(m))
+ mbuf_setlen(m, 0);
+}
+
+/*
+ * Trim the header out of the mbuf list and trim off any trailing
+ * junk so that the mbuf list has only the write data.
+ */
+int
+nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
+{
+ int cnt = 0, dlen, adjust;
+ caddr_t data;
+ mbuf_t m;
+
+ if (mlen)
+ *mlen = 0;
+
+ /* trim header */
+ for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m))
+ mbuf_setlen(m, 0);
+ if (!m)
+ return (EIO);
+
+ /* trim current mbuf */
+ data = mbuf_data(m);
+ dlen = mbuf_len(m);
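+ /* 'adjust' is how much of this mbuf was already consumed as header */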
+ adjust = nmc->nmc_ptr - data;
+ dlen -= adjust;
+ if ((dlen > 0) && (adjust > 0)) {
+ if (mbuf_setdata(m, nmc->nmc_ptr, dlen))
+ return (EIO);
+ } else
+ mbuf_setlen(m, dlen);
+
+ /* skip next len bytes */
+ for (; m && (cnt < len); m = mbuf_next(m)) {
+ dlen = mbuf_len(m);
+ cnt += dlen;
+ if (cnt > len) {
+ /* truncate to end of data */
+ mbuf_setlen(m, dlen - (cnt - len));
+ if (m == nmc->nmc_mcur)
+ nmc->nmc_left -= (cnt - len);
+ cnt = len;
+ }
+ }
+ if (mlen)
+ *mlen = cnt;
+
+ /* trim any trailing data */
+ if (m == nmc->nmc_mcur)
+ nmc->nmc_left = 0;
+ for (; m; m = mbuf_next(m))
+ mbuf_setlen(m, 0);
+
+ return (0);
+}
+
+int
+nfsm_chain_add_fattr(
+ struct nfsrv_descript *nd,
+ struct nfsm_chain *nmc,
+ struct vnode_attr *vap)
+{
+ int error = 0;
+
+ // XXX Should we assert here that all fields are supported?
+
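+ /*
+ * Encode the attributes in XDR order: type, mode, nlink, uid, gid,
+ * then the version-specific size/rdev/fsid/fileid fields, and
+ * finally atime, mtime and ctime.  NFSv3 uses 64-bit size/used/
+ * fsid/fileid values and splits rdev into major/minor; NFSv2 uses
+ * 32-bit fields plus a blocksize/blocks pair.
+ */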
+ nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
+ if (nd->nd_vers == NFS_VER3) {
+ nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
+ } else {
+ nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
+ }
+ nfsm_chain_add_32(error, nmc, vap->va_nlink);
+ nfsm_chain_add_32(error, nmc, vap->va_uid);
+ nfsm_chain_add_32(error, nmc, vap->va_gid);
+ if (nd->nd_vers == NFS_VER3) {
+ nfsm_chain_add_64(error, nmc, vap->va_data_size);
+ nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
+ nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
+ nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
+ nfsm_chain_add_64(error, nmc, vap->va_fsid);
+ nfsm_chain_add_64(error, nmc, vap->va_fileid);
+ } else {
+ nfsm_chain_add_32(error, nmc, vap->va_data_size);
+ nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
+ if (vap->va_type == VFIFO)
+ nfsm_chain_add_32(error, nmc, 0xffffffff);
+ else
+ nfsm_chain_add_32(error, nmc, vap->va_rdev);
+ nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
+ nfsm_chain_add_32(error, nmc, vap->va_fsid);
+ nfsm_chain_add_32(error, nmc, vap->va_fileid);
+ }
+ nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
+ nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
+ nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);
+
+ return (error);
+}
+
+int
+nfsm_chain_get_sattr(
+ struct nfsrv_descript *nd,
+ struct nfsm_chain *nmc,
+ struct vnode_attr *vap)
+{
+ int error = 0;
+ uint32_t val = 0;
+ uint64_t val64 = 0;
+ struct timespec now;
+
+ if (nd->nd_vers == NFS_VER2) {
+ /*
+ * There is/was a bug in the Sun client that puts 0xffff in the mode
+ * field of sattr when it should put in 0xffffffff. The u_short
+ * doesn't sign extend, so check the low-order 2 bytes for 0xffff.
+ */
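+ /*
+ * In an NFSv2 sattr, a field of all ones (-1) means "don't change";
+ * any other value is applied to the corresponding attribute.
+ */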
+ nfsm_chain_get_32(error, nmc, val);
+ if ((val & 0xffff) != 0xffff) {
+ VATTR_SET(vap, va_mode, val & 07777);
+ /* save the "type" bits for NFSv2 create */
+ VATTR_SET(vap, va_type, IFTOVT(val));
+ VATTR_CLEAR_ACTIVE(vap, va_type);
+ }
+ nfsm_chain_get_32(error, nmc, val);
+ if (val != (uint32_t)-1)
+ VATTR_SET(vap, va_uid, val);
+ nfsm_chain_get_32(error, nmc, val);
+ if (val != (uint32_t)-1)
+ VATTR_SET(vap, va_gid, val);
+ /* save the "size" bits for NFSv2 create (even if they appear unset) */
+ nfsm_chain_get_32(error, nmc, val);
+ VATTR_SET(vap, va_data_size, val);
+ if (val == (uint32_t)-1)
+ VATTR_CLEAR_ACTIVE(vap, va_data_size);
+ nfsm_chain_get_time(error, nmc, NFS_VER2,
+ vap->va_access_time.tv_sec,
+ vap->va_access_time.tv_nsec);
+ if (vap->va_access_time.tv_sec != -1)
+ VATTR_SET_ACTIVE(vap, va_access_time);
+ nfsm_chain_get_time(error, nmc, NFS_VER2,
+ vap->va_modify_time.tv_sec,
+ vap->va_modify_time.tv_nsec);
+ if (vap->va_modify_time.tv_sec != -1)
+ VATTR_SET_ACTIVE(vap, va_modify_time);
+ return (error);
+ }
+
+ /* NFSv3 */
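+ /*
+ * In an NFSv3 sattr3, each settable attribute is preceded by a
+ * 32-bit "value follows" boolean; the times use a time_how
+ * discriminator instead (don't change, set to server time, or
+ * set to client time), handled by the switches below.
+ */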
+ nfsm_chain_get_32(error, nmc, val);
+ if (val) {
+ nfsm_chain_get_32(error, nmc, val);
+ VATTR_SET(vap, va_mode, val & 07777);
+ }
+ nfsm_chain_get_32(error, nmc, val);
+ if (val) {
+ nfsm_chain_get_32(error, nmc, val);
+ VATTR_SET(vap, va_uid, val);
+ }
+ nfsm_chain_get_32(error, nmc, val);
+ if (val) {
+ nfsm_chain_get_32(error, nmc, val);
+ VATTR_SET(vap, va_gid, val);
+ }
+ nfsm_chain_get_32(error, nmc, val);
+ if (val) {
+ nfsm_chain_get_64(error, nmc, val64);
+ VATTR_SET(vap, va_data_size, val64);
+ }
+ nanotime(&now);
+ nfsm_chain_get_32(error, nmc, val);
+ switch (val) {
+ case NFS_TIME_SET_TO_CLIENT:
+ nfsm_chain_get_time(error, nmc, nd->nd_vers,
+ vap->va_access_time.tv_sec,
+ vap->va_access_time.tv_nsec);
+ VATTR_SET_ACTIVE(vap, va_access_time);
+ vap->va_vaflags &= ~VA_UTIMES_NULL;
+ break;
+ case NFS_TIME_SET_TO_SERVER:
+ VATTR_SET(vap, va_access_time, now);
+ vap->va_vaflags |= VA_UTIMES_NULL;
+ break;
+ }
+ nfsm_chain_get_32(error, nmc, val);
+ switch (val) {
+ case NFS_TIME_SET_TO_CLIENT:
+ nfsm_chain_get_time(error, nmc, nd->nd_vers,
+ vap->va_modify_time.tv_sec,
+ vap->va_modify_time.tv_nsec);
+ VATTR_SET_ACTIVE(vap, va_modify_time);
+ vap->va_vaflags &= ~VA_UTIMES_NULL;
+ break;
+ case NFS_TIME_SET_TO_SERVER:
+ VATTR_SET(vap, va_modify_time, now);
+ if (!VATTR_IS_ACTIVE(vap, va_access_time))
+ vap->va_vaflags |= VA_UTIMES_NULL;
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * Compare two security flavor structs; return 0 if they list the same
+ * flavors in the same order, 1 otherwise.
+ */
+int
+nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
+{
+ int i;
+
+ if (sf1->count != sf2->count)
+ return 1;
+ for (i = 0; i < sf1->count; i++)
+ if (sf1->flavors[i] != sf2->flavors[i])
+ return 1;
+ return 0;
+}
+
+/*
+ * Build hash lists of net addresses and hang them off the NFS export.
+ * Called by nfsrv_export() to set up the lists of export addresses.
+ */
+int
+nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
+{
+ struct nfs_export_net_args nxna;
+ struct nfs_netopt *no, *rn_no;
+ struct radix_node_head *rnh;
+ struct radix_node *rn;
+ struct sockaddr *saddr, *smask;
+ struct domain *dom;
+ int i, error;
+ unsigned int net;
+ user_addr_t uaddr;
+ kauth_cred_t cred;
+
+ uaddr = unxa->nxa_nets;
+ for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
+ error = copyin(uaddr, &nxna, sizeof(nxna));
+ if (error)
+ return (error);
+
+ if (nxna.nxna_flags & (NX_MAPROOT|NX_MAPALL)) {
+ struct posix_cred temp_pcred;
+ bzero(&temp_pcred, sizeof(temp_pcred));
+ temp_pcred.cr_uid = nxna.nxna_cred.cr_uid;
+ temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
+ for (i=0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++)
+ temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
+ cred = posix_cred_create(&temp_pcred);
+ if (!IS_VALID_CRED(cred))
+ return (ENOMEM);
+ } else {
+ cred = NOCRED;
+ }
+
+ if (nxna.nxna_addr.ss_len == 0) {
+ /* No address means this is a default/world export */
+ if (nx->nx_flags & NX_DEFAULTEXPORT) {
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ return (EEXIST);
+ }
+ nx->nx_flags |= NX_DEFAULTEXPORT;
+ nx->nx_defopt.nxo_flags = nxna.nxna_flags;
+ nx->nx_defopt.nxo_cred = cred;
+ bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));
+ nx->nx_expcnt++;
+ continue;
+ }
+
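+ /* allocate the netopt with the address (and optional mask) stored right after it */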
+ i = sizeof(struct nfs_netopt);
+ i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
+ MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
+ if (!no) {
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ return (ENOMEM);
+ }
+ bzero(no, sizeof(struct nfs_netopt));
+ no->no_opt.nxo_flags = nxna.nxna_flags;
+ no->no_opt.nxo_cred = cred;
+ bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));
+
+ saddr = (struct sockaddr *)(no + 1);
+ bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
+ if (nxna.nxna_mask.ss_len) {
+ smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
+ bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);
+ } else {
+ smask = NULL;
+ }
+ i = saddr->sa_family;
+ if ((rnh = nx->nx_rtable[i]) == 0) {
+ /*
+ * Seems silly to initialize every AF when most are not
+ * used, so initialize them on demand here.
+ */
+ TAILQ_FOREACH(dom, &domains, dom_entry) {
+ if (dom->dom_family == i && dom->dom_rtattach) {
+ dom->dom_rtattach((void **)&nx->nx_rtable[i],
+ dom->dom_rtoffset);
+ break;
+ }
+ }
+ if ((rnh = nx->nx_rtable[i]) == 0) {
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ _FREE(no, M_NETADDR);
+ return (ENOBUFS);
+ }
+ }
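+ /* insert the address into the export's radix tree, using the radix nodes embedded in the netopt */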
+ rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
+ if (rn == 0) {
+ /*
+ * One of the reasons that rnh_addaddr may fail is that
+ * the entry already exists. To check for this case, we
+ * look up the entry to see if it is there. If so, we
+ * do not need to make a new entry and simply continue.
+ *
+ * XXX should this be rnh_lookup() instead?
+ */
+ int matched = 0;
+ rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
+ rn_no = (struct nfs_netopt *)rn;
+ if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
+ (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
+ (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
+ kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
+ if (cred == cred2) {
+ /* creds are same (or both NULL) */
+ matched = 1;
+ } else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) {
+ /*
+ * Now compare the effective and
+ * supplementary groups...
+ *
+ * Note: This comparison, as written,
+ * does not correctly determine whether
+ * the groups are equivalent: aside from
+ * the first supplementary group (which
+ * is also the effective group), the
+ * order of the remaining groups doesn't
+ * matter, yet this is an ordered compare.
+ */
+ gid_t groups[NGROUPS];
+ gid_t groups2[NGROUPS];
+ int groupcount = NGROUPS;
+ int group2count = NGROUPS;
+
+ if (!kauth_cred_getgroups(cred, groups, &groupcount) &&
+ !kauth_cred_getgroups(cred2, groups2, &group2count) &&
+ groupcount == group2count) {
+ for (i=0; i < group2count; i++)
+ if (groups[i] != groups2[i])
+ break;
+ if (i >= group2count || i >= NGROUPS)
+ matched = 1;
+ }
+ }
+ }
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ _FREE(no, M_NETADDR);
+ if (matched)
+ continue;
+ return (EPERM);
+ }
+ nx->nx_expcnt++;
+ }
+
+ return (0);
+}
+
+/*
+ * In order to properly track an export's netopt count, we pass an
+ * additional argument to nfsrv_free_netopt() so that it can decrement
+ * the count as each entry is freed.
+ */
+struct nfsrv_free_netopt_arg {
+ uint32_t *cnt;
+ struct radix_node_head *rnh;
+};
+
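+/*
+ * Radix tree walk callback: remove a netopt entry from the tree,
+ * release its credential, free it, and decrement the export's count.
+ */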
+int
+nfsrv_free_netopt(struct radix_node *rn, void *w)
+{
+ struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
+ struct radix_node_head *rnh = fna->rnh;
+ uint32_t *cnt = fna->cnt;
+ struct nfs_netopt *nno = (struct nfs_netopt *)rn;
+
+ (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
+ if (IS_VALID_CRED(nno->no_opt.nxo_cred))
+ kauth_cred_unref(&nno->no_opt.nxo_cred);
+ _FREE((caddr_t)rn, M_NETADDR);
+ *cnt -= 1;
+ return (0);
+}
+
+/*
+ * Free the net address hash lists that are hanging off the mount points.
+ */
+int
+nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
+{
+ struct nfs_export_net_args nxna;
+ struct radix_node_head *rnh;
+ struct radix_node *rn;
+ struct nfsrv_free_netopt_arg fna;
+ struct nfs_netopt *nno;
+ user_addr_t uaddr;
+ unsigned int net;
+ int i, error;
+
+ if (!unxa || !unxa->nxa_netcount) {
+ /* delete everything */
+ for (i = 0; i <= AF_MAX; i++)
+ if ( (rnh = nx->nx_rtable[i]) ) {
+ fna.rnh = rnh;
+ fna.cnt = &nx->nx_expcnt;
+ (*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
+ _FREE((caddr_t)rnh, M_RTABLE);
+ nx->nx_rtable[i] = 0;
+ }
+ return (0);
+ }
+
+ /* delete only the exports specified */
+ uaddr = unxa->nxa_nets;
+ for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
+ error = copyin(uaddr, &nxna, sizeof(nxna));
+ if (error)
+ return (error);
+
+ if (nxna.nxna_addr.ss_len == 0) {
+ /* No address means this is a default/world export */
+ if (nx->nx_flags & NX_DEFAULTEXPORT) {
+ nx->nx_flags &= ~NX_DEFAULTEXPORT;
+ if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
+ kauth_cred_unref(&nx->nx_defopt.nxo_cred);
+ }
+ nx->nx_expcnt--;
+ }
+ continue;
+ }
+
+ if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
+ /* AF not initialized? */
+ if (!(unxa->nxa_flags & NXA_ADD))
+ printf("nfsrv_free_addrlist: address not found (0)\n");
+ continue;
+ }
+
+ rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
+ nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
+ if (!rn || (rn->rn_flags & RNF_ROOT)) {
+ if (!(unxa->nxa_flags & NXA_ADD))
+ printf("nfsrv_free_addrlist: address not found (1)\n");
+ continue;
+ }
+
+ (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
+ nno = (struct nfs_netopt *)rn;
+ if (IS_VALID_CRED(nno->no_opt.nxo_cred))
+ kauth_cred_unref(&nno->no_opt.nxo_cred);
+ _FREE((caddr_t)rn, M_NETADDR);
+
+ nx->nx_expcnt--;
+ if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
+ /* no more entries in rnh, so free it up */
+ _FREE((caddr_t)rnh, M_RTABLE);
+ nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
+ }
+ }
+
+ return (0);
+}
+
+void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX
+
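+/*
+ * Set up NFS exports as described by the user-level export arguments:
+ * check a path for exportability (NXA_CHECK), delete all exports
+ * (NXA_DELETE_ALL), or add, update, or delete individual exports.
+ */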
+int
+nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx)
+{
+ int error = 0;
+ size_t pathlen;
+ struct nfs_exportfs *nxfs, *nxfs2, *nxfs3;
+ struct nfs_export *nx, *nx2, *nx3;
+ struct nfs_filehandle nfh;
+ struct nameidata mnd, xnd;
+ vnode_t mvp = NULL, xvp = NULL;
+ mount_t mp = NULL;
+ char path[MAXPATHLEN];
+ int expisroot;
+
+ if (unxa->nxa_flags == NXA_CHECK) {
+ /* just check if the path is an NFS-exportable file system */
+ error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
+ if (error)
+ return (error);
+ NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
+ UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
+ error = namei(&mnd);
+ if (error)
+ return (error);
+ mvp = mnd.ni_vp;
+ mp = vnode_mount(mvp);
+ /* make sure it's the root of a file system */
+ if (!vnode_isvroot(mvp))
+ error = EINVAL;
+ /* make sure the file system is NFS-exportable */
+ if (!error) {
+ nfh.nfh_len = NFSV3_MAX_FID_SIZE;
+ error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
+ }
+ if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE))
+ error = EIO;
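+ /* the file system must also support extended readdir to be exportable */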
+ if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED))
+ error = EISDIR;
+ vnode_put(mvp);
+ nameidone(&mnd);
+ return (error);
+ }
+
+ /* all other operations: must be super user */
+ if ((error = vfs_context_suser(ctx)))
+ return (error);
+
+ if (unxa->nxa_flags & NXA_DELETE_ALL) {
+ /* delete all exports on all file systems */
+ lck_rw_lock_exclusive(&nfsrv_export_rwlock);
+ while ((nxfs = LIST_FIRST(&nfsrv_exports))) {
+ mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
+ if (mp) {
+ vfs_clearflags(mp, MNT_EXPORTED);
+ mount_iterdrop(mp);
+ mp = NULL;
+ }
+ /* delete all exports on this file system */
+ while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
+ LIST_REMOVE(nx, nx_next);
+ LIST_REMOVE(nx, nx_hash);
+ /* delete all netopts for this export */
+ nfsrv_free_addrlist(nx, NULL);
+ nx->nx_flags &= ~NX_DEFAULTEXPORT;
+ if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
+ kauth_cred_unref(&nx->nx_defopt.nxo_cred);
+ }
+ /* free active user list for this export */
+ nfsrv_free_user_list(&nx->nx_user_list);
+ FREE(nx->nx_path, M_TEMP);
+ FREE(nx, M_TEMP);
+ }
+ LIST_REMOVE(nxfs, nxfs_next);
+ FREE(nxfs->nxfs_path, M_TEMP);
+ FREE(nxfs, M_TEMP);
+ }
+ if (nfsrv_export_hashtbl) {
+ /* all exports deleted, clean up export hash table */
+ FREE(nfsrv_export_hashtbl, M_TEMP);
+ nfsrv_export_hashtbl = NULL;
+ }
+ lck_rw_done(&nfsrv_export_rwlock);
+ return (0);
+ }
+
+ error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
+ if (error)
+ return (error);
+
+ lck_rw_lock_exclusive(&nfsrv_export_rwlock);
+
+ /* init export hash table if not already */
+ if (!nfsrv_export_hashtbl) {
+ if (nfsrv_export_hash_size <= 0)
+ nfsrv_export_hash_size = NFSRVEXPHASHSZ;
+ nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash);
+ }
+
+ // first check if we've already got an exportfs with the given ID
+ LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
+ if (nxfs->nxfs_id == unxa->nxa_fsid)
+ break;
+ }
+ if (nxfs) {
+ /* verify exported FS path matches given path */
+ if (strncmp(path, nxfs->nxfs_path, MAXPATHLEN)) {
+ error = EEXIST;
+ goto unlock_out;
+ }
+ if ((unxa->nxa_flags & (NXA_ADD|NXA_OFFLINE)) == NXA_ADD) {
+ /* if adding, verify that the mount is still what we expect */
+ mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
+ if (mp) {
+ mount_ref(mp, 0);
+ mount_iterdrop(mp);
+ }
+ /* find exported FS root vnode */
+ NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
+ UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx);
+ error = namei(&mnd);
+ if (error)
+ goto unlock_out;
+ mvp = mnd.ni_vp;
+ /* make sure it's (still) the root of a file system */
+ if (!vnode_isvroot(mvp)) {
+ error = EINVAL;
+ goto out;
+ }
+ /* sanity check: this should be the same mount */
+ if (mp != vnode_mount(mvp)) {
+ error = EINVAL;
+ goto out;
+ }
+ }
+ } else {
+ /* no current exported file system with that ID */
+ if (!(unxa->nxa_flags & NXA_ADD)) {
+ error = ENOENT;
+ goto unlock_out;
+ }
+
+ /* find exported FS root vnode */
+ NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
+ UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
+ error = namei(&mnd);
+ if (error) {
+ if (!(unxa->nxa_flags & NXA_OFFLINE))
+ goto unlock_out;
+ } else {
+ mvp = mnd.ni_vp;
+ /* make sure it's the root of a file system */
+ if (!vnode_isvroot(mvp)) {
+ /* bail if not marked offline */
+ if (!(unxa->nxa_flags & NXA_OFFLINE)) {
+ error = EINVAL;
+ goto out;
+ }
+ vnode_put(mvp);
+ nameidone(&mnd);
+ mvp = NULL;
+ } else {
+ mp = vnode_mount(mvp);
+ mount_ref(mp, 0);
+
+ /* make sure the file system is NFS-exportable */
+ nfh.nfh_len = NFSV3_MAX_FID_SIZE;
+ error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
+ if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE))
+ error = EIO;
+ if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED))
+ error = EISDIR;
+ if (error)
+ goto out;
+ }
+ }
+
+ /* add an exportfs for it */
+ MALLOC(nxfs, struct nfs_exportfs *, sizeof(struct nfs_exportfs), M_TEMP, M_WAITOK);
+ if (!nxfs) {
+ error = ENOMEM;
+ goto out;
+ }
+ bzero(nxfs, sizeof(struct nfs_exportfs));
+ nxfs->nxfs_id = unxa->nxa_fsid;
+ MALLOC(nxfs->nxfs_path, char*, pathlen, M_TEMP, M_WAITOK);
+ if (!nxfs->nxfs_path) {