/*
- * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
uint32_t nfs_lock_owner_seqnum = 0;
thread_call_t nfs4_callback_timer_call;
int nfs4_callback_timer_on = 0;
+char nfs4_default_domain[MAXPATHLEN];
/* nfsiod */
lck_grp_t *nfsiod_lck_grp;
int nfs_vfs_init(struct vfsconf *);
int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t);
-struct vfsops nfs_vfsops = {
- nfs_vfs_mount,
- nfs_vfs_start,
- nfs_vfs_unmount,
- nfs_vfs_root,
- nfs_vfs_quotactl,
- nfs_vfs_getattr,
- nfs_vfs_sync,
- nfs_vfs_vget,
- nfs_vfs_fhtovp,
- nfs_vfs_vptofh,
- nfs_vfs_init,
- nfs_vfs_sysctl,
- NULL, /* setattr */
- { NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL } /* reserved */
+const struct vfsops nfs_vfsops = {
+ .vfs_mount = nfs_vfs_mount,
+ .vfs_start = nfs_vfs_start,
+ .vfs_unmount = nfs_vfs_unmount,
+ .vfs_root = nfs_vfs_root,
+ .vfs_quotactl = nfs_vfs_quotactl,
+ .vfs_getattr = nfs_vfs_getattr,
+ .vfs_sync = nfs_vfs_sync,
+ .vfs_vget = nfs_vfs_vget,
+ .vfs_fhtovp = nfs_vfs_fhtovp,
+ .vfs_vptofh = nfs_vfs_vptofh,
+ .vfs_init = nfs_vfs_init,
+ .vfs_sysctl = nfs_vfs_sysctl,
+ // We do not support the remaining VFS ops
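+ // (fields not named above are zero-initialized, i.e. NULL, per C rules)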
};
int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
#endif
-struct nfs_funcs nfs3_funcs = {
+const struct nfs_funcs nfs3_funcs = {
nfs3_mount,
nfs3_update_statfs,
nfs3_getquota,
nfs3_unlock_rpc,
nfs3_getlock_rpc
};
-struct nfs_funcs nfs4_funcs = {
+const struct nfs_funcs nfs4_funcs = {
nfs4_mount,
nfs4_update_statfs,
nfs4_getquota,
// PUTFH + GETATTR
numops = 2;
nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "statfs", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "statfs", nmp->nm_minor_vers, numops);
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
return (error);
}
+/*
+ * Return an NFS volume name from the mntfrom name.
+ */
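+/*
+ * e.g. "server:/vol/data/" -> "data"   (trailing slashes ignored)
+ *      "server:export"     -> "export" (no slash: text after the colon)
+ */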
+static void
+nfs_get_volname(struct mount *mp, char *volname, size_t len)
+{
+ const char *ptr, *cptr;
+ const char *mntfrom = mp->mnt_vfsstat.f_mntfromname;
+ size_t mflen = strnlen(mntfrom, MAXPATHLEN+1);
+
+ if (mflen > MAXPATHLEN || mflen == 0) {
+ strlcpy(volname, "Bad volname", len);
+ return;
+ }
+
+ /* Move back over trailing slashes */
+ for (ptr = &mntfrom[mflen-1]; ptr != mntfrom && *ptr == '/'; ptr--) {
+ mflen--;
+ }
+
+ /* Find first character after the last slash */
+ cptr = ptr = NULL;
+ for (size_t i = 0; i < mflen; i++) {
+ if (mntfrom[i] == '/')
+ ptr = &mntfrom[i+1];
+ /* And the first character after the first colon */
+ else if (cptr == NULL && mntfrom[i] == ':')
+ cptr = &mntfrom[i+1];
+ }
+
+ /*
+ * If there is no slash, or nothing follows the last slash,
+ * use everything past the first colon.
+ */
+ if (ptr == NULL || *ptr == '\0')
+ ptr = cptr;
+ /* If there was no colon either, fall back to the whole mntfrom name */
+ if (ptr == NULL)
+ ptr = mntfrom;
+
+ mflen = &mntfrom[mflen] - ptr;
+ len = mflen+1 < len ? mflen+1 : len;
+
+ strlcpy(volname, ptr, len);
+}
/*
* The NFS VFS_GETATTR function: "statfs"-type information is retrieved
lck_mtx_unlock(&nmp->nm_lock);
}
+ if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
+ /* %%% If failover support is implemented we may need to take nm_lock */
+ nfs_get_volname(mp, fsap->f_vol_name, MAXPATHLEN);
+ VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
+ }
if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
u_int32_t caps, valid;
nfsnode_t np = nmp->nm_dnp;
// caps |= VOL_CAP_FMT_OPENDENYMODES;
// valid |= VOL_CAP_FMT_OPENDENYMODES;
}
+ // No version of NFS supports immutable files
+ caps |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
+ valid |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
+
fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
// VOL_CAP_FMT_PERSISTENTOBJECTIDS |
// VOL_CAP_FMT_SYMBOLICLINKS |
if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
fsap->f_attributes.validattr.commonattr = 0;
fsap->f_attributes.validattr.volattr =
- ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
+ ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
fsap->f_attributes.validattr.dirattr = 0;
fsap->f_attributes.validattr.fileattr = 0;
fsap->f_attributes.validattr.forkattr = 0;
fsap->f_attributes.nativeattr.commonattr = 0;
fsap->f_attributes.nativeattr.volattr =
- ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
+ ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
fsap->f_attributes.nativeattr.dirattr = 0;
fsap->f_attributes.nativeattr.fileattr = 0;
fsap->f_attributes.nativeattr.forkattr = 0;
mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
mp->mnt_ioflags = 0;
mp->mnt_realrootvp = NULLVP;
- mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
+ mp->mnt_authcache_ttl = 0; /* Always go to our lookup */
mount_lock_init(mp);
TAILQ_INIT(&mp->mnt_vnodelist);
// PUTFH, READLINK
numops = 2;
nfsm_chain_build_alloc_init(error, &nmreq, 12 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "readlink", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
nfsm_chain_add_fh(error, &nmreq, NFS_VER4, fhp->fh_data, fhp->fh_len);
NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, NULL, 0);
numops = 2;
nfsm_chain_build_alloc_init(error, &nmreq, 9 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTROOTFH);
numops--;
NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
numops = 4;
nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
numops--;
if (dirfh.fh_len) {
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
NFS_CLEAR_ATTRIBUTES(bitmap);
NFS4_DEFAULT_ATTRIBUTES(bitmap);
/* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
- if (NMFLAG(nmp, NONAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs"))
+ if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs"))
NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
nfsm_chain_build_done(error, &nmreq);
gotfh:
/* get attrs for mount point root */
- numops = NMFLAG(nmp, NONAMEDATTR) ? 2 : 3; // PUTFH + GETATTR + OPENATTR
+ numops = NMFLAG(nmp, NAMEDATTR) ? 3 : 2; // PUTFH + GETATTR + OPENATTR
nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
NFS_CLEAR_ATTRIBUTES(bitmap);
NFS4_DEFAULT_ATTRIBUTES(bitmap);
/* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
- if (NMFLAG(nmp, NONAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs")))
+ if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs")))
NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
- if (!NMFLAG(nmp, NONAMEDATTR)) {
+ if (NMFLAG(nmp, NAMEDATTR)) {
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
nfsm_chain_add_32(error, &nmreq, 0);
NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
nfsmout_if(error);
- if (!NMFLAG(nmp, NONAMEDATTR)) {
+ if (NMFLAG(nmp, NAMEDATTR)) {
nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
if (error == ENOENT)
error = 0;
return (error);
}
+/* Table of the maximum supported minor version for each NFS major version */
+uint32_t maxminorverstab[] = {
+ 0, /* Version 0 (does not exist) */
+ 0, /* Version 1 (does not exist) */
+ 0, /* Version 2 */
+ 0, /* Version 3 */
+ 0, /* Version 4 */
+};
+
+#define NFS_MAX_SUPPORTED_VERSION ((long)(sizeof (maxminorverstab) / sizeof (uint32_t) - 1))
+#define NFS_MAX_SUPPORTED_MINOR_VERSION(v) ((long)(maxminorverstab[(v)]))
+
+#define DEFAULT_NFS_MIN_VERS VER2PVER(2, 0)
+#define DEFAULT_NFS_MAX_VERS VER2PVER(3, 0)
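+
+/*
+ * nm_min_vers and nm_max_vers hold "packed" versions: major and minor
+ * in one 32-bit word. A sketch of the encoding assumed by the checks
+ * below (the real VER2PVER/PVER2MAJOR/PVER2MINOR macros live in the
+ * NFS headers):
+ *
+ *	#define VER2PVER(M, m)	(((uint32_t)(M) << 16) | ((m) & 0xffff))
+ *	#define PVER2MAJOR(v)	((uint32_t)(((v) >> 16) & 0xffff))
+ *	#define PVER2MINOR(v)	((uint32_t)((v) & 0xffff))
+ *
+ * so DEFAULT_NFS_MIN_VERS encodes 2.0 and DEFAULT_NFS_MAX_VERS 3.0.
+ */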
+
/*
* Common code to mount an NFS file system.
*/
int error = 0;
struct vfsstatfs *sbp;
struct xdrbuf xb;
- uint32_t i, val, vers = 0, minorvers, maxio, iosize, len;
+ uint32_t i, val, maxio, iosize, len;
uint32_t *mattrs;
uint32_t *mflags_mask;
uint32_t *mflags;
uint32_t argslength, attrslength;
struct nfs_location_index firstloc = { NLI_VALID, 0, 0, 0 };
-
+ static const struct nfs_etype nfs_default_etypes = {
+ .count = NFS_MAX_ETYPES,
+ .selected = NFS_MAX_ETYPES,
+ .etypes = { NFS_AES256_CTS_HMAC_SHA1_96,
+ NFS_AES128_CTS_HMAC_SHA1_96,
+ NFS_DES3_CBC_SHA1_KD
+ }
+ };
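+ /*
+ * The default etype list is ordered strongest first (AES-256, then
+ * AES-128, then 3DES); selected == count means no etype has been
+ * chosen yet, matching the convention used when parsing
+ * NFS_MATTR_KERB_ETYPE below.
+ */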
/* make sure mbuf constants are set up */
if (!nfs_mbuf_mhlen)
nfs_mbuf_init();
TAILQ_INIT(&nmp->nm_resendq);
TAILQ_INIT(&nmp->nm_iodq);
TAILQ_INIT(&nmp->nm_gsscl);
- TAILQ_INIT(&nmp->nm_gssnccl);
LIST_INIT(&nmp->nm_monlist);
vfs_setfsprivate(mp, nmp);
vfs_getnewfsid(mp);
nmp->nm_mountp = mp;
vfs_setauthopaque(mp);
+ /*
+ * Disable cache_lookup_path for NFS. NFS lookup always needs
+ * to be called to check if the directory attribute cache is
+ * valid and possibly purge the directory before calling
+ * cache_lookup.
+ */
+ vfs_setauthcache_ttl(mp, 0);
nfs_nhinit_finish();
/* set up defaults */
nmp->nm_ref = 0;
nmp->nm_vers = 0;
+ nmp->nm_min_vers = DEFAULT_NFS_MIN_VERS;
+ nmp->nm_max_vers = DEFAULT_NFS_MAX_VERS;
nmp->nm_timeo = NFS_TIMEO;
nmp->nm_retry = NFS_RETRANS;
nmp->nm_sotype = 0;
nmp->nm_acregmax = NFS_MAXATTRTIMO;
nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
+ nmp->nm_etype = nfs_default_etypes;
nmp->nm_auth = RPCAUTH_SYS;
nmp->nm_iodlink.tqe_next = NFSNOLIST;
nmp->nm_deadtimeout = 0;
}
}
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
- xb_get_32(error, &xb, vers);
+ /* Can't specify a single version and a range */
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE))
+ error = EINVAL;
+ xb_get_32(error, &xb, nmp->nm_vers);
+ if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION ||
+ nmp->nm_vers < NFS_VER2)
+ error = EINVAL;
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
- xb_get_32(error, &xb, minorvers);
+ xb_get_32(error, &xb, nmp->nm_minor_vers);
else
- minorvers = 0;
- nfsmerr_if(error);
- switch (vers) {
- case 2:
- nmp->nm_vers = NFS_VER2;
- break;
- case 3:
- nmp->nm_vers = NFS_VER3;
- break;
- case 4:
- switch (minorvers) {
- case 0:
- nmp->nm_vers = NFS_VER4;
- break;
- default:
- error = EINVAL;
- }
- break;
- default:
+ nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers];
+ if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers])
error = EINVAL;
- }
- }
+ nmp->nm_max_vers = nmp->nm_min_vers =
+ VER2PVER(nmp->nm_vers, nmp->nm_minor_vers);
+ }
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
- /* should have also gotten NFS version (and already gotten minorvers) */
+ /* should have also gotten NFS version (and already gotten minor version) */
if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION))
error = EINVAL;
}
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
+ xb_get_32(error, &xb, nmp->nm_min_vers);
+ xb_get_32(error, &xb, nmp->nm_max_vers);
+ if ((nmp->nm_min_vers > nmp->nm_max_vers) ||
+ (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) ||
+ (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) ||
+ (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)]))
+ error = EINVAL;
+ }
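+ /*
+ * e.g. a range of VER2PVER(2, 0)..VER2PVER(4, 0) allows any version
+ * from NFSv2 through NFSv4.0 to be negotiated; a range with
+ * min > max, or a minor version beyond what maxminorverstab[] allows
+ * for its major, is rejected above with EINVAL.
+ */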
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
xb_get_32(error, &xb, nmp->nm_rsize);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
/* start with the first flavor */
nmp->nm_auth = nmp->nm_sec.flavors[0];
}
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
+ uint32_t etypecnt;
+ xb_get_32(error, &xb, etypecnt);
+ if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES)))
+ error = EINVAL;
+ nfsmerr_if(error);
+ nmp->nm_etype.count = etypecnt;
+ xb_get_32(error, &xb, nmp->nm_etype.selected);
+ nfsmerr_if(error);
+ if (etypecnt) {
+ nmp->nm_etype.selected = etypecnt; /* Nothing is selected yet, so set selected to count */
+ for (i=0; i < etypecnt; i++) {
+ xb_get_32(error, &xb, nmp->nm_etype.etypes[i]);
+ /* Check for valid encryption type */
+ switch (nmp->nm_etype.etypes[i]) {
+ case NFS_DES3_CBC_SHA1_KD:
+ case NFS_AES128_CTS_HMAC_SHA1_96:
+ case NFS_AES256_CTS_HMAC_SHA1_96:
+ break;
+ default:
+ error = EINVAL;
+ }
+ }
+ }
+ }
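+ /*
+ * Wire format of NFS_MATTR_KERB_ETYPE, as parsed above: a 32-bit
+ * count, a 32-bit "selected" index, then count 32-bit encryption
+ * types. The selected value read here is overwritten with count,
+ * i.e. nothing is selected yet.
+ */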
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
xb_get_32(error, &xb, nmp->nm_numgrps);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
error = ENOMEM;
xb_get_32(error, &xb, nmp->nm_fh->fh_len);
nfsmerr_if(error);
- error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
+ if (nmp->nm_fh->fh_len < 0 ||
+ (size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
+ error = EINVAL;
+ else
+ error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
}
nfsmerr_if(error);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
} else {
/* ignore these if not v4 */
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK);
- NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NONAMEDATTR);
+ NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NAMEDATTR);
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
}
* buffers into multiple requests if the buffer size is
* larger than the I/O size.
*/
+#ifndef CONFIG_EMBEDDED
iosize = max(nmp->nm_rsize, nmp->nm_wsize);
if (iosize < PAGE_SIZE)
iosize = PAGE_SIZE;
+#else
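+ /* Embedded targets pin the I/O size to a single page, presumably to bound buffer memory on constrained devices. */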
+ iosize = PAGE_SIZE;
+#endif
nmp->nm_biosize = trunc_page_32(iosize);
/* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. */
lck_mtx_unlock(&nmp->nm_lock);
return (0);
nfsmerr:
- nfs_mount_cleanup(nmp);
+ nfs_mount_drain_and_cleanup(nmp);
return (error);
}
xb_copy_32(error, &xb, &xbnew, val);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
xb_copy_32(error, &xb, &xbnew, val);
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
+ xb_copy_32(error, &xb, &xbnew, val);
+ xb_copy_32(error, &xb, &xbnew, val);
+ }
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
xb_copy_32(error, &xb, &xbnew, val);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
while (!error && (count-- > 0))
xb_copy_32(error, &xb, &xbnew, val);
}
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
+ xb_copy_32(error, &xb, &xbnew, count);
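+ /* presumably -1 resets "selected" so the copied args carry no negotiated etype */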
+ xb_add_32(error, &xbnew, -1);
+ while (!error && (count-- > 0))
+ xb_copy_32(error, &xb, &xbnew, val);
+ }
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
xb_copy_32(error, &xb, &xbnew, val);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE))
nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags)
{
struct nfsreq *req, *treq;
- struct nfs_reqqhead iodq;
+ struct nfs_reqqhead iodq, resendq;
struct timespec ts = { 1, 0 };
struct nfs_open_owner *noop, *nextnoop;
nfsnode_t np;
nfs4_mount_callback_shutdown(nmp);
/* Destroy any RPCSEC_GSS contexts */
- if (!TAILQ_EMPTY(&nmp->nm_gsscl))
- nfs_gss_clnt_ctx_unmount(nmp);
+ nfs_gss_clnt_ctx_unmount(nmp);
/* mark the socket for termination */
lck_mtx_lock(&nmp->nm_lock);
}
/*
- * Loop through outstanding request list and remove dangling
- * references to defunct nfsmount struct
+ * Be sure all requests for this mount are completed
+ * and removed from the resend queue.
+ */
+ TAILQ_INIT(&resendq);
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
+ if (req->r_nmp == nmp) {
+ lck_mtx_lock(&req->r_mtx);
+ if (!req->r_error && req->r_nmrep.nmc_mhead == NULL)
+ req->r_error = EIO;
+ if (req->r_flags & R_RESENDQ) {
+ lck_mtx_lock(&nmp->nm_lock);
+ req->r_flags &= ~R_RESENDQ;
+ if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ /*
+ * Queue up the request so that we can unreference it
+ * without holding nfs_request_mutex
+ */
+ TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ }
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+
+ /* Since we've dropped the request mutex we can now safely unreference the requests */
+ TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
+ TAILQ_REMOVE(&resendq, req, r_rchain);
+ nfs_request_rele(req);
+ }
+
+ /*
+ * Now handle any outstanding async requests. We need to walk the
+ * request queue again, this time with the nfsiod_mutex held. No
+ * other iods can grab our requests until we've put them on our own
+ * local iod queue for processing.
*/
TAILQ_INIT(&iodq);
lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(nfsiod_mutex);
TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
if (req->r_nmp == nmp) {
- if (req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT)) {
- /* async I/O RPC needs to be finished */
- lck_mtx_lock(nfsiod_mutex);
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_callback.rcb_func
+ && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) {
+ /*
+ * R_IOD is not set, so we need to handle this request. If
+ * it's not on a list, add it to our iod queue; otherwise
+ * it must already be on nm_iodq, which is concatenated onto
+ * our local queue below.
+ * %%% We should really keep a back pointer to the iod
+ * queue we're on.
+ */
+ req->r_flags |= R_IOD;
if (req->r_achain.tqe_next == NFSREQNOLIST) {
TAILQ_INSERT_TAIL(&iodq, req, r_achain);
}
- lck_mtx_unlock(nfsiod_mutex);
}
- wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
}
}
- lck_mtx_unlock(nfs_request_mutex);
/* finish any async I/O RPCs queued up */
- lck_mtx_lock(nfsiod_mutex);
if (nmp->nm_iodlink.tqe_next != NFSNOLIST)
TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink);
TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
lck_mtx_unlock(nfsiod_mutex);
+ lck_mtx_unlock(nfs_request_mutex);
+
TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
TAILQ_REMOVE(&iodq, req, r_achain);
- lck_mtx_lock(nfsiod_mutex);
- req->r_achain.tqe_next = NFSIODCOMPLETING;
- lck_mtx_unlock(nfsiod_mutex);
+ req->r_achain.tqe_next = NFSREQNOLIST;
lck_mtx_lock(&req->r_mtx);
- req->r_error = ENXIO;
docallback = !(req->r_flags & R_WAITSENT);
lck_mtx_unlock(&req->r_mtx);
if (docallback)
NFS_VFS_DBG("Unmounting %s from %s\n",
vfs_statfs(nmp->nm_mountp)->f_mntfromname,
vfs_statfs(nmp->nm_mountp)->f_mntonname);
- NFS_VFS_DBG("nfs state = %x\n", nmp->nm_state);
- NFS_VFS_DBG("nfs socket flags = %x\n", nmp->nm_sockflags);
+ NFS_VFS_DBG("nfs state = 0x%8.8x\n", nmp->nm_state);
+ NFS_VFS_DBG("nfs socket flags = 0x%8.8x\n", nmp->nm_sockflags);
NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);
NFS_VFS_DBG("mount ref count is %d\n", nmp->nm_mountp->mnt_count);
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_ref)
- panic("Some one has grabbed a ref %d\n", nmp->nm_ref);
+ panic("Some one has grabbed a ref %d state flags = 0x%8.8x\n", nmp->nm_ref, nmp->nm_state);
if (nmp->nm_saddr)
FREE(nmp->nm_saddr, M_SONAME);
}
#else
+static int
+nfs_sa_getport(struct sockaddr *sa, int *error)
+{
+ int port = 0;
+
+ if (sa->sa_family == AF_INET6)
+ port = ntohs(((struct sockaddr_in6*)sa)->sin6_port);
+ else if (sa->sa_family == AF_INET)
+ port = ntohs(((struct sockaddr_in*)sa)->sin_port);
+ else if (error)
+ *error = EIO;
+
+ return port;
+}
+
+static void
+nfs_sa_setport(struct sockaddr *sa, int port)
+{
+ if (sa->sa_family == AF_INET6)
+ ((struct sockaddr_in6*)sa)->sin6_port = htons(port);
+ else if (sa->sa_family == AF_INET)
+ ((struct sockaddr_in*)sa)->sin_port = htons(port);
+}
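+
+/*
+ * Example use in the rquota path below (IPv4 and IPv6 are handled
+ * uniformly):
+ *
+ *	nfs_sa_setport(rqsaddr, 0);             // clear before the rpcbind query
+ *	port = nfs_sa_getport(rqsaddr, &error); // sets EIO for other address families
+ */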
+
int
nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
uint32_t val = 0, bsize = 0;
struct sockaddr *rqsaddr;
struct timeval now;
+ struct timespec ts = { 1, 0 };
if (!nmp->nm_saddr)
return (ENXIO);
if (NMFLAG(nmp, NOQUOTA))
return (ENOTSUP);
- if (!nmp->nm_rqsaddr)
- MALLOC(nmp->nm_rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO);
- if (!nmp->nm_rqsaddr)
- return (ENOMEM);
- rqsaddr = nmp->nm_rqsaddr;
- if (rqsaddr->sa_family == AF_INET6)
- rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
- else if (rqsaddr->sa_family == AF_INET)
- rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
+ /*
+ * Allocate an address for rquotad if needed
+ */
+ if (!nmp->nm_rqsaddr) {
+ int need_free = 0;
+
+ MALLOC(rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO);
+ bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
+ /* Set the port to zero; we'll call rpcbind below to get the real port */
+ nfs_sa_setport(rqsaddr, 0);
+ microuptime(&now);
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if (!nmp->nm_rqsaddr) {
+ nmp->nm_rqsaddr = rqsaddr;
+ nmp->nm_rqsaddrstamp = now.tv_sec;
+ } else {
+ need_free = 1;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (need_free)
+ FREE(rqsaddr, M_SONAME);
+ }
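+ /*
+ * Note the pattern above: the sockaddr is built outside nm_lock,
+ * installed under the lock only if nm_rqsaddr is still NULL, and
+ * freed if another thread won the race.
+ */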
timeo = NMFLAG(nmp, SOFT) ? 10 : 60;
rqproto = IPPROTO_UDP; /* XXX should prefer TCP if mount is TCP */
/* check if we have a recently cached rquota port */
microuptime(&now);
- if (!rqport || ((nmp->nm_rqsaddrstamp + 60) >= (uint32_t)now.tv_sec)) {
+ lck_mtx_lock(&nmp->nm_lock);
+ rqsaddr = nmp->nm_rqsaddr;
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ while (!error && (!rqport || ((nmp->nm_rqsaddrstamp + 60) <= (uint32_t)now.tv_sec))) {
+ error = nfs_sigintr(nmp, NULL, thd, 1);
+ if (error) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (error);
+ }
+ if (nmp->nm_state & NFSSTA_RQUOTAINPROG) {
+ nmp->nm_state |= NFSSTA_WANTRQUOTA;
+ msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO-1, "nfswaitrquotaaddr", &ts);
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ continue;
+ }
+ nmp->nm_state |= NFSSTA_RQUOTAINPROG;
+ lck_mtx_unlock(&nmp->nm_lock);
+
/* send portmap request to get rquota port */
- bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo);
if (error)
- return (error);
- if (rqsaddr->sa_family == AF_INET6)
- rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
- else if (rqsaddr->sa_family == AF_INET)
- rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
- else
- return (EIO);
- if (!rqport)
- return (ENOTSUP);
+ goto out;
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ if (error)
+ goto out;
+
+ if (!rqport) {
+ /*
+ * We overload PMAPPORT as the cached port when rquotad is
+ * not currently registered or up at the server. The loop
+ * above then sees a nonzero port and defers the next
+ * rpcbind query until the cache entry goes stale, in case
+ * the service simply isn't online yet.
+ *
+ * Note that this precludes using indirect RPC calls, but
+ * we're not doing that here.
+ */
+ rqport = PMAPPORT;
+ nfs_sa_setport(rqsaddr, rqport);
+ }
microuptime(&now);
nmp->nm_rqsaddrstamp = now.tv_sec;
+ out:
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_state &= ~NFSSTA_RQUOTAINPROG;
+ if (nmp->nm_state & NFSSTA_WANTRQUOTA) {
+ nmp->nm_state &= ~NFSSTA_WANTRQUOTA;
+ wakeup(&nmp->nm_rqsaddr);
+ }
}
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (error)
+ return (error);
+
+ /* Using PMAPPORT for unavailable rquota service */
+ if (rqport == PMAPPORT)
+ return (ENOTSUP);
/* rquota request */
nfsm_chain_null(&nmreq);
// PUTFH + GETATTR
numops = 2;
nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
- nfsm_chain_add_compound_header(error, &nmreq, "quota", numops);
+ nfsm_chain_add_compound_header(error, &nmreq, "quota", nmp->nm_minor_vers, numops);
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
+ if (nmp->nm_etype.selected < nmp->nm_etype.count)
+ NFS_BITMAP_SET(mattrs, NFS_MATTR_KERB_ETYPE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
if (nmp->nm_vers >= NFS_VER4) {
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_EPHEMERAL);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCALLBACK);
- NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONAMEDATTR);
+ NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NAMEDATTR);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOACL);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_ACLONLY);
}
NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL);
if (NMFLAG(nmp, NOCALLBACK))
NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK);
- if (NMFLAG(nmp, NONAMEDATTR))
- NFS_BITMAP_SET(mflags, NFS_MFLAG_NONAMEDATTR);
+ if (NMFLAG(nmp, NAMEDATTR))
+ NFS_BITMAP_SET(mflags, NFS_MFLAG_NAMEDATTR);
if (NMFLAG(nmp, NOACL))
NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL);
if (NMFLAG(nmp, ACLONLY))
xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN);
xb_add_32(error, &xbinfo, nmp->nm_vers); /* NFS_VERSION */
if (nmp->nm_vers >= NFS_VER4)
- xb_add_32(error, &xbinfo, 0); /* NFS_MINOR_VERSION */
+ xb_add_32(error, &xbinfo, nmp->nm_minor_vers); /* NFS_MINOR_VERSION */
xb_add_32(error, &xbinfo, nmp->nm_rsize); /* READ_SIZE */
xb_add_32(error, &xbinfo, nmp->nm_wsize); /* WRITE_SIZE */
xb_add_32(error, &xbinfo, nmp->nm_readdirsize); /* READDIR_SIZE */
xb_add_32(error, &xbinfo, 1); /* SECURITY */
xb_add_32(error, &xbinfo, nmp->nm_auth);
}
+ if (nmp->nm_etype.selected < nmp->nm_etype.count) {
+ xb_add_32(error, &xbinfo, nmp->nm_etype.count);
+ xb_add_32(error, &xbinfo, nmp->nm_etype.selected);
+ for (uint32_t j=0; j < nmp->nm_etype.count; j++)
+ xb_add_32(error, &xbinfo, nmp->nm_etype.etypes[j]);
+ nfsmerr_if(error);
+ }
xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */
nfsmerr_if(error);
snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp",
user_addr_t newp, size_t newlen, vfs_context_t ctx)
{
int error = 0, val;
+#ifndef CONFIG_EMBEDDED
int softnobrowse;
+#endif
struct sysctl_req *req = NULL;
union union_vfsidctl vc;
mount_t mp;
struct nfs_exportfs *nxfs;
struct nfs_export *nx;
struct nfs_active_user_list *ulist;
- struct nfs_export_stat_desc stat_desc;
+ struct nfs_export_stat_desc stat_desc = {};
struct nfs_export_stat_rec statrec;
struct nfs_user_stat_node *unode, *unode_next;
- struct nfs_user_stat_desc ustat_desc;
+ struct nfs_user_stat_desc ustat_desc = {};
struct nfs_user_stat_user_rec ustat_rec;
struct nfs_user_stat_path_rec upath_rec;
uint bytes_avail, bytes_total, recs_copied;
case VFS_CTL_TIMEO:
case VFS_CTL_NOLOCKS:
case VFS_CTL_NSTATUS:
+#ifndef CONFIG_EMBEDDED
case VFS_CTL_QUERY:
+#endif
req = CAST_DOWN(struct sysctl_req *, oldp);
if (req == NULL) {
return EFAULT;
req->newlen = vc.vc32.vc_len;
}
break;
+#if CONFIG_EMBEDDED
+ case VFS_CTL_QUERY:
+ return EPERM;
+#endif
}
switch(name[0]) {
break;
/* build exported filesystem path */
+ memset(statrec.path, 0, sizeof(statrec.path));
snprintf(statrec.path, sizeof(statrec.path), "%s%s%s",
nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
nx->nx_path);
LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
/* copy out path */
if (bytes_avail >= sizeof(struct nfs_user_stat_path_rec)) {
+ memset(upath_rec.path, 0, sizeof(upath_rec.path));
snprintf(upath_rec.path, sizeof(upath_rec.path), "%s%s%s",
nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
nx->nx_path);
if (bytes_avail >= sizeof(struct nfs_user_stat_user_rec)) {
/* prepare a user stat rec for copying out */
ustat_rec.uid = unode->uid;
+ memset(&ustat_rec.sock, 0, sizeof(ustat_rec.sock));
bcopy(&unode->sock, &ustat_rec.sock, unode->sock.ss_len);
ustat_rec.ops = unode->ops;
ustat_rec.bytes_read = unode->bytes_read;
lck_mtx_unlock(&nmp->nm_lock);
}
break;
+#ifndef CONFIG_EMBEDDED
case VFS_CTL_QUERY:
lck_mtx_lock(&nmp->nm_lock);
/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
lck_mtx_unlock(&nmp->nm_lock);
error = SYSCTL_OUT(req, &vq, sizeof(vq));
break;
+#endif
case VFS_CTL_TIMEO:
if (req->oldptr != USER_ADDR_NULL) {
lck_mtx_lock(&nmp->nm_lock);