/*
- * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
uint32_t nfs_lock_owner_seqnum = 0;
thread_call_t nfs4_callback_timer_call;
int nfs4_callback_timer_on = 0;
+char nfs4_default_domain[MAXPATHLEN];
/* nfsiod */
lck_grp_t *nfsiod_lck_grp;
int nfs_vfs_init(struct vfsconf *);
int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t);
-struct vfsops nfs_vfsops = {
- nfs_vfs_mount,
- nfs_vfs_start,
- nfs_vfs_unmount,
- nfs_vfs_root,
- nfs_vfs_quotactl,
- nfs_vfs_getattr,
- nfs_vfs_sync,
- nfs_vfs_vget,
- nfs_vfs_fhtovp,
- nfs_vfs_vptofh,
- nfs_vfs_init,
- nfs_vfs_sysctl,
- NULL, /* setattr */
- { NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL, /* reserved */
- NULL } /* reserved */
+/*
+ * NFS VFS operations table.
+ * Uses designated initializers; entries not listed here (e.g. the
+ * setattr slot and the reserved slots present in the old positional
+ * initializer) default to NULL.
+ */
+const struct vfsops nfs_vfsops = {
+	.vfs_mount = nfs_vfs_mount,
+	.vfs_start = nfs_vfs_start,
+	.vfs_unmount = nfs_vfs_unmount,
+	.vfs_root = nfs_vfs_root,
+	.vfs_quotactl = nfs_vfs_quotactl,
+	.vfs_getattr = nfs_vfs_getattr,
+	.vfs_sync = nfs_vfs_sync,
+	.vfs_vget = nfs_vfs_vget,
+	.vfs_fhtovp = nfs_vfs_fhtovp,
+	.vfs_vptofh = nfs_vfs_vptofh,
+	.vfs_init = nfs_vfs_init,
+	.vfs_sysctl = nfs_vfs_sysctl,
+	// We do not support the remaining VFS ops
};
int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
#endif
-struct nfs_funcs nfs3_funcs = {
+/*
+ * NFSv3 protocol dispatch table.
+ * NOTE(review): positional initializers — entry order must match the
+ * member order of struct nfs_funcs; the listing here may be elided in
+ * this view, so verify against the struct declaration before reordering.
+ */
+const struct nfs_funcs nfs3_funcs = {
	nfs3_mount,
	nfs3_update_statfs,
	nfs3_getquota,
	nfs3_unlock_rpc,
	nfs3_getlock_rpc
};
-struct nfs_funcs nfs4_funcs = {
+const struct nfs_funcs nfs4_funcs = {
nfs4_mount,
nfs4_update_statfs,
nfs4_getquota,
return (error);
}
+/*
+ * Return an NFS volume name from the mntfrom name.
+ *
+ * Derives a display name for the volume from the mount's
+ * f_mntfromname (typically "host:/export/path"):
+ *   1. trailing '/' characters are ignored;
+ *   2. the component after the last remaining '/' is used;
+ *   3. if there is no slash, or nothing follows the last slash,
+ *      everything after the first ':' is used instead;
+ *   4. failing both, the whole mntfrom name is used.
+ * The chosen component is copied into volname; at most len bytes are
+ * written and the result is always NUL-terminated by strlcpy.
+ * If the mntfrom name is empty or not NUL-terminated within
+ * MAXPATHLEN bytes, "Bad volname" is returned instead.
+ */
+static void
+nfs_get_volname(struct mount *mp, char *volname, size_t len)
+{
+	const char *ptr, *cptr;
+	const char *mntfrom = mp->mnt_vfsstat.f_mntfromname;
+	size_t mflen = strnlen(mntfrom, MAXPATHLEN+1);	/* > MAXPATHLEN means unterminated */
+
+	if (mflen > MAXPATHLEN || mflen == 0) {
+		strlcpy(volname, "Bad volname", len);
+		return;
+	}
+
+	/* Move back over trailing slashes */
+	for (ptr = &mntfrom[mflen-1]; ptr != mntfrom && *ptr == '/'; ptr--) {
+		mflen--;
+	}
+
+	/* Find first character after the last slash */
+	cptr = ptr = NULL;
+	for(size_t i = 0; i < mflen; i++) {
+		if (mntfrom[i] == '/')
+			ptr = &mntfrom[i+1];
+		/* And the first character after the first colon */
+		else if (cptr == NULL && mntfrom[i] == ':')
+			cptr = &mntfrom[i+1];
+	}
+
+	/*
+	 * No slash or nothing after the last slash
+	 * use everything past the first colon
+	 */
+	if (ptr == NULL || *ptr == '\0')
+		ptr = cptr;
+	/* Otherwise use the mntfrom name */
+	if (ptr == NULL)
+		ptr = mntfrom;
+
+	/* Length of the chosen suffix; clamp the copy to the caller's buffer */
+	mflen = &mntfrom[mflen] - ptr;
+	len = mflen+1 < len ? mflen+1 : len;
+
+	strlcpy(volname, ptr, len);
+}
/*
* The NFS VFS_GETATTR function: "statfs"-type information is retrieved
lck_mtx_unlock(&nmp->nm_lock);
}
+ if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
+ /*%%% If failover support is implemented we may need to take nm_lock */
+ nfs_get_volname(mp, fsap->f_vol_name, MAXPATHLEN);
+ VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
+ }
if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
u_int32_t caps, valid;
nfsnode_t np = nmp->nm_dnp;
// caps |= VOL_CAP_FMT_OPENDENYMODES;
// valid |= VOL_CAP_FMT_OPENDENYMODES;
}
+ // no version of nfs supports immutable files
+ caps |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
+ valid |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
+
fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
// VOL_CAP_FMT_PERSISTENTOBJECTIDS |
// VOL_CAP_FMT_SYMBOLICLINKS |
if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
fsap->f_attributes.validattr.commonattr = 0;
fsap->f_attributes.validattr.volattr =
- ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
+ ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
fsap->f_attributes.validattr.dirattr = 0;
fsap->f_attributes.validattr.fileattr = 0;
fsap->f_attributes.validattr.forkattr = 0;
fsap->f_attributes.nativeattr.commonattr = 0;
fsap->f_attributes.nativeattr.volattr =
- ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
+ ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
fsap->f_attributes.nativeattr.dirattr = 0;
fsap->f_attributes.nativeattr.fileattr = 0;
fsap->f_attributes.nativeattr.forkattr = 0;
mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
mp->mnt_ioflags = 0;
mp->mnt_realrootvp = NULLVP;
- mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
+ mp->mnt_authcache_ttl = 0; /* Always go to our lookup */
mount_lock_init(mp);
TAILQ_INIT(&mp->mnt_vnodelist);
NFS_CLEAR_ATTRIBUTES(bitmap);
NFS4_DEFAULT_ATTRIBUTES(bitmap);
/* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
- if (NMFLAG(nmp, NONAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs"))
+ if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs"))
NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
nfsm_chain_build_done(error, &nmreq);
nfsmout_if(error);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
nfsm_chain_get_32(error, &nmrep, fh.fh_len);
+ if (fh.fh_len > sizeof(fh.fh_data))
+ error = EBADRPC;
+ nfsmout_if(error);
nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
if (!error) {
gotfh:
/* get attrs for mount point root */
- numops = NMFLAG(nmp, NONAMEDATTR) ? 2 : 3; // PUTFH + GETATTR + OPENATTR
+ numops = NMFLAG(nmp, NAMEDATTR) ? 3 : 2; // PUTFH + GETATTR + OPENATTR
nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
numops--;
NFS_CLEAR_ATTRIBUTES(bitmap);
NFS4_DEFAULT_ATTRIBUTES(bitmap);
/* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
- if (NMFLAG(nmp, NONAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs")))
+ if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs")))
NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
- if (!NMFLAG(nmp, NONAMEDATTR)) {
+ if (NMFLAG(nmp, NAMEDATTR)) {
numops--;
nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
nfsm_chain_add_32(error, &nmreq, 0);
NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
nfsmout_if(error);
- if (!NMFLAG(nmp, NONAMEDATTR)) {
+ if (NMFLAG(nmp, NAMEDATTR)) {
nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
if (error == ENOENT)
error = 0;
uint32_t *mflags;
uint32_t argslength, attrslength;
struct nfs_location_index firstloc = { NLI_VALID, 0, 0, 0 };
-
+ static const struct nfs_etype nfs_default_etypes = {
+ .count = NFS_MAX_ETYPES,
+ .selected = NFS_MAX_ETYPES,
+ .etypes = { NFS_AES256_CTS_HMAC_SHA1_96,
+ NFS_AES128_CTS_HMAC_SHA1_96,
+ NFS_DES3_CBC_SHA1_KD
+ }
+ };
/* make sure mbuf constants are set up */
if (!nfs_mbuf_mhlen)
nfs_mbuf_init();
vfs_getnewfsid(mp);
nmp->nm_mountp = mp;
vfs_setauthopaque(mp);
+ /*
+ * Disable cache_lookup_path for NFS. NFS lookup always needs
+ * to be called to check if the directory attribute cache is
+ * valid and possibly purge the directory before calling
+ * cache_lookup.
+ */
+ vfs_setauthcache_ttl(mp, 0);
nfs_nhinit_finish();
nmp->nm_acregmax = NFS_MAXATTRTIMO;
nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
+ nmp->nm_etype = nfs_default_etypes;
nmp->nm_auth = RPCAUTH_SYS;
nmp->nm_iodlink.tqe_next = NFSNOLIST;
nmp->nm_deadtimeout = 0;
xb_get_32(error, &xb, val); /* version */
xb_get_32(error, &xb, argslength); /* args length */
xb_get_32(error, &xb, val); /* XDR args version */
- if (val != NFS_XDRARGS_VERSION_0)
+ if (val != NFS_XDRARGS_VERSION_0 || argslength < ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)) {
error = EINVAL;
+ }
len = NFS_MATTR_BITMAP_LEN;
xb_get_bitmap(error, &xb, mattrs, len); /* mount attribute bitmap */
attrslength = 0;
/* start with the first flavor */
nmp->nm_auth = nmp->nm_sec.flavors[0];
}
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
+ uint32_t etypecnt;
+ xb_get_32(error, &xb, etypecnt);
+ if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES)))
+ error = EINVAL;
+ nfsmerr_if(error);
+ nmp->nm_etype.count = etypecnt;
+ xb_get_32(error, &xb, nmp->nm_etype.selected);
+ nfsmerr_if(error);
+ if (etypecnt) {
+ nmp->nm_etype.selected = etypecnt; /* Nothing is selected yet, so set selected to count */
+ for (i=0; i < etypecnt; i++) {
+ xb_get_32(error, &xb, nmp->nm_etype.etypes[i]);
+ /* Check for valid encryption type */
+ switch (nmp->nm_etype.etypes[i]) {
+ case NFS_DES3_CBC_SHA1_KD:
+ case NFS_AES128_CTS_HMAC_SHA1_96:
+ case NFS_AES256_CTS_HMAC_SHA1_96:
+ break;
+ default:
+ error = EINVAL;
+ }
+ }
+ }
+ }
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
xb_get_32(error, &xb, nmp->nm_numgrps);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
error = ENOMEM;
xb_get_32(error, &xb, nmp->nm_fh->fh_len);
nfsmerr_if(error);
- if (nmp->nm_fh->fh_len < 0 ||
- (size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
+ if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
error = EINVAL;
else
error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
} else {
/* ignore these if not v4 */
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK);
- NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NONAMEDATTR);
+ NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NAMEDATTR);
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
}
* buffers into multiple requests if the buffer size is
* larger than the I/O size.
*/
+#ifndef CONFIG_EMBEDDED
iosize = max(nmp->nm_rsize, nmp->nm_wsize);
if (iosize < PAGE_SIZE)
iosize = PAGE_SIZE;
+#else
+ iosize = PAGE_SIZE;
+#endif
nmp->nm_biosize = trunc_page_32(iosize);
/* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. */
lck_mtx_unlock(&nmp->nm_lock);
return (0);
nfsmerr:
- nfs_mount_cleanup(nmp);
+ nfs_mount_drain_and_cleanup(nmp);
return (error);
}
while (!error && (count-- > 0))
xb_copy_32(error, &xb, &xbnew, val);
}
+ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
+ xb_copy_32(error, &xb, &xbnew, count);
+ xb_add_32(error, &xbnew, -1);
+ while (!error && (count-- > 0))
+ xb_copy_32(error, &xb, &xbnew, val);
+ }
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
xb_copy_32(error, &xb, &xbnew, val);
if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE))
if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_renew_timer) {
thread_call_cancel(nmp->nm_renew_timer);
thread_call_free(nmp->nm_renew_timer);
+ nmp->nm_renew_timer = NULL;
}
lck_mtx_unlock(&nmp->nm_lock);
if (nmp->nm_longid->nci_id)
FREE(nmp->nm_longid->nci_id, M_TEMP);
FREE(nmp->nm_longid, M_TEMP);
+ nmp->nm_longid = NULL;
lck_mtx_unlock(nfs_global_mutex);
}
/* Since we've drop the request mutex we can now safely unreference the request */
TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
TAILQ_REMOVE(&resendq, req, r_rchain);
+ /* Make sure we don't try and remove again in nfs_request_destroy */
+ req->r_rchain.tqe_next = NFSREQNOLIST;
nfs_request_rele(req);
}
NFS_VFS_DBG("Unmounting %s from %s\n",
vfs_statfs(nmp->nm_mountp)->f_mntfromname,
vfs_statfs(nmp->nm_mountp)->f_mntonname);
- NFS_VFS_DBG("nfs state = %x\n", nmp->nm_state);
- NFS_VFS_DBG("nfs socket flags = %x\n", nmp->nm_sockflags);
+ NFS_VFS_DBG("nfs state = 0x%8.8x\n", nmp->nm_state);
+ NFS_VFS_DBG("nfs socket flags = 0x%8.8x\n", nmp->nm_sockflags);
NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);
NFS_VFS_DBG("mount ref count is %d\n", nmp->nm_mountp->mnt_count);
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_ref)
- panic("Some one has grabbed a ref %d\n", nmp->nm_ref);
+ panic("Some one has grabbed a ref %d state flags = 0x%8.8x\n", nmp->nm_ref, nmp->nm_state);
if (nmp->nm_saddr)
FREE(nmp->nm_saddr, M_SONAME);
}
#else
+/*
+ * Return the port number (host byte order) from an AF_INET or
+ * AF_INET6 sockaddr.  For any other address family, returns 0 and
+ * sets *error to EIO when error is non-NULL; *error is left
+ * untouched on success.
+ */
+static int
+nfs_sa_getport(struct sockaddr *sa, int *error)
+{
+	int port = 0;
+
+	if (sa->sa_family == AF_INET6)
+		port = ntohs(((struct sockaddr_in6*)sa)->sin6_port);
+	else if (sa->sa_family == AF_INET)
+		port = ntohs(((struct sockaddr_in*)sa)->sin_port);
+	else if (error)
+		*error = EIO;
+
+	return port;
+}
+
+/*
+ * Store a port number (given in host byte order) into an AF_INET or
+ * AF_INET6 sockaddr.  Silently a no-op for any other address family.
+ */
+static void
+nfs_sa_setport(struct sockaddr *sa, int port)
+{
+	if (sa->sa_family == AF_INET6)
+		((struct sockaddr_in6*)sa)->sin6_port = htons(port);
+	else if (sa->sa_family == AF_INET)
+		((struct sockaddr_in*)sa)->sin_port = htons(port);
+}
+
int
nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
uint32_t val = 0, bsize = 0;
struct sockaddr *rqsaddr;
struct timeval now;
+ struct timespec ts = { 1, 0 };
if (!nmp->nm_saddr)
return (ENXIO);
if (NMFLAG(nmp, NOQUOTA))
return (ENOTSUP);
- if (!nmp->nm_rqsaddr)
- MALLOC(nmp->nm_rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO);
- if (!nmp->nm_rqsaddr)
- return (ENOMEM);
- rqsaddr = nmp->nm_rqsaddr;
- if (rqsaddr->sa_family == AF_INET6)
- rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
- else if (rqsaddr->sa_family == AF_INET)
- rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
+ /*
+ * Allocate an address for rquotad if needed
+ */
+ if (!nmp->nm_rqsaddr) {
+ int need_free = 0;
+
+ MALLOC(rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO);
+ bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
+ /* Set the port to zero, will call rpcbind to get the port below */
+ nfs_sa_setport(rqsaddr, 0);
+ microuptime(&now);
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if (!nmp->nm_rqsaddr) {
+ nmp->nm_rqsaddr = rqsaddr;
+ nmp->nm_rqsaddrstamp = now.tv_sec;
+ } else {
+ need_free = 1;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (need_free)
+ FREE(rqsaddr, M_SONAME);
+ }
timeo = NMFLAG(nmp, SOFT) ? 10 : 60;
rqproto = IPPROTO_UDP; /* XXX should prefer TCP if mount is TCP */
/* check if we have a recently cached rquota port */
microuptime(&now);
- if (!rqport || ((nmp->nm_rqsaddrstamp + 60) >= (uint32_t)now.tv_sec)) {
+ lck_mtx_lock(&nmp->nm_lock);
+ rqsaddr = nmp->nm_rqsaddr;
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ while (!error && (!rqport || ((nmp->nm_rqsaddrstamp + 60) <= (uint32_t)now.tv_sec))) {
+ error = nfs_sigintr(nmp, NULL, thd, 1);
+ if (error) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (error);
+ }
+ if (nmp->nm_state & NFSSTA_RQUOTAINPROG) {
+ nmp->nm_state |= NFSSTA_WANTRQUOTA;
+ msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO-1, "nfswaitrquotaaddr", &ts);
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ continue;
+ }
+ nmp->nm_state |= NFSSTA_RQUOTAINPROG;
+ lck_mtx_unlock(&nmp->nm_lock);
+
/* send portmap request to get rquota port */
- bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo);
if (error)
- return (error);
- if (rqsaddr->sa_family == AF_INET6)
- rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
- else if (rqsaddr->sa_family == AF_INET)
- rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
- else
- return (EIO);
- if (!rqport)
- return (ENOTSUP);
+ goto out;
+ rqport = nfs_sa_getport(rqsaddr, &error);
+ if (error)
+ goto out;
+
+ if (!rqport) {
+ /*
+ * We overload PMAPPORT for the port if rquotad is not
+ * currently registered or up at the server. In the
+ * while loop above, port will be set and we will defer
+ * for a bit. Perhaps the service isn't online yet.
+ *
+ * Note that precludes using indirect, but we're not doing
+ * that here.
+ */
+ rqport = PMAPPORT;
+ nfs_sa_setport(rqsaddr, rqport);
+ }
microuptime(&now);
nmp->nm_rqsaddrstamp = now.tv_sec;
+ out:
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_state &= ~NFSSTA_RQUOTAINPROG;
+ if (nmp->nm_state & NFSSTA_WANTRQUOTA) {
+ nmp->nm_state &= ~NFSSTA_WANTRQUOTA;
+ wakeup(&nmp->nm_rqsaddr);
+ }
}
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (error)
+ return (error);
+
+ /* Using PMAPPORT for unavailable rquota service */
+ if (rqport == PMAPPORT)
+ return (ENOTSUP);
/* rquota request */
nfsm_chain_null(&nmreq);
NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
+ if (nmp->nm_etype.selected < nmp->nm_etype.count)
+ NFS_BITMAP_SET(mattrs, NFS_MATTR_KERB_ETYPE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
if (nmp->nm_vers >= NFS_VER4) {
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_EPHEMERAL);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCALLBACK);
- NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONAMEDATTR);
+ NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NAMEDATTR);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOACL);
NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_ACLONLY);
}
NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL);
if (NMFLAG(nmp, NOCALLBACK))
NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK);
- if (NMFLAG(nmp, NONAMEDATTR))
- NFS_BITMAP_SET(mflags, NFS_MFLAG_NONAMEDATTR);
+ if (NMFLAG(nmp, NAMEDATTR))
+ NFS_BITMAP_SET(mflags, NFS_MFLAG_NAMEDATTR);
if (NMFLAG(nmp, NOACL))
NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL);
if (NMFLAG(nmp, ACLONLY))
xb_add_32(error, &xbinfo, 1); /* SECURITY */
xb_add_32(error, &xbinfo, nmp->nm_auth);
}
+ if (nmp->nm_etype.selected < nmp->nm_etype.count) {
+ xb_add_32(error, &xbinfo, nmp->nm_etype.count);
+ xb_add_32(error, &xbinfo, nmp->nm_etype.selected);
+ for (uint32_t j=0; j < nmp->nm_etype.count; j++)
+ xb_add_32(error, &xbinfo, nmp->nm_etype.etypes[j]);
+ nfsmerr_if(error);
+ }
xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */
nfsmerr_if(error);
snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp",
user_addr_t newp, size_t newlen, vfs_context_t ctx)
{
int error = 0, val;
+#ifndef CONFIG_EMBEDDED
int softnobrowse;
+#endif
struct sysctl_req *req = NULL;
union union_vfsidctl vc;
mount_t mp;
struct nfs_exportfs *nxfs;
struct nfs_export *nx;
struct nfs_active_user_list *ulist;
- struct nfs_export_stat_desc stat_desc;
+ struct nfs_export_stat_desc stat_desc = {};
struct nfs_export_stat_rec statrec;
struct nfs_user_stat_node *unode, *unode_next;
- struct nfs_user_stat_desc ustat_desc;
+ struct nfs_user_stat_desc ustat_desc = {};
struct nfs_user_stat_user_rec ustat_rec;
struct nfs_user_stat_path_rec upath_rec;
uint bytes_avail, bytes_total, recs_copied;
case VFS_CTL_TIMEO:
case VFS_CTL_NOLOCKS:
case VFS_CTL_NSTATUS:
+#ifndef CONFIG_EMBEDDED
case VFS_CTL_QUERY:
+#endif
req = CAST_DOWN(struct sysctl_req *, oldp);
if (req == NULL) {
return EFAULT;
req->newlen = vc.vc32.vc_len;
}
break;
+#if CONFIG_EMBEDDED
+ case VFS_CTL_QUERY:
+ return EPERM;
+#endif
}
switch(name[0]) {
break;
/* build exported filesystem path */
+ memset(statrec.path, 0, sizeof(statrec.path));
snprintf(statrec.path, sizeof(statrec.path), "%s%s%s",
nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
nx->nx_path);
LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
/* copy out path */
if (bytes_avail >= sizeof(struct nfs_user_stat_path_rec)) {
+ memset(upath_rec.path, 0, sizeof(upath_rec.path));
snprintf(upath_rec.path, sizeof(upath_rec.path), "%s%s%s",
nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
nx->nx_path);
if (bytes_avail >= sizeof(struct nfs_user_stat_user_rec)) {
/* prepare a user stat rec for copying out */
ustat_rec.uid = unode->uid;
+ memset(&ustat_rec.sock, 0, sizeof(ustat_rec.sock));
bcopy(&unode->sock, &ustat_rec.sock, unode->sock.ss_len);
ustat_rec.ops = unode->ops;
ustat_rec.bytes_read = unode->bytes_read;
lck_mtx_unlock(&nmp->nm_lock);
}
break;
+#ifndef CONFIG_EMBEDDED
case VFS_CTL_QUERY:
lck_mtx_lock(&nmp->nm_lock);
/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
lck_mtx_unlock(&nmp->nm_lock);
error = SYSCTL_OUT(req, &vq, sizeof(vq));
break;
+#endif
case VFS_CTL_TIMEO:
if (req->oldptr != USER_ADDR_NULL) {
lck_mtx_lock(&nmp->nm_lock);