/*
- * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/tprintf.h>
#include <libkern/OSAtomic.h>
+#include <sys/reboot.h>
#include <sys/time.h>
#include <kern/clock.h>
#include <kern/task.h>
#define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
#define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
+#ifndef SUN_LEN
+#define SUN_LEN(su) \
+ (sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
+#endif /* SUN_LEN */
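+/*
+ * Illustrative: for a sun_path of "/tmp/nfs.sock" (13 bytes), SUN_LEN()
+ * evaluates to sizeof(struct sockaddr_un) - sizeof(sun_path) + 13, i.e. the
+ * number of meaningful bytes; the AF_LOCAL sa_len check below accepts either
+ * this or the full structure size.
+ */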
+
/* XXX */
boolean_t current_thread_aborted(void);
kern_return_t thread_terminate(thread_t);
+ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE);
+ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE);
+ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc",
+ sizeof(struct nfsrv_descript), ZC_NONE);
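+
+/*
+ * These zones replace the earlier MALLOC/MALLOC_ZONE allocations (M_TEMP,
+ * M_NFSREQ, M_NFSRVDESC): objects are obtained with zalloc()/zalloc_flags()
+ * and returned with NFS_ZFREE(), which frees the object and NULLs the pointer.
+ */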
+
#if CONFIG_NFS_SERVER
int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
* Get the mntfromname (or path portion only) for a given location.
*/
void
-nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly)
+nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
{
struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
char *p;
nfs_socket_create(
struct nfsmount *nmp,
struct sockaddr *sa,
- int sotype,
+ uint8_t sotype,
in_port_t port,
uint32_t protocol,
uint32_t vers,
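+	/*
+	 * Each supported address family is validated against its expected
+	 * sa_len before any family-specific fields are read, so a short or
+	 * oversized sockaddr fails with EINVAL instead of being misread.
+	 */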
switch (sa->sa_family) {
case AF_INET:
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ return EINVAL;
+ }
+ sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
+ if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
+ strlcpy(naddr, "<unknown>", sizeof(naddr));
+ }
+ break;
case AF_INET6:
- if (sa->sa_family == AF_INET) {
- sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
- } else {
- sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ return EINVAL;
}
+ sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
strlcpy(naddr, "<unknown>", sizeof(naddr));
}
break;
case AF_LOCAL:
+ if (sa->sa_len != sizeof(struct sockaddr_un) && sa->sa_len != SUN_LEN((struct sockaddr_un *)sa)) {
+ return EINVAL;
+ }
strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
break;
default:
}
return ENOMEM;
}
- lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
+ lck_mtx_init(&nso->nso_lock, &nfs_request_grp, LCK_ATTR_NULL);
nso->nso_sotype = sotype;
if (nso->nso_sotype == SOCK_STREAM) {
nfs_rpc_record_state_init(&nso->nso_rrs);
if (nso->nso_sotype == SOCK_STREAM) {
nfs_rpc_record_state_cleanup(&nso->nso_rrs);
}
- lck_mtx_destroy(&nso->nso_lock, nfs_request_grp);
+ lck_mtx_destroy(&nso->nso_lock, &nfs_request_grp);
if (nso->nso_saddr) {
FREE(nso->nso_saddr, M_SONAME);
}
* Soft mounts will want to abort sooner.
*/
struct timeval timeo;
- int on = 1, proto;
+ int on = 1, proto, reserve, error;
timeo.tv_usec = 0;
timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
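+	/* i.e. 5-second socket timeouts for soft or squishable mounts, 60 seconds for hard mounts */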
sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}
}
- if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */
- int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024);
- sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
- sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
+
+ /* set socket buffer sizes for UDP/TCP */
+ reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
+	error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
+ if (error) {
+ log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
+ }
+
+ reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
+ error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
+ if (error) {
+ log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
}
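+	/*
+	 * For TCP the reservation is at least nfs_tcp_sockbuf (128KB by default,
+	 * defined below) and at least twice the mount's wsize/rsize, presumably
+	 * so two full-sized write or read RPCs can be buffered at once.
+	 */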
+
/* set SO_NOADDRERR to detect network changes ASAP */
sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
/* just playin' it safe with upcalls */
uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
fhandle_t *fh = NULL;
char *path = NULL;
- in_port_t port;
+ in_port_t port = 0;
int addrtotal = 0;
/* paranoia... check that we have at least one address in the locations */
vfs_statfs(nmp->nm_mountp)->f_mntfromname);
}
if (fh) {
- FREE(fh, M_TEMP);
+ NFS_ZFREE(nfs_fhandle_zone, fh);
}
if (path) {
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ NFS_ZFREE(ZV_NAMEI, path);
}
NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
}
}
}
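+	/* NFSv3: confirm the server can satisfy the requested lock mode before settling on this socket */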
+ if (!error) {
+ error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nso);
+ return error;
+ }
+ }
if (saddr) {
- MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO);
+ fh = zalloc(nfs_fhandle_zone);
}
if (saddr && fh) {
- MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ path = zalloc(ZV_NAMEI);
}
if (!saddr || !fh || !path) {
if (!error) {
error = ENOMEM;
}
if (fh) {
- FREE(fh, M_TEMP);
+ NFS_ZFREE(nfs_fhandle_zone, fh);
}
if (path) {
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ NFS_ZFREE(ZV_NAMEI, path);
}
- fh = NULL;
- path = NULL;
nfs_socket_search_update_error(&nss, error);
nfs_socket_destroy(nso);
goto keepsearching;
if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
found = 0;
}
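+		/* OS_FALLTHROUGH marks the drop into the following labels as intentional for the compiler */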
+ OS_FALLTHROUGH;
case RPCAUTH_NONE:
case RPCAUTH_KRB5:
case RPCAUTH_KRB5I:
}
error = !found ? EAUTH : 0;
}
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
- path = NULL;
+ NFS_ZFREE(ZV_NAMEI, path);
if (error) {
nfs_socket_search_update_error(&nss, error);
- FREE(fh, M_TEMP);
- fh = NULL;
+ NFS_ZFREE(nfs_fhandle_zone, fh);
nfs_socket_destroy(nso);
goto keepsearching;
}
if (nmp->nm_fh) {
- FREE(nmp->nm_fh, M_TEMP);
+ NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
}
nmp->nm_fh = fh;
fh = NULL;
nmp->nm_nss = NULL;
nfs_socket_search_cleanup(&nss);
if (fh) {
- FREE(fh, M_TEMP);
+ NFS_ZFREE(nfs_fhandle_zone, fh);
}
if (path) {
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ NFS_ZFREE(ZV_NAMEI, path);
}
NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
return 0;
* as needing a resend. (Though nfs_need_reconnect() probably
* marked them all already.)
*/
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
if (rq->r_nmp == nmp) {
lck_mtx_lock(&rq->r_mtx);
lck_mtx_unlock(&rq->r_mtx);
}
}
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
return 0;
}
* Loop through outstanding request list and
* mark all requests as needing a resend.
*/
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
if (rq->r_nmp == nmp) {
lck_mtx_lock(&rq->r_mtx);
lck_mtx_unlock(&rq->r_mtx);
}
}
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
}
if (!req) {
break;
}
+ /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
+ lck_mtx_unlock(&nmp->nm_lock);
+ lck_mtx_lock(&req->r_mtx);
+ lck_mtx_lock(&nmp->nm_lock);
+ if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
+ lck_mtx_unlock(&req->r_mtx);
+ continue;
+ }
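+		/* both locks are held and the request is still flagged and queued; it is now safe to dequeue it */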
TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_flags &= ~R_RESENDQ;
req->r_rchain.tqe_next = NFSREQNOLIST;
lck_mtx_unlock(&nmp->nm_lock);
- lck_mtx_lock(&req->r_mtx);
/* Note that we have a reference on the request that was taken by nfs_asyncio_resend */
if (req->r_error || req->r_nmrep.nmc_mhead) {
dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
- req->r_flags &= ~R_RESENDQ;
wakeup(req);
lck_mtx_unlock(&req->r_mtx);
if (dofinish) {
error = nfs_request_send(req, 0);
}
lck_mtx_lock(&req->r_mtx);
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
if (error) {
req->r_error = error;
}
error = nfs_send(req, 0);
lck_mtx_lock(&req->r_mtx);
if (!error) {
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
wakeup(req);
lck_mtx_unlock(&req->r_mtx);
nfs_request_rele(req);
}
}
req->r_error = error;
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
wakeup(req);
dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
lck_mtx_unlock(&req->r_mtx);
int error, on = 1;
in_port_t port;
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
if (nfs4_cb_id == 0) {
TAILQ_INIT(&nfs4_cb_mounts);
TAILQ_INIT(&nfs4_cb_socks);
TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
if (nfs4_cb_so) {
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
return;
}
}
so = nfs4_cb_so;
+ if (NFS_PORT_INVALID(nfs_callback_port)) {
+ error = EINVAL;
+ log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
+ goto fail;
+ }
+
sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
sin.sin_len = sizeof(struct sockaddr_in);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = htonl(INADDR_ANY);
- sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
+ sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
error = sock_bind(so, (struct sockaddr *)&sin);
if (error) {
log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
/* try to use specified port or same port as IPv4 */
- port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
+ port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
ipv6_bind_again:
sin6.sin6_len = sizeof(struct sockaddr_in6);
sin6.sin6_family = AF_INET6;
fail:
if (error) {
nfs4_cb_so = nfs4_cb_so6 = NULL;
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
if (so) {
sock_shutdown(so, SHUT_RDWR);
sock_close(so);
sock_close(so6);
}
} else {
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
}
}
struct nfs4_cb_sock_list cb_socks;
struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
+ if (nmp->nm_cbid == 0) {
+ lck_mtx_unlock(&nfs_global_mutex);
+ return;
+ }
TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
/* wait for any callbacks in progress to complete */
while (nmp->nm_cbrefs) {
- msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+ msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
}
nmp->nm_cbid = 0;
if (--nfs4_cb_so_usecount) {
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
return;
}
so = nfs4_cb_so;
nfs4_cb_so = nfs4_cb_so6 = NULL;
TAILQ_INIT(&cb_socks);
TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
if (so) {
sock_shutdown(so, SHUT_RDWR);
sock_close(so);
struct timeval now;
loop:
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
if (TAILQ_EMPTY(&nfs4_cb_socks)) {
nfs4_callback_timer_on = 0;
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
return;
}
microuptime(&now);
continue;
}
TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
sock_close(ncbsp->ncbs_so);
nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
nfs4_callback_timer_on = 1;
nfs_interval_timer_start(nfs4_callback_timer_call,
NFS4_CB_TIMER_PERIOD * 1000);
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
}
/*
microuptime(&now);
ncbsp->ncbs_stamp = now.tv_sec;
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
/* add it to the list */
TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
nfs_interval_timer_start(nfs4_callback_timer_call, 500);
}
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
}
/*
mbuf_t m;
int error = 0, recv = 1;
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
/* wait if upcall is already in progress */
ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
- msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+ msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
}
ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
/* loop while we make error-free progress */
while (!error && recv) {
ncbsp->ncbs_stamp = now.tv_sec;
}
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
wakeup(ncbsp);
}
mbuf_t mhead = NULL, mrest = NULL, m;
struct msghdr msg;
struct nfsmount *nmp;
- fhandle_t fh;
+ fhandle_t *fh;
nfsnode_t np;
nfs_stateid stateid;
uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
size_t sentlen = 0;
xid = numops = op = status = procnum = taglen = cbid = 0;
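+	/* the file handle is zone-allocated for the duration of the callback and released before returning */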
+ fh = zalloc(nfs_fhandle_zone);
nfsm_chain_dissect_init(error, &nmreq, mreq);
nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
goto nfsmout;
}
/* match the callback ID to a registered mount */
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
if (nmp->nm_cbid != cbid) {
continue;
if (nmp) {
nmp->nm_cbrefs++;
}
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
if (!nmp) {
/* if no mount match, just drop socket. */
error = EPERM;
case NFS_OP_CB_GETATTR:
// (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
np = NULL;
- nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
bmlen = NFS_ATTR_BITMAP_LEN;
nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
if (error) {
numops = 0; /* don't process any more ops */
} else {
/* find the node for the file handle */
- error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
if (error || !np) {
status = NFSERR_BADHANDLE;
error = 0;
np = NULL;
nfsm_chain_get_stateid(error, &nmreq, &stateid);
nfsm_chain_get_32(error, &nmreq, truncate);
- nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
if (error) {
status = error;
error = 0;
numops = 0; /* don't process any more ops */
} else {
/* find the node for the file handle */
- error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
if (error || !np) {
status = NFSERR_BADHANDLE;
error = 0;
nfsm_chain_null(&nmrep);
/* drop the callback reference on the mount */
- lck_mtx_lock(nfs_global_mutex);
+ lck_mtx_lock(&nfs_global_mutex);
nmp->nm_cbrefs--;
if (!nmp->nm_cbid) {
wakeup(&nmp->nm_cbrefs);
}
- lck_mtx_unlock(nfs_global_mutex);
+ lck_mtx_unlock(&nfs_global_mutex);
break;
}
if (mreq) {
mbuf_freem(mreq);
}
+ NFS_ZFREE(nfs_fhandle_zone, fh);
return error;
}
#endif /* CONFIG_NFS4 */
if (sotype != SOCK_STREAM) {
break;
}
+ OS_FALLTHROUGH;
case EPIPE:
case EADDRNOTAVAIL:
case ENETDOWN:
u_int32_t reply = 0, rxid = 0;
int error = 0, asyncioq, t1;
+ bzero(&nmrep, sizeof(nmrep));
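+	/* start from a zeroed reply chain so error paths never inspect uninitialized state */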
/* Get the xid and check that it is an rpc reply */
nfsm_chain_dissect_init(error, &nmrep, mrep);
nfsm_chain_get_32(error, &nmrep, rxid);
* Loop through the request list to match up the reply
* Iff no match, just drop it.
*/
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
continue;
}
#endif /* CONFIG_NFS_GSS */
lck_mtx_unlock(&req->r_mtx);
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
/* if it's an async RPC with a callback, queue it up */
if (asyncioq) {
nfs_asyncio_finish(req);
if (!req) {
/* not matched to a request, so drop it. */
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
OSAddAtomic64(1, &nfsstats.rpcunexpected);
mbuf_freem(mrep);
}
req = *reqp;
if (!req) {
/* allocate a new NFS request structure */
- MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
- if (!newreq) {
- mbuf_freem(nmrest->nmc_mhead);
- nmrest->nmc_mhead = NULL;
- return ENOMEM;
- }
- req = newreq;
+ req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
+ } else {
+ bzero(req, sizeof(*req));
}
-
- bzero(req, sizeof(*req));
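+	/* Z_WAITOK allocations sleep rather than fail, so the old ENOMEM path is no longer needed */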
if (req == newreq) {
req->r_flags = R_ALLOCATED;
}
nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
if (nfs_mount_gone(nmp)) {
if (newreq) {
- FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+ NFS_ZFREE(nfs_req_zone, newreq);
}
return ENXIO;
}
mbuf_freem(nmrest->nmc_mhead);
nmrest->nmc_mhead = NULL;
if (newreq) {
- FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+ NFS_ZFREE(nfs_req_zone, newreq);
}
return ENXIO;
}
panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
}
- lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
+ lck_mtx_init(&req->r_mtx, &nfs_request_grp, LCK_ATTR_NULL);
req->r_nmp = nmp;
nmp->nm_ref++;
req->r_np = np;
* Still on an async I/O queue?
* %%% But which one, we may be on a local iod.
*/
- lck_mtx_lock(nfsiod_mutex);
+ lck_mtx_lock(&nfsiod_mutex);
if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
req->r_achain.tqe_next = NFSREQNOLIST;
}
- lck_mtx_unlock(nfsiod_mutex);
+ lck_mtx_unlock(&nfsiod_mutex);
}
lck_mtx_lock(&req->r_mtx);
wakeup(req2);
}
}
- assert((req->r_flags & R_RESENDQ) == 0);
/* XXX should we just remove this conditional, we should have a reference if we're resending */
- if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+ if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_flags &= ~R_RESENDQ;
req->r_rchain.tqe_next = NFSREQNOLIST;
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
}
if (req->r_cchain.tqe_next != NFSREQNOLIST) {
TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
if (nmp) {
nfs_mount_rele(nmp);
}
- lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
+ lck_mtx_destroy(&req->r_mtx, &nfs_request_grp);
if (req->r_flags & R_ALLOCATED) {
- FREE_ZONE(req, sizeof(*req), M_NFSREQ);
+ NFS_ZFREE(nfs_req_zone, req);
}
}
req->r_flags |= R_SENDING;
lck_mtx_unlock(&req->r_mtx);
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
nmp = req->r_nmp;
if (nfs_mount_gone(nmp)) {
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
return ENXIO;
}
OSAddAtomic64(1, &nfsstats.rpcrequests);
+ /*
+ * Make sure the request is not in the queue.
+ */
+ if (req->r_lflags & RL_QUEUED) {
+#if DEVELOPMENT
+ panic("nfs_request_send: req %p is already in global requests queue", req);
+#else
+ TAILQ_REMOVE(&nfs_reqq, req, r_chain);
+ req->r_lflags &= ~RL_QUEUED;
+#endif /* DEVELOPMENT */
+ }
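+	/* a request already on the global queue here is a bug: DEVELOPMENT kernels panic, release kernels quietly unlink it */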
+
/*
* Chain request into list of outstanding requests. Be sure
* to put it LAST so timer finds oldest requests first.
nfs_interval_timer_start(nfs_request_timer_call,
NFS_REQUESTDELAY);
}
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
/* Send the request... */
return nfs_send(req, wait);
u_int64_t *xidp,
int *status)
{
- struct nfsreq rq, *req = &rq;
+ struct nfsreq *req;
int error;
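+	/* struct nfsreq is sizeable, so it now comes from nfs_req_zone instead of living on the kernel stack */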
+ req = zalloc_flags(nfs_req_zone, Z_WAITOK);
if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
- return error;
+ goto out_free;
}
req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
if (si) {
FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
nfs_request_rele(req);
+out_free:
+ NFS_ZFREE(nfs_req_zone, req);
return error;
}
struct nfsm_chain *nmrepp,
int *status)
{
- struct nfsreq rq, *req = &rq;
+ struct nfsreq *req;
int error, wait = 1;
+ req = zalloc_flags(nfs_req_zone, Z_WAITOK);
if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
- return error;
+ goto out_free;
}
req->r_flags |= (flags & R_OPTMASK);
if (cp == NULL) {
printf("nfs_request_gss request has no context\n");
nfs_request_rele(req);
- return NFSERR_EAUTH;
+ error = NFSERR_EAUTH;
+ goto out_free;
}
nfs_gss_clnt_ctx_ref(req, cp);
nfs_gss_clnt_ctx_unref(req);
nfs_request_rele(req);
-
+out_free:
+ NFS_ZFREE(nfs_req_zone, req);
return error;
}
#endif /* CONFIG_NFS_GSS */
nmp = req->r_nmp;
if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
lck_mtx_lock(&nmp->nm_lock);
- if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
+ if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
/*
* It's not going to get off the resend queue if we're in recovery.
* So, just take it off ourselves. We could be holding mount state
* busy and thus holding up the start of recovery.
*/
TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_flags &= ~R_RESENDQ;
req->r_rchain.tqe_next = NFSREQNOLIST;
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
lck_mtx_unlock(&nmp->nm_lock);
req->r_flags |= R_SENDING;
lck_mtx_unlock(&req->r_mtx);
if ((nmp = req->r_nmp)) {
lck_mtx_lock(&nmp->nm_lock);
- if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
+ if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
/*
* It's not going to get off the resend queue if we're in recovery.
* So, just take it off ourselves. We could be holding mount state
* busy and thus holding up the start of recovery.
*/
TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_flags &= ~R_RESENDQ;
req->r_rchain.tqe_next = NFSREQNOLIST;
- if (req->r_flags & R_RESENDQ) {
- req->r_flags &= ~R_RESENDQ;
- }
/* Remove the R_RESENDQ reference */
assert(req->r_refs > 0);
req->r_refs--;
void
nfs_reqdequeue(struct nfsreq *req)
{
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
while (req->r_lflags & RL_BUSY) {
req->r_lflags |= RL_WAITING;
- msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
+ msleep(&req->r_lflags, &nfs_request_mutex, PSOCK, "reqdeq", NULL);
}
if (req->r_lflags & RL_QUEUED) {
TAILQ_REMOVE(&nfs_reqq, req, r_chain);
req->r_lflags &= ~RL_QUEUED;
}
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
}
/*
TAILQ_INIT(&nfs_mount_poke_queue);
restart:
- lck_mtx_lock(nfs_request_mutex);
+ lck_mtx_lock(&nfs_request_mutex);
req = TAILQ_FIRST(&nfs_reqq);
if (req == NULL) { /* no requests - turn timer off */
nfs_request_timer_on = 0;
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
return;
}
TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
}
/* Release our lock state, so we can become a zombie */
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
/*
* Note nfs_mount_make zombie(nmp) must be
* work we release nm_lock in
* nfs_make_mount_zombie with out acquiring any
* other locks. (Later, in nfs_mount_zombie we
- * will acquire nfs_request_mutex, r_mtx,
+ * will acquire &nfs_request_mutex, r_mtx,
* nm_lock in that order). So we should not be
* introducing deadlock here. We take a reference
* on the mount so that its still there when we
lck_mtx_unlock(&req->r_mtx);
}
- lck_mtx_unlock(nfs_request_mutex);
+ lck_mtx_unlock(&nfs_request_mutex);
/* poke any sockets */
while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
* This is used to determine if we need to bail on a mount.
* ETIMEDOUT is returned if there has been a soft timeout.
* EINTR is returned if there is a signal pending that is not being ignored
* and the mount is interruptable, or if we are a thread that is in the process
* of cancellation (also SIGKILL posted).
+ * ESHUTDOWN is returned if the system is in shutdown.
*/
return ENXIO;
}
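+	/* during system shutdown, give up immediately rather than waiting on an unreachable server */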
+ if (get_system_inshutdown()) {
+ NFS_SOCK_DBG("Shutdown in progress\n");
+ return ESHUTDOWN;
+ }
+
if (req && (req->r_flags & R_SOFTTERM)) {
return ETIMEDOUT; /* request has been terminated. */
}
struct nfsm_chain nmreq, nmrep;
mbuf_t mreq;
int error = 0, ip, pmprog, pmvers, pmproc;
- uint32_t ualen = 0;
- uint32_t port;
+ uint32_t ualen = 0, scopeid = 0, port32;
uint64_t xid = 0;
char uaddr[MAX_IPv6_STR_LEN + 16];
/* grab port from portmap response */
if (ip == 4) {
- nfsm_chain_get_32(error, &nmrep, port);
+ nfsm_chain_get_32(error, &nmrep, port32);
if (!error) {
- ((struct sockaddr_in*)sa)->sin_port = htons(port);
+ if (NFS_PORT_INVALID(port32)) {
+ error = EBADRPC;
+ } else {
+ ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
+ }
}
} else {
/* get uaddr string and convert to sockaddr */
NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
if (!error) {
uaddr[ualen] = '\0';
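+			/* nfs_uaddr2sockaddr() rebuilds saddr and may drop the IPv6 scope id, so save it and restore it afterwards */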
+ if (ip == 6) {
+ scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
+ }
if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
error = EIO;
}
+ if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
+ NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
+ ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
+ }
}
}
}
#define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
+uint32_t nfs_tcp_sockbuf = 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
int32_t nfs_is_mobile;
#define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
int wake = (slp->ns_flag & SLP_WORKTODO);
lck_rw_done(&slp->ns_rwlock);
if (wake && nfsd_thread_count) {
- lck_mtx_lock(nfsd_mutex);
+ lck_mtx_lock(&nfsd_mutex);
nfsrv_wakenfsd(slp);
- lck_mtx_unlock(nfsd_mutex);
+ lck_mtx_unlock(&nfsd_mutex);
}
}
}
{
mbuf_t m;
char *cp1, *cp2, *mdata;
- int len, mlen, error;
+ int error;
+ size_t len, mlen;
mbuf_t om, m2, recm;
u_int32_t recmark;
if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
return ENOBUFS;
}
- MALLOC_ZONE(nd, struct nfsrv_descript *,
- sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
- if (!nd) {
- return ENOMEM;
- }
+ nd = zalloc(nfsrv_descript_zone);
m = slp->ns_rec;
slp->ns_rec = mbuf_nextpkt(m);
if (slp->ns_rec) {
if (nd->nd_gss_context) {
nfs_gss_svc_ctx_deref(nd->nd_gss_context);
}
- FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC);
+ NFS_ZFREE(nfsrv_descript_zone, nd);
return error;
}
nd->nd_mrep = NULL;
int error = 0;
uid_t user_id;
gid_t group_id;
- int ngroups;
+ short ngroups;
uint32_t val;
nd->nd_cr = NULL;
}
}
nfsmout_if(error);
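+	/* the posix cred's cr_ngroups field is a short, hence the explicit narrowing conversion */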
- ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
+ ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
if (ngroups > 1) {
nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
}