]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/nfs/nfs_socket.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / bsd / nfs / nfs_socket.c
index b0fad27f148ddfc3372d4824107c12d90ee98ba4..435bbb7826a0b41d35348fb883d4c5298aa3b60f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -90,6 +90,7 @@
 #include <sys/tprintf.h>
 #include <libkern/OSAtomic.h>
 
+#include <sys/reboot.h>
 #include <sys/time.h>
 #include <kern/clock.h>
 #include <kern/task.h>
 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
 
+#ifndef SUN_LEN
+#define SUN_LEN(su) \
+       (sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
+#endif /* SUN_LEN */
+
 /* XXX */
 boolean_t       current_thread_aborted(void);
 kern_return_t   thread_terminate(thread_t);
 
+ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE);
+ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE);
+ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc",
+    sizeof(struct nfsrv_descript), ZC_NONE);
+
 
 #if CONFIG_NFS_SERVER
 int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
@@ -315,7 +326,7 @@ nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_ind
  * Get the mntfromname (or path portion only) for a given location.
  */
 void
-nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly)
+nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
 {
        struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
        char *p;
@@ -530,7 +541,7 @@ int
 nfs_socket_create(
        struct nfsmount *nmp,
        struct sockaddr *sa,
-       int sotype,
+       uint8_t sotype,
        in_port_t port,
        uint32_t protocol,
        uint32_t vers,
@@ -547,17 +558,27 @@ nfs_socket_create(
 
        switch (sa->sa_family) {
        case AF_INET:
+               if (sa->sa_len != sizeof(struct sockaddr_in)) {
+                       return EINVAL;
+               }
+               sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
+               if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
+                       strlcpy(naddr, "<unknown>", sizeof(naddr));
+               }
+               break;
        case AF_INET6:
-               if (sa->sa_family == AF_INET) {
-                       sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
-               } else {
-                       sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
+               if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+                       return EINVAL;
                }
+               sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
                if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
                        strlcpy(naddr, "<unknown>", sizeof(naddr));
                }
                break;
        case AF_LOCAL:
+               if (sa->sa_len != sizeof(struct sockaddr_un) && sa->sa_len != SUN_LEN((struct sockaddr_un *)sa)) {
+                       return EINVAL;
+               }
                strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
                break;
        default:
@@ -581,7 +602,7 @@ nfs_socket_create(
                }
                return ENOMEM;
        }
-       lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
+       lck_mtx_init(&nso->nso_lock, &nfs_request_grp, LCK_ATTR_NULL);
        nso->nso_sotype = sotype;
        if (nso->nso_sotype == SOCK_STREAM) {
                nfs_rpc_record_state_init(&nso->nso_rrs);
@@ -668,7 +689,7 @@ nfs_socket_destroy(struct nfs_socket *nso)
        if (nso->nso_sotype == SOCK_STREAM) {
                nfs_rpc_record_state_cleanup(&nso->nso_rrs);
        }
-       lck_mtx_destroy(&nso->nso_lock, nfs_request_grp);
+       lck_mtx_destroy(&nso->nso_lock, &nfs_request_grp);
        if (nso->nso_saddr) {
                FREE(nso->nso_saddr, M_SONAME);
        }
@@ -693,7 +714,7 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
         *   Soft mounts will want to abort sooner.
         */
        struct timeval timeo;
-       int on = 1, proto;
+       int on = 1, proto, reserve, error;
 
        timeo.tv_usec = 0;
        timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
@@ -708,11 +729,23 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
                        sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
                }
        }
-       if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */
-               int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024);
-               sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
-               sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
+
+       /* set socket buffer sizes for UDP/TCP */
+       reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
+       {
+               error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
+       }
+
+       if (error) {
+               log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
+       }
+
+       reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
+       error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
+       if (error) {
+               log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
        }
+
        /* set SO_NOADDRERR to detect network changes ASAP */
        sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
        /* just playin' it safe with upcalls */
@@ -1212,7 +1245,7 @@ nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
        uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
        fhandle_t *fh = NULL;
        char *path = NULL;
-       in_port_t port;
+       in_port_t port = 0;
        int addrtotal = 0;
 
        /* paranoia... check that we have at least one address in the locations */
@@ -1389,10 +1422,10 @@ keepsearching:
                            vfs_statfs(nmp->nm_mountp)->f_mntfromname);
                }
                if (fh) {
-                       FREE(fh, M_TEMP);
+                       NFS_ZFREE(nfs_fhandle_zone, fh);
                }
                if (path) {
-                       FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+                       NFS_ZFREE(ZV_NAMEI, path);
                }
                NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
                    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
@@ -1617,24 +1650,30 @@ keepsearching:
                                }
                        }
                }
+               if (!error) {
+                       error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
+                       if (error) {
+                               nfs_socket_search_update_error(&nss, error);
+                               nfs_socket_destroy(nso);
+                               return error;
+                       }
+               }
                if (saddr) {
-                       MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO);
+                       fh = zalloc(nfs_fhandle_zone);
                }
                if (saddr && fh) {
-                       MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+                       path = zalloc(ZV_NAMEI);
                }
                if (!saddr || !fh || !path) {
                        if (!error) {
                                error = ENOMEM;
                        }
                        if (fh) {
-                               FREE(fh, M_TEMP);
+                               NFS_ZFREE(nfs_fhandle_zone, fh);
                        }
                        if (path) {
-                               FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+                               NFS_ZFREE(ZV_NAMEI, path);
                        }
-                       fh = NULL;
-                       path = NULL;
                        nfs_socket_search_update_error(&nss, error);
                        nfs_socket_destroy(nso);
                        goto keepsearching;
@@ -1682,6 +1721,7 @@ keepsearching:
                                                if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
                                                        found = 0;
                                                }
+                                               OS_FALLTHROUGH;
                                        case RPCAUTH_NONE:
                                        case RPCAUTH_KRB5:
                                        case RPCAUTH_KRB5I:
@@ -1696,17 +1736,15 @@ keepsearching:
                        }
                        error = !found ? EAUTH : 0;
                }
-               FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
-               path = NULL;
+               NFS_ZFREE(ZV_NAMEI, path);
                if (error) {
                        nfs_socket_search_update_error(&nss, error);
-                       FREE(fh, M_TEMP);
-                       fh = NULL;
+                       NFS_ZFREE(nfs_fhandle_zone, fh);
                        nfs_socket_destroy(nso);
                        goto keepsearching;
                }
                if (nmp->nm_fh) {
-                       FREE(nmp->nm_fh, M_TEMP);
+                       NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
                }
                nmp->nm_fh = fh;
                fh = NULL;
@@ -1848,10 +1886,10 @@ keepsearching:
        nmp->nm_nss = NULL;
        nfs_socket_search_cleanup(&nss);
        if (fh) {
-               FREE(fh, M_TEMP);
+               NFS_ZFREE(nfs_fhandle_zone, fh);
        }
        if (path) {
-               FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+               NFS_ZFREE(ZV_NAMEI, path);
        }
        NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
        return 0;
@@ -1966,7 +2004,7 @@ nfs_reconnect(struct nfsmount *nmp)
         * as needing a resend.  (Though nfs_need_reconnect() probably
         * marked them all already.)
         */
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
        TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
                if (rq->r_nmp == nmp) {
                        lck_mtx_lock(&rq->r_mtx);
@@ -1981,7 +2019,7 @@ nfs_reconnect(struct nfsmount *nmp)
                        lck_mtx_unlock(&rq->r_mtx);
                }
        }
-       lck_mtx_unlock(nfs_request_mutex);
+       lck_mtx_unlock(&nfs_request_mutex);
        return 0;
 }
 
@@ -2039,7 +2077,7 @@ nfs_need_reconnect(struct nfsmount *nmp)
         * Loop through outstanding request list and
         * mark all requests as needing a resend.
         */
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
        TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
                if (rq->r_nmp == nmp) {
                        lck_mtx_lock(&rq->r_mtx);
@@ -2054,7 +2092,7 @@ nfs_need_reconnect(struct nfsmount *nmp)
                        lck_mtx_unlock(&rq->r_mtx);
                }
        }
-       lck_mtx_unlock(nfs_request_mutex);
+       lck_mtx_unlock(&nfs_request_mutex);
 }
 
 
@@ -2145,14 +2183,21 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
                        if (!req) {
                                break;
                        }
+                       /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_lock(&req->r_mtx);
+                       lck_mtx_lock(&nmp->nm_lock);
+                       if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
+                               lck_mtx_unlock(&req->r_mtx);
+                               continue;
+                       }
                        TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                       req->r_flags &= ~R_RESENDQ;
                        req->r_rchain.tqe_next = NFSREQNOLIST;
                        lck_mtx_unlock(&nmp->nm_lock);
-                       lck_mtx_lock(&req->r_mtx);
                        /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
                        if (req->r_error || req->r_nmrep.nmc_mhead) {
                                dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
-                               req->r_flags &= ~R_RESENDQ;
                                wakeup(req);
                                lck_mtx_unlock(&req->r_mtx);
                                if (dofinish) {
@@ -2188,9 +2233,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
                                        error = nfs_request_send(req, 0);
                                }
                                lck_mtx_lock(&req->r_mtx);
-                               if (req->r_flags & R_RESENDQ) {
-                                       req->r_flags &= ~R_RESENDQ;
-                               }
                                if (error) {
                                        req->r_error = error;
                                }
@@ -2214,9 +2256,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
                                error = nfs_send(req, 0);
                                lck_mtx_lock(&req->r_mtx);
                                if (!error) {
-                                       if (req->r_flags & R_RESENDQ) {
-                                               req->r_flags &= ~R_RESENDQ;
-                                       }
                                        wakeup(req);
                                        lck_mtx_unlock(&req->r_mtx);
                                        nfs_request_rele(req);
@@ -2225,9 +2264,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
                                }
                        }
                        req->r_error = error;
-                       if (req->r_flags & R_RESENDQ) {
-                               req->r_flags &= ~R_RESENDQ;
-                       }
                        wakeup(req);
                        dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
                        lck_mtx_unlock(&req->r_mtx);
@@ -2425,7 +2461,7 @@ nfs4_mount_callback_setup(struct nfsmount *nmp)
        int error, on = 1;
        in_port_t port;
 
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
        if (nfs4_cb_id == 0) {
                TAILQ_INIT(&nfs4_cb_mounts);
                TAILQ_INIT(&nfs4_cb_socks);
@@ -2439,7 +2475,7 @@ nfs4_mount_callback_setup(struct nfsmount *nmp)
        TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
 
        if (nfs4_cb_so) {
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                return;
        }
 
@@ -2451,11 +2487,17 @@ nfs4_mount_callback_setup(struct nfsmount *nmp)
        }
        so = nfs4_cb_so;
 
+       if (NFS_PORT_INVALID(nfs_callback_port)) {
+               error = EINVAL;
+               log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
+               goto fail;
+       }
+
        sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
        sin.sin_len = sizeof(struct sockaddr_in);
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(INADDR_ANY);
-       sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
+       sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
        error = sock_bind(so, (struct sockaddr *)&sin);
        if (error) {
                log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
@@ -2501,7 +2543,7 @@ nfs4_mount_callback_setup(struct nfsmount *nmp)
        sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
        sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
        /* try to use specified port or same port as IPv4 */
-       port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
+       port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
 ipv6_bind_again:
        sin6.sin6_len = sizeof(struct sockaddr_in6);
        sin6.sin6_family = AF_INET6;
@@ -2549,7 +2591,7 @@ ipv6_bind_again:
 fail:
        if (error) {
                nfs4_cb_so = nfs4_cb_so6 = NULL;
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                if (so) {
                        sock_shutdown(so, SHUT_RDWR);
                        sock_close(so);
@@ -2559,7 +2601,7 @@ fail:
                        sock_close(so6);
                }
        } else {
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
        }
 }
 
@@ -2578,15 +2620,19 @@ nfs4_mount_callback_shutdown(struct nfsmount *nmp)
        struct nfs4_cb_sock_list cb_socks;
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
+       if (nmp->nm_cbid == 0) {
+               lck_mtx_unlock(&nfs_global_mutex);
+               return;
+       }
        TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
        /* wait for any callbacks in progress to complete */
        while (nmp->nm_cbrefs) {
-               msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+               msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
        }
        nmp->nm_cbid = 0;
        if (--nfs4_cb_so_usecount) {
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                return;
        }
        so = nfs4_cb_so;
@@ -2594,7 +2640,7 @@ nfs4_mount_callback_shutdown(struct nfsmount *nmp)
        nfs4_cb_so = nfs4_cb_so6 = NULL;
        TAILQ_INIT(&cb_socks);
        TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
-       lck_mtx_unlock(nfs_global_mutex);
+       lck_mtx_unlock(&nfs_global_mutex);
        if (so) {
                sock_shutdown(so, SHUT_RDWR);
                sock_close(so);
@@ -2624,10 +2670,10 @@ nfs4_callback_timer(__unused void *param0, __unused void *param1)
        struct timeval now;
 
 loop:
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
        if (TAILQ_EMPTY(&nfs4_cb_socks)) {
                nfs4_callback_timer_on = 0;
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                return;
        }
        microuptime(&now);
@@ -2637,7 +2683,7 @@ loop:
                        continue;
                }
                TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
                sock_close(ncbsp->ncbs_so);
                nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
@@ -2647,7 +2693,7 @@ loop:
        nfs4_callback_timer_on = 1;
        nfs_interval_timer_start(nfs4_callback_timer_call,
            NFS4_CB_TIMER_PERIOD * 1000);
-       lck_mtx_unlock(nfs_global_mutex);
+       lck_mtx_unlock(&nfs_global_mutex);
 }
 
 /*
@@ -2711,7 +2757,7 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
        microuptime(&now);
        ncbsp->ncbs_stamp = now.tv_sec;
 
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
 
        /* add it to the list */
        TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
@@ -2742,7 +2788,7 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
                nfs_interval_timer_start(nfs4_callback_timer_call, 500);
        }
 
-       lck_mtx_unlock(nfs_global_mutex);
+       lck_mtx_unlock(&nfs_global_mutex);
 }
 
 /*
@@ -2758,14 +2804,14 @@ nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
        mbuf_t m;
        int error = 0, recv = 1;
 
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
        while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
                /* wait if upcall is already in progress */
                ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
-               msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+               msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
        }
        ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
-       lck_mtx_unlock(nfs_global_mutex);
+       lck_mtx_unlock(&nfs_global_mutex);
 
        /* loop while we make error-free progress */
        while (!error && recv) {
@@ -2789,9 +2835,9 @@ nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
                ncbsp->ncbs_stamp = now.tv_sec;
        }
 
-       lck_mtx_lock(nfs_global_mutex);
+       lck_mtx_lock(&nfs_global_mutex);
        ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
-       lck_mtx_unlock(nfs_global_mutex);
+       lck_mtx_unlock(&nfs_global_mutex);
        wakeup(ncbsp);
 }
 
@@ -2806,7 +2852,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
        mbuf_t mhead = NULL, mrest = NULL, m;
        struct msghdr msg;
        struct nfsmount *nmp;
-       fhandle_t fh;
+       fhandle_t *fh;
        nfsnode_t np;
        nfs_stateid stateid;
        uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
@@ -2817,6 +2863,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
        size_t sentlen = 0;
 
        xid = numops = op = status = procnum = taglen = cbid = 0;
+       fh = zalloc(nfs_fhandle_zone);
 
        nfsm_chain_dissect_init(error, &nmreq, mreq);
        nfsm_chain_get_32(error, &nmreq, xid);          // RPC XID
@@ -2893,7 +2940,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                        goto nfsmout;
                }
                /* match the callback ID to a registered mount */
-               lck_mtx_lock(nfs_global_mutex);
+               lck_mtx_lock(&nfs_global_mutex);
                TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
                        if (nmp->nm_cbid != cbid) {
                                continue;
@@ -2910,7 +2957,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                if (nmp) {
                        nmp->nm_cbrefs++;
                }
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                if (!nmp) {
                        /* if no mount match, just drop socket. */
                        error = EPERM;
@@ -2929,7 +2976,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                        case NFS_OP_CB_GETATTR:
                                // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
                                np = NULL;
-                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
                                bmlen = NFS_ATTR_BITMAP_LEN;
                                nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
                                if (error) {
@@ -2938,7 +2985,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                                        numops = 0; /* don't process any more ops */
                                } else {
                                        /* find the node for the file handle */
-                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
                                        if (error || !np) {
                                                status = NFSERR_BADHANDLE;
                                                error = 0;
@@ -2995,14 +3042,14 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                                np = NULL;
                                nfsm_chain_get_stateid(error, &nmreq, &stateid);
                                nfsm_chain_get_32(error, &nmreq, truncate);
-                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
                                if (error) {
                                        status = error;
                                        error = 0;
                                        numops = 0; /* don't process any more ops */
                                } else {
                                        /* find the node for the file handle */
-                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
                                        if (error || !np) {
                                                status = NFSERR_BADHANDLE;
                                                error = 0;
@@ -3056,12 +3103,12 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
                nfsm_chain_null(&nmrep);
 
                /* drop the callback reference on the mount */
-               lck_mtx_lock(nfs_global_mutex);
+               lck_mtx_lock(&nfs_global_mutex);
                nmp->nm_cbrefs--;
                if (!nmp->nm_cbid) {
                        wakeup(&nmp->nm_cbrefs);
                }
-               lck_mtx_unlock(nfs_global_mutex);
+               lck_mtx_unlock(&nfs_global_mutex);
                break;
        }
 
@@ -3160,6 +3207,7 @@ out:
        if (mreq) {
                mbuf_freem(mreq);
        }
+       NFS_ZFREE(nfs_fhandle_zone, fh);
        return error;
 }
 #endif /* CONFIG_NFS4 */
@@ -3581,6 +3629,7 @@ again:
                if (sotype != SOCK_STREAM) {
                        break;
                }
+               OS_FALLTHROUGH;
        case EPIPE:
        case EADDRNOTAVAIL:
        case ENETDOWN:
@@ -3809,6 +3858,7 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
        u_int32_t reply = 0, rxid = 0;
        int error = 0, asyncioq, t1;
 
+       bzero(&nmrep, sizeof(nmrep));
        /* Get the xid and check that it is an rpc reply */
        nfsm_chain_dissect_init(error, &nmrep, mrep);
        nfsm_chain_get_32(error, &nmrep, rxid);
@@ -3823,7 +3873,7 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
         * Loop through the request list to match up the reply
         * Iff no match, just drop it.
         */
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
        TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
                if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
                        continue;
@@ -3899,7 +3949,7 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
                }
 #endif /* CONFIG_NFS_GSS */
                lck_mtx_unlock(&req->r_mtx);
-               lck_mtx_unlock(nfs_request_mutex);
+               lck_mtx_unlock(&nfs_request_mutex);
                /* if it's an async RPC with a callback, queue it up */
                if (asyncioq) {
                        nfs_asyncio_finish(req);
@@ -3909,7 +3959,7 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
 
        if (!req) {
                /* not matched to a request, so drop it. */
-               lck_mtx_unlock(nfs_request_mutex);
+               lck_mtx_unlock(&nfs_request_mutex);
                OSAddAtomic64(1, &nfsstats.rpcunexpected);
                mbuf_freem(mrep);
        }
@@ -4021,16 +4071,10 @@ nfs_request_create(
        req = *reqp;
        if (!req) {
                /* allocate a new NFS request structure */
-               MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
-               if (!newreq) {
-                       mbuf_freem(nmrest->nmc_mhead);
-                       nmrest->nmc_mhead = NULL;
-                       return ENOMEM;
-               }
-               req = newreq;
+               req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
+       } else {
+               bzero(req, sizeof(*req));
        }
-
-       bzero(req, sizeof(*req));
        if (req == newreq) {
                req->r_flags = R_ALLOCATED;
        }
@@ -4038,7 +4082,7 @@ nfs_request_create(
        nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
        if (nfs_mount_gone(nmp)) {
                if (newreq) {
-                       FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+                       NFS_ZFREE(nfs_req_zone, newreq);
                }
                return ENXIO;
        }
@@ -4049,7 +4093,7 @@ nfs_request_create(
                mbuf_freem(nmrest->nmc_mhead);
                nmrest->nmc_mhead = NULL;
                if (newreq) {
-                       FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+                       NFS_ZFREE(nfs_req_zone, newreq);
                }
                return ENXIO;
        }
@@ -4061,7 +4105,7 @@ nfs_request_create(
                panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
        }
 
-       lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
+       lck_mtx_init(&req->r_mtx, &nfs_request_grp, LCK_ATTR_NULL);
        req->r_nmp = nmp;
        nmp->nm_ref++;
        req->r_np = np;
@@ -4133,12 +4177,12 @@ nfs_request_destroy(struct nfsreq *req)
                 * Still on an async I/O queue?
                 * %%% But which one, we may be on a local iod.
                 */
-               lck_mtx_lock(nfsiod_mutex);
+               lck_mtx_lock(&nfsiod_mutex);
                if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
                        TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
                        req->r_achain.tqe_next = NFSREQNOLIST;
                }
-               lck_mtx_unlock(nfsiod_mutex);
+               lck_mtx_unlock(&nfsiod_mutex);
        }
 
        lck_mtx_lock(&req->r_mtx);
@@ -4156,14 +4200,11 @@ nfs_request_destroy(struct nfsreq *req)
                                wakeup(req2);
                        }
                }
-               assert((req->r_flags & R_RESENDQ) == 0);
                /* XXX should we just remove this conditional, we should have a reference if we're resending */
-               if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+               if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
                        TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                       req->r_flags &= ~R_RESENDQ;
                        req->r_rchain.tqe_next = NFSREQNOLIST;
-                       if (req->r_flags & R_RESENDQ) {
-                               req->r_flags &= ~R_RESENDQ;
-                       }
                }
                if (req->r_cchain.tqe_next != NFSREQNOLIST) {
                        TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
@@ -4208,9 +4249,9 @@ nfs_request_destroy(struct nfsreq *req)
        if (nmp) {
                nfs_mount_rele(nmp);
        }
-       lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
+       lck_mtx_destroy(&req->r_mtx, &nfs_request_grp);
        if (req->r_flags & R_ALLOCATED) {
-               FREE_ZONE(req, sizeof(*req), M_NFSREQ);
+               NFS_ZFREE(nfs_req_zone, req);
        }
 }
 
@@ -4305,11 +4346,11 @@ nfs_request_send(struct nfsreq *req, int wait)
        req->r_flags |= R_SENDING;
        lck_mtx_unlock(&req->r_mtx);
 
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
 
        nmp = req->r_nmp;
        if (nfs_mount_gone(nmp)) {
-               lck_mtx_unlock(nfs_request_mutex);
+               lck_mtx_unlock(&nfs_request_mutex);
                return ENXIO;
        }
 
@@ -4322,6 +4363,18 @@ nfs_request_send(struct nfsreq *req, int wait)
 
        OSAddAtomic64(1, &nfsstats.rpcrequests);
 
+       /*
+        * Make sure the request is not in the queue.
+        */
+       if (req->r_lflags & RL_QUEUED) {
+#if DEVELOPMENT
+               panic("nfs_request_send: req %p is already in global requests queue", req);
+#else
+               TAILQ_REMOVE(&nfs_reqq, req, r_chain);
+               req->r_lflags &= ~RL_QUEUED;
+#endif /* DEVELOPMENT */
+       }
+
        /*
         * Chain request into list of outstanding requests. Be sure
         * to put it LAST so timer finds oldest requests first.
@@ -4335,7 +4388,7 @@ nfs_request_send(struct nfsreq *req, int wait)
                nfs_interval_timer_start(nfs_request_timer_call,
                    NFS_REQUESTDELAY);
        }
-       lck_mtx_unlock(nfs_request_mutex);
+       lck_mtx_unlock(&nfs_request_mutex);
 
        /* Send the request... */
        return nfs_send(req, wait);
@@ -4816,11 +4869,12 @@ nfs_request2(
        u_int64_t *xidp,
        int *status)
 {
-       struct nfsreq rq, *req = &rq;
+       struct nfsreq *req;
        int error;
 
+       req = zalloc_flags(nfs_req_zone, Z_WAITOK);
        if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
-               return error;
+               goto out_free;
        }
        req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
        if (si) {
@@ -4848,6 +4902,8 @@ nfs_request2(
 
        FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
        nfs_request_rele(req);
+out_free:
+       NFS_ZFREE(nfs_req_zone, req);
        return error;
 }
 
@@ -4870,18 +4926,20 @@ nfs_request_gss(
        struct nfsm_chain *nmrepp,
        int *status)
 {
-       struct nfsreq rq, *req = &rq;
+       struct nfsreq *req;
        int error, wait = 1;
 
+       req = zalloc_flags(nfs_req_zone, Z_WAITOK);
        if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
-               return error;
+               goto out_free;
        }
        req->r_flags |= (flags & R_OPTMASK);
 
        if (cp == NULL) {
                printf("nfs_request_gss request has no context\n");
                nfs_request_rele(req);
-               return NFSERR_EAUTH;
+               error = NFSERR_EAUTH;
+               goto out_free;
        }
        nfs_gss_clnt_ctx_ref(req, cp);
 
@@ -4918,7 +4976,8 @@ nfs_request_gss(
 
        nfs_gss_clnt_ctx_unref(req);
        nfs_request_rele(req);
-
+out_free:
+       NFS_ZFREE(nfs_req_zone, req);
        return error;
 }
 #endif /* CONFIG_NFS_GSS */
@@ -4973,17 +5032,15 @@ nfs_request_async(
                                nmp = req->r_nmp;
                                if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
                                        lck_mtx_lock(&nmp->nm_lock);
-                                       if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
+                                       if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
                                                /*
                                                 * It's not going to get off the resend queue if we're in recovery.
                                                 * So, just take it off ourselves.  We could be holding mount state
                                                 * busy and thus holding up the start of recovery.
                                                 */
                                                TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                                               req->r_flags &= ~R_RESENDQ;
                                                req->r_rchain.tqe_next = NFSREQNOLIST;
-                                               if (req->r_flags & R_RESENDQ) {
-                                                       req->r_flags &= ~R_RESENDQ;
-                                               }
                                                lck_mtx_unlock(&nmp->nm_lock);
                                                req->r_flags |= R_SENDING;
                                                lck_mtx_unlock(&req->r_mtx);
@@ -5041,17 +5098,15 @@ nfs_request_async_finish(
 
                if ((nmp = req->r_nmp)) {
                        lck_mtx_lock(&nmp->nm_lock);
-                       if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
+                       if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
                                /*
                                 * It's not going to get off the resend queue if we're in recovery.
                                 * So, just take it off ourselves.  We could be holding mount state
                                 * busy and thus holding up the start of recovery.
                                 */
                                TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                               req->r_flags &= ~R_RESENDQ;
                                req->r_rchain.tqe_next = NFSREQNOLIST;
-                               if (req->r_flags & R_RESENDQ) {
-                                       req->r_flags &= ~R_RESENDQ;
-                               }
                                /* Remove the R_RESENDQ reference */
                                assert(req->r_refs > 0);
                                req->r_refs--;
@@ -5152,16 +5207,16 @@ nfs_softterm(struct nfsreq *req)
 void
 nfs_reqdequeue(struct nfsreq *req)
 {
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
        while (req->r_lflags & RL_BUSY) {
                req->r_lflags |= RL_WAITING;
-               msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
+               msleep(&req->r_lflags, &nfs_request_mutex, PSOCK, "reqdeq", NULL);
        }
        if (req->r_lflags & RL_QUEUED) {
                TAILQ_REMOVE(&nfs_reqq, req, r_chain);
                req->r_lflags &= ~RL_QUEUED;
        }
-       lck_mtx_unlock(nfs_request_mutex);
+       lck_mtx_unlock(&nfs_request_mutex);
 }
 
 /*
@@ -5226,11 +5281,11 @@ nfs_request_timer(__unused void *param0, __unused void *param1)
        TAILQ_INIT(&nfs_mount_poke_queue);
 
 restart:
-       lck_mtx_lock(nfs_request_mutex);
+       lck_mtx_lock(&nfs_request_mutex);
        req = TAILQ_FIRST(&nfs_reqq);
        if (req == NULL) {      /* no requests - turn timer off */
                nfs_request_timer_on = 0;
-               lck_mtx_unlock(nfs_request_mutex);
+               lck_mtx_unlock(&nfs_request_mutex);
                return;
        }
 
@@ -5360,7 +5415,7 @@ restart:
                                        TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
                                }
                                /* Release our lock state, so we can become a zombie */
-                               lck_mtx_unlock(nfs_request_mutex);
+                               lck_mtx_unlock(&nfs_request_mutex);
 
                                /*
                                 * Note nfs_mount_make zombie(nmp) must be
@@ -5368,7 +5423,7 @@ restart:
                                 * work we release nm_lock in
                                 * nfs_make_mount_zombie with out acquiring any
                                 * other locks. (Later, in nfs_mount_zombie we
-                                * will acquire nfs_request_mutex, r_mtx,
+                                * will acquire nfs_request_mutex, r_mtx,
                                 * nm_lock in that order). So we should not be
                                 * introducing deadlock here. We take a reference
                                 * on the mount so that its still there when we
@@ -5469,7 +5524,7 @@ restart:
                lck_mtx_unlock(&req->r_mtx);
        }
 
-       lck_mtx_unlock(nfs_request_mutex);
+       lck_mtx_unlock(&nfs_request_mutex);
 
        /* poke any sockets */
        while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
@@ -5496,6 +5551,7 @@ nfs_noremotehang(thread_t thd)
  * This is used to determine if we need to bail on a mount.
  * ETIMEDOUT is returned if there has been a soft timeout.
  * EINTR is returned if there is a signal pending that is not being ignored
  * and the mount is interruptable, or if we are a thread that is in the process
  * of cancellation (also SIGKILL posted).
+ * ESHUTDOWN is returned if the system is in shutdown.
  */
@@ -5510,6 +5566,11 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocke
                return ENXIO;
        }
 
+       if (get_system_inshutdown()) {
+               NFS_SOCK_DBG("Shutdown in progress\n");
+               return ESHUTDOWN;
+       }
+
        if (req && (req->r_flags & R_SOFTTERM)) {
                return ETIMEDOUT; /* request has been terminated. */
        }
@@ -5875,8 +5936,7 @@ nfs_portmap_lookup(
        struct nfsm_chain nmreq, nmrep;
        mbuf_t mreq;
        int error = 0, ip, pmprog, pmvers, pmproc;
-       uint32_t ualen = 0;
-       uint32_t port;
+       uint32_t ualen = 0, scopeid = 0, port32;
        uint64_t xid = 0;
        char uaddr[MAX_IPv6_STR_LEN + 16];
 
@@ -5951,9 +6011,13 @@ tryagain:
 
        /* grab port from portmap response */
        if (ip == 4) {
-               nfsm_chain_get_32(error, &nmrep, port);
+               nfsm_chain_get_32(error, &nmrep, port32);
                if (!error) {
-                       ((struct sockaddr_in*)sa)->sin_port = htons(port);
+                       if (NFS_PORT_INVALID(port32)) {
+                               error = EBADRPC;
+                       } else {
+                               ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
+                       }
                }
        } else {
                /* get uaddr string and convert to sockaddr */
@@ -5976,9 +6040,16 @@ tryagain:
                                NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
                                if (!error) {
                                        uaddr[ualen] = '\0';
+                                       if (ip == 6) {
+                                               scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
+                                       }
                                        if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
                                                error = EIO;
                                        }
+                                       if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
+                                               NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
+                                               ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
+                                       }
                                }
                        }
                }
@@ -6038,6 +6109,7 @@ nfs_msg(thread_t thd,
 #define NFS_SQUISH_SHUTDOWN             0x1000          /* Squish all mounts on shutdown. Currently not implemented */
 
 uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
+uint32_t nfs_tcp_sockbuf = 128 * 1024;          /* Default value of tcp_sendspace and tcp_recvspace */
 int32_t nfs_is_mobile;
 
 #define NFS_SQUISHY_DEADTIMEOUT         8       /* Dead time out for squishy mounts */
@@ -6635,9 +6707,9 @@ dorecs:
                int wake = (slp->ns_flag & SLP_WORKTODO);
                lck_rw_done(&slp->ns_rwlock);
                if (wake && nfsd_thread_count) {
-                       lck_mtx_lock(nfsd_mutex);
+                       lck_mtx_lock(&nfsd_mutex);
                        nfsrv_wakenfsd(slp);
-                       lck_mtx_unlock(nfsd_mutex);
+                       lck_mtx_unlock(&nfsd_mutex);
                }
        }
 }
@@ -6652,7 +6724,8 @@ nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
 {
        mbuf_t m;
        char *cp1, *cp2, *mdata;
-       int len, mlen, error;
+       int error;
+       size_t len, mlen;
        mbuf_t om, m2, recm;
        u_int32_t recmark;
 
@@ -6814,11 +6887,7 @@ nfsrv_dorec(
        if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
                return ENOBUFS;
        }
-       MALLOC_ZONE(nd, struct nfsrv_descript *,
-           sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
-       if (!nd) {
-               return ENOMEM;
-       }
+       nd = zalloc(nfsrv_descript_zone);
        m = slp->ns_rec;
        slp->ns_rec = mbuf_nextpkt(m);
        if (slp->ns_rec) {
@@ -6849,7 +6918,7 @@ nfsrv_dorec(
                if (nd->nd_gss_context) {
                        nfs_gss_svc_ctx_deref(nd->nd_gss_context);
                }
-               FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC);
+               NFS_ZFREE(nfsrv_descript_zone, nd);
                return error;
        }
        nd->nd_mrep = NULL;
@@ -6872,7 +6941,7 @@ nfsrv_getreq(struct nfsrv_descript *nd)
        int error = 0;
        uid_t user_id;
        gid_t group_id;
-       int ngroups;
+       short ngroups;
        uint32_t val;
 
        nd->nd_cr = NULL;
@@ -6964,7 +7033,7 @@ nfsrv_getreq(struct nfsrv_descript *nd)
                        }
                }
                nfsmout_if(error);
-               ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
+               ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
                if (ngroups > 1) {
                        nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
                }