diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c
index 197a33ca98e49146bf8a9680e2ea5fae7ec8d287..ee1a6167988c030c9415dd721746cc413182e3c2 100644 (file)
--- a/bsd/nfs/nfs_socket.c
+++ b/bsd/nfs/nfs_socket.c
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This file contains Original Code and/or Modifications of Original Code 
- * as defined in and that are subject to the Apple Public Source License 
- * Version 2.0 (the 'License'). You may not use this file except in 
- * compliance with the License.  The rights granted to you under the 
- * License may not be used to create, or enable the creation or 
- * redistribution of, unlawful or unlicensed copies of an Apple operating 
- * system, or to circumvent, violate, or enable the circumvention or 
- * violation of, any terms of an Apple operating system software license 
- * agreement.
- *
- * Please obtain a copy of the License at 
- * http://www.opensource.apple.com/apsl/ and read it before using this 
- * file.
- *
- * The Original Code and all software distributed under the License are 
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
 /*
 #include <sys/socket.h>
 #include <sys/syslog.h>
 #include <sys/tprintf.h>
-#include <sys/uio_internal.h>
 #include <libkern/OSAtomic.h>
 
 #include <sys/time.h>
 #include <kern/clock.h>
 #include <kern/task.h>
 #include <kern/thread.h>
+#include <kern/thread_call.h>
 #include <sys/user.h>
 
 #include <netinet/in.h>
 #include <nfs/nfs.h>
 #include <nfs/xdr_subs.h>
 #include <nfs/nfsm_subs.h>
+#include <nfs/nfs_gss.h>
 #include <nfs/nfsmount.h>
 #include <nfs/nfsnode.h>
-#include <nfs/nfsrtt.h>
 
-#include <sys/kdebug.h>
+/* XXX */
+boolean_t      current_thread_aborted(void);
+kern_return_t  thread_terminate(thread_t);
+
 
-#define FSDBG(A, B, C, D, E) \
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_NONE, \
-               (int)(B), (int)(C), (int)(D), (int)(E), 0)
-#define FSDBG_TOP(A, B, C, D, E) \
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_START, \
-               (int)(B), (int)(C), (int)(D), (int)(E), 0)
-#define FSDBG_BOT(A, B, C, D, E) \
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_END, \
-               (int)(B), (int)(C), (int)(D), (int)(E), 0)
+#if NFSSERVER
+int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
+
+int nfsrv_getstream(struct nfsrv_sock *,int);
+int nfsrv_getreq(struct nfsrv_descript *);
+extern int nfsv3_procid[NFS_NPROCS];
+#endif /* NFSSERVER */
+
+#if NFSCLIENT
+
+int    nfs_reconnect(struct nfsmount *);
+int    nfs_connect_setup(struct nfsmount *);
+void   nfs_mount_sock_thread(void *, wait_result_t);
+void   nfs_udp_rcv(socket_t, void*, int);
+void   nfs_tcp_rcv(socket_t, void*, int);
+void   nfs_sock_poke(struct nfsmount *);
+void   nfs_request_match_reply(struct nfsmount *, mbuf_t);
+void   nfs_reqdequeue(struct nfsreq *);
+void   nfs_reqbusy(struct nfsreq *);
+struct nfsreq *nfs_reqnext(struct nfsreq *);
+int    nfs_wait_reply(struct nfsreq *);
+void   nfs_softterm(struct nfsreq *);
+
+#ifdef NFS_SOCKET_DEBUGGING
+#define NFS_SOCK_DBG(X)        printf X
+#else
+#define NFS_SOCK_DBG(X)
+#endif
 
 /*
  * Estimate rto for an nfs rpc sent via. an unreliable datagram.
          ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
 #define        NFS_SRTT(r)     (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
 #define        NFS_SDRTT(r)    (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
-/*
- * External data, mostly RPC constants in XDR form
- */
-extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix,
-       rpc_msgaccepted, rpc_call, rpc_autherr,
-       rpc_auth_kerb;
-extern u_long nfs_prog;
-extern struct nfsstats nfsstats;
-extern int nfsv3_procid[NFS_NPROCS];
-extern int nfs_ticks;
-extern u_long nfs_xidwrap;
 
 /*
  * Defines which timer to use for the procnum.
@@ -177,363 +186,296 @@ static int proct[NFS_NPROCS] = {
 #define        NFS_CWNDSCALE   256
 #define        NFS_MAXCWND     (NFS_CWNDSCALE * 32)
 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
-int nfsrtton = 0;
-struct nfsrtt nfsrtt;
-
-static int     nfs_rcvlock(struct nfsreq *);
-static void    nfs_rcvunlock(struct nfsreq *);
-static int     nfs_receive(struct nfsreq *rep, mbuf_t *mp);
-static int     nfs_reconnect(struct nfsreq *rep);
-static void    nfs_repdequeue(struct nfsreq *rep);
-
-/* XXX */
-boolean_t      current_thread_aborted(void);
-kern_return_t  thread_terminate(thread_t);
-
-#ifndef NFS_NOSERVER 
-static int     nfsrv_getstream(struct nfssvc_sock *,int);
-
-int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
-                                   struct nfssvc_sock *slp,
-                                   proc_t procp,
-                                   mbuf_t *mreqp) = {
-       nfsrv_null,
-       nfsrv_getattr,
-       nfsrv_setattr,
-       nfsrv_lookup,
-       nfsrv3_access,
-       nfsrv_readlink,
-       nfsrv_read,
-       nfsrv_write,
-       nfsrv_create,
-       nfsrv_mkdir,
-       nfsrv_symlink,
-       nfsrv_mknod,
-       nfsrv_remove,
-       nfsrv_rmdir,
-       nfsrv_rename,
-       nfsrv_link,
-       nfsrv_readdir,
-       nfsrv_readdirplus,
-       nfsrv_statfs,
-       nfsrv_fsinfo,
-       nfsrv_pathconf,
-       nfsrv_commit,
-       nfsrv_noop
-};
-#endif /* NFS_NOSERVER */
-
-
-/*
- * attempt to bind a socket to a reserved port
- */
-static int
-nfs_bind_resv(struct nfsmount *nmp)
-{
-       socket_t so = nmp->nm_so;
-       struct sockaddr_in sin;
-       int error;
-       u_short tport;
-
-       if (!so)
-               return (EINVAL);
-
-       sin.sin_len = sizeof (struct sockaddr_in);
-       sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = INADDR_ANY;
-       tport = IPPORT_RESERVED - 1;
-       sin.sin_port = htons(tport);
-
-       while (((error = sock_bind(so, (struct sockaddr *) &sin)) == EADDRINUSE) &&
-              (--tport > IPPORT_RESERVED / 2))
-               sin.sin_port = htons(tport);
-       return (error);
-}
-
-/*
- * variables for managing the nfs_bind_resv_thread
- */
-int nfs_resv_mounts = 0;
-static int nfs_bind_resv_thread_state = 0;
-#define NFS_BIND_RESV_THREAD_STATE_INITTED     1
-#define NFS_BIND_RESV_THREAD_STATE_RUNNING     2
-lck_grp_t *nfs_bind_resv_lck_grp;
-lck_grp_attr_t *nfs_bind_resv_lck_grp_attr;
-lck_attr_t *nfs_bind_resv_lck_attr;
-lck_mtx_t *nfs_bind_resv_mutex;
-struct nfs_bind_resv_request {
-       TAILQ_ENTRY(nfs_bind_resv_request) brr_chain;
-       struct nfsmount *brr_nmp;
-       int brr_error;
-};
-static TAILQ_HEAD(, nfs_bind_resv_request) nfs_bind_resv_request_queue;
-
-/*
- * thread to handle any reserved port bind requests
- */
-static void
-nfs_bind_resv_thread(void)
-{
-       struct nfs_bind_resv_request *brreq;
-
-       nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;
-
-       while (nfs_resv_mounts > 0) {
-               lck_mtx_lock(nfs_bind_resv_mutex);
-               while ((brreq = TAILQ_FIRST(&nfs_bind_resv_request_queue))) {
-                       TAILQ_REMOVE(&nfs_bind_resv_request_queue, brreq, brr_chain);
-                       lck_mtx_unlock(nfs_bind_resv_mutex);
-                       brreq->brr_error = nfs_bind_resv(brreq->brr_nmp);
-                       wakeup(brreq);
-                       lck_mtx_lock(nfs_bind_resv_mutex);
-               }
-               msleep((caddr_t)&nfs_bind_resv_request_queue,
-                               nfs_bind_resv_mutex, PSOCK | PDROP,
-                               "nfs_bind_resv_request_queue", 0);
-       }
-
-       nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
-       (void) thread_terminate(current_thread());
-}
-
-int
-nfs_bind_resv_thread_wake(void)
-{
-       if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING)
-               return (EIO);
-       wakeup(&nfs_bind_resv_request_queue);
-       return (0);
-}
-
-/*
- * underprivileged procs call this to request nfs_bind_resv_thread
- * to perform the reserved port binding for them.
- */
-static int
-nfs_bind_resv_nopriv(struct nfsmount *nmp)
-{
-       struct nfs_bind_resv_request brreq;
-       int error;
-
-       if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING) {
-               if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_INITTED) {
-                       nfs_bind_resv_lck_grp_attr = lck_grp_attr_alloc_init();
-                       lck_grp_attr_setstat(nfs_bind_resv_lck_grp_attr);
-                       nfs_bind_resv_lck_grp = lck_grp_alloc_init("nfs_bind_resv", nfs_bind_resv_lck_grp_attr);
-                       nfs_bind_resv_lck_attr = lck_attr_alloc_init();
-                       nfs_bind_resv_mutex = lck_mtx_alloc_init(nfs_bind_resv_lck_grp, nfs_bind_resv_lck_attr);
-                       TAILQ_INIT(&nfs_bind_resv_request_queue);
-                       nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
-               }
-               kernel_thread(kernel_task, nfs_bind_resv_thread);
-               nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;
-       }
-
-       brreq.brr_nmp = nmp;
-       brreq.brr_error = 0;
-
-       lck_mtx_lock(nfs_bind_resv_mutex);
-       TAILQ_INSERT_TAIL(&nfs_bind_resv_request_queue, &brreq, brr_chain);
-       lck_mtx_unlock(nfs_bind_resv_mutex);
-
-       error = nfs_bind_resv_thread_wake();
-       if (error) {
-               TAILQ_REMOVE(&nfs_bind_resv_request_queue, &brreq, brr_chain);
-               /* Note: we might be able to simply restart the thread */
-               return (error);
-       }
-
-       tsleep((caddr_t)&brreq, PSOCK, "nfsbindresv", 0);
-
-       return (brreq.brr_error);
-}
 
 /*
- * Initialize sockets and congestion for a new NFS connection.
- * We do not free the sockaddr if error.
+ * Initialize socket state and perform setup for a new NFS connection.
  */
 int
-nfs_connect(
-       struct nfsmount *nmp,
-       __unused struct nfsreq *rep)
+nfs_connect(struct nfsmount *nmp, int verbose)
 {
        socket_t so;
-       int error, rcvreserve, sndreserve;
+       int error, on = 1, proto;
+       sock_upcall upcall;
        struct sockaddr *saddr;
+       struct sockaddr_in sin;
        struct timeval timeo;
 
-       nmp->nm_so = 0;
+       lck_mtx_lock(&nmp->nm_lock);
+       nmp->nm_sockflags |= NMSOCK_CONNECTING;
        saddr = mbuf_data(nmp->nm_nam);
+       upcall = (nmp->nm_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
+       lck_mtx_unlock(&nmp->nm_lock);
        error = sock_socket(saddr->sa_family, nmp->nm_sotype,
-                                               nmp->nm_soproto, 0, 0, &nmp->nm_so);
-       if (error) {
+                           nmp->nm_soproto, upcall, nmp, &nmp->nm_so);
+       if (error)
                goto bad;
-       }
+       lck_mtx_lock(&nmp->nm_lock);
        so = nmp->nm_so;
 
        /*
         * Some servers require that the client port be a reserved port number.
         */
        if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
-               proc_t p;
-               /*
-                * sobind() requires current_proc() to have superuser privs.
-                * If this bind is part of a reconnect, and the current proc
-                * doesn't have superuser privs, we hand the sobind() off to
-                * a kernel thread to process.
-                */
-               if ((nmp->nm_state & NFSSTA_MOUNTED) &&
-                   (p = current_proc()) && suser(kauth_cred_get(), 0)) {
-                       /* request nfs_bind_resv_thread() to do bind */
-                       error = nfs_bind_resv_nopriv(nmp);
-               } else {
-                       error = nfs_bind_resv(nmp);
+               int portrange = IP_PORTRANGE_LOW;
+               error = sock_setsockopt(so, IPPROTO_IP, IP_PORTRANGE, &portrange, sizeof(portrange));
+               if (!error) {   /* bind now to check for failure */
+                       sin.sin_len = sizeof (struct sockaddr_in);
+                       sin.sin_family = AF_INET;
+                       sin.sin_addr.s_addr = INADDR_ANY;
+                       sin.sin_port = 0;
+                       error = sock_bind(so, (struct sockaddr *) &sin);
                }
-               if (error)
+               if (error) {
+                       lck_mtx_unlock(&nmp->nm_lock);
                        goto bad;
+               }
        }
 
        /*
         * Protocols that do not require connections may be optionally left
-        * unconnected for servers that reply from a port other than NFS_PORT.
+        * unconnected for servers that reply from a different address/port.
         */
        if (nmp->nm_flag & NFSMNT_NOCONN) {
                if (nmp->nm_sotype == SOCK_STREAM) {
                        error = ENOTCONN;
+                       lck_mtx_unlock(&nmp->nm_lock);
                        goto bad;
                }
        } else {
-               struct timeval  tv;
-               tv.tv_sec = 2;
-               tv.tv_usec = 0;
+               int tocnt = 0, optlen = sizeof(error);
+               struct timespec ts = { 1, 0 };
+
+               lck_mtx_unlock(&nmp->nm_lock);
                error = sock_connect(so, mbuf_data(nmp->nm_nam), MSG_DONTWAIT);
-               if (error && error != EINPROGRESS) {
+               if (error && (error != EINPROGRESS))
                        goto bad;
-               }
-               
-               while ((error = sock_connectwait(so, &tv)) == EINPROGRESS) {
-                       if (rep && (error = nfs_sigintr(nmp, rep, rep->r_procp))) {
-                               goto bad;
+               lck_mtx_lock(&nmp->nm_lock);
+               while (!sock_isconnected(so)) {
+                       nfs_mount_check_dead_timeout(nmp);
+                       if ((tocnt++ == 30) && verbose) /* log a warning if connect is taking a while */
+                               log(LOG_INFO, "nfs_connect: socket connect taking a while for %s\n",
+                                       vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+                       /* check for error on socket */
+                       sock_getsockopt(so, SOL_SOCKET, SO_ERROR, &error, &optlen);
+                       if (error) {
+                               if (verbose)
+                                       log(LOG_INFO, "nfs_connect: socket error %d for %s\n",
+                                               error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+                               break;
                        }
+                       /* abort if this is taking too long or we're unmounting */
+                       if ((tocnt > 120) || (nmp->nm_sockflags & NMSOCK_UNMOUNT)) {
+                               error = ENOTCONN;
+                               break;
+                       }
+                       if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
+                               break;
+                       msleep(&nmp->nm_so, &nmp->nm_lock, PSOCK, "nfs_socket_connect", &ts);
+               }
+               if ((tocnt > 30) && verbose)
+                       log(LOG_INFO, "nfs_connect: socket connect %s for %s\n",
+                               error ? "aborted" : "completed",
+                               vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+               if (error) {
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       goto bad;
                }
        }
-       
+
        /*
-        * Always time out on recieve, this allows us to reconnect the
-        * socket to deal with network changes.
+        * Set socket send/receive timeouts
+        * - Receive timeout shouldn't matter because all receives are performed
+        *   in the socket upcall non-blocking.
+        * - Send timeout should allow us to react to a blocked socket.
+        *   Soft mounts will want to abort sooner.
         */
        timeo.tv_usec = 0;
-       timeo.tv_sec = 2;
-       error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
-       if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
-               timeo.tv_sec = 5;
-       } else {
-               timeo.tv_sec = 0;
+       timeo.tv_sec = (nmp->nm_flag & NFSMNT_SOFT) ? 10 : 60;
+       error |= sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+       error |= sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+       if (error) {
+               log(LOG_INFO, "nfs_connect: socket timeout setting errors for %s\n",
+                       vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+               error = 0;
        }
-       error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
-       
-       if (nmp->nm_sotype == SOCK_DGRAM) {
-               sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
-               rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) *
-                       (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2);
-       } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
-               sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
-               rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) *
-                       (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2);
-       } else {
-               int proto;
-               int on = 1;
-               
-               sock_gettype(so, NULL, NULL, &proto);
-               if (nmp->nm_sotype != SOCK_STREAM)
-                       panic("nfscon sotype");
 
-               // Assume that SOCK_STREAM always requires a connection
+       if (nmp->nm_sotype == SOCK_STREAM) {
+               /* Assume that SOCK_STREAM always requires a connection */
                sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
-               
-               if (proto == IPPROTO_TCP) {
+               /* set nodelay for TCP */
+               sock_gettype(so, NULL, NULL, &proto);
+               if (proto == IPPROTO_TCP)
                        sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
-               }
-
-               sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long)) * 3;
-               rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long)) *
-                               (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2);
        }
 
-       if (sndreserve > NFS_MAXSOCKBUF)
-               sndreserve = NFS_MAXSOCKBUF;
-       if (rcvreserve > NFS_MAXSOCKBUF)
-               rcvreserve = NFS_MAXSOCKBUF;
-       error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &sndreserve, sizeof(sndreserve));
-       if (error) {
-               goto bad;
+       if (nmp->nm_sotype == SOCK_DGRAM) { /* set socket buffer sizes for UDP */
+               int reserve = NFS_UDPSOCKBUF;
+               error |= sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
+               error |= sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
+               if (error) {
+                       log(LOG_INFO, "nfs_connect: socket buffer setting errors for %s\n",
+                               vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+                       error = 0;
+               }
        }
-       error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &rcvreserve, sizeof(rcvreserve));
+
+       /* set SO_NOADDRERR to detect network changes ASAP */
+       error = sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
        if (error) {
+               lck_mtx_unlock(&nmp->nm_lock);
                goto bad;
        }
+       /* just playin' it safe */
+       sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
 
-       sock_nointerrupt(so, 1);
+       if (!(nmp->nm_flag & NFSMNT_INT))
+               sock_nointerrupt(so, 1);
 
-       /* Initialize other non-zero congestion variables */
+       /* Initialize socket state variables */
        nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
                nmp->nm_srtt[3] = (NFS_TIMEO << 3);
        nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
                nmp->nm_sdrtt[3] = 0;
-       nmp->nm_cwnd = NFS_MAXCWND / 2;     /* Initial send window */
-       nmp->nm_sent = 0;
-       FSDBG(529, nmp, nmp->nm_state, nmp->nm_soflags, nmp->nm_cwnd);
-       nmp->nm_timeouts = 0;
-       return (0);
-
+       if (nmp->nm_sotype == SOCK_DGRAM) {
+               /* XXX do we really want to reset this on each reconnect? */
+               nmp->nm_cwnd = NFS_MAXCWND / 2;     /* Initial send window */
+               nmp->nm_sent = 0;
+       } else if (nmp->nm_sotype == SOCK_STREAM) {
+               nmp->nm_markerleft = sizeof(nmp->nm_fragleft);
+               nmp->nm_fragleft = nmp->nm_reclen = 0;
+               nmp->nm_timeouts = 0;
+       }
+       nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
+       nmp->nm_sockflags |= NMSOCK_SETUP;
+       FSDBG(529, nmp, nmp->nm_state, nmp->nm_flag, nmp->nm_cwnd);
+       lck_mtx_unlock(&nmp->nm_lock);
+       error = nfs_connect_setup(nmp);
 bad:
-       nfs_disconnect(nmp);
+       lck_mtx_lock(&nmp->nm_lock);
+       nmp->nm_sockflags &= ~(NMSOCK_CONNECTING|NMSOCK_SETUP);
+       if (!error) {
+               nmp->nm_sockflags |= NMSOCK_READY;
+               wakeup(&nmp->nm_sockflags);
+       }
+       lck_mtx_unlock(&nmp->nm_lock);
+       return (error);
+}
+
+/* setup & confirm socket connection is functional */
+int
+nfs_connect_setup(struct nfsmount *nmp)
+{
+       struct nfsm_chain nmreq, nmrep;
+       int error = 0, status;
+       u_int64_t xid;
+
+       if (nmp->nm_vers >= NFS_VER4) {
+               error = nfs4_setclientid(nmp);
+               if (error)
+                       return (error);
+               error = nfs4_renew(nmp, R_SETUP);
+               if ((error == NFSERR_ADMIN_REVOKED) ||
+                   (error == NFSERR_EXPIRED) ||
+                   (error == NFSERR_LEASE_MOVED) ||
+                   (error == NFSERR_STALE_CLIENTID)) {
+                       lck_mtx_lock(&nmp->nm_lock);
+                       nmp->nm_state |= NFSSTA_RECOVER;
+                       lck_mtx_unlock(&nmp->nm_lock);
+               }
+       } else {
+               /* verify connection's OK by sending a NULL request */
+               nfsm_chain_null(&nmreq);
+               nfsm_chain_null(&nmrep);
+               nfsm_chain_build_alloc_init(error, &nmreq, 0);
+               nfsm_chain_build_done(error, &nmreq);
+               nfsmout_if(error);
+               error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC_NULL,
+                               current_thread(), NULL, R_SETUP, &nmrep, &xid, &status);
+               if (!error)
+                       error = status;
+nfsmout:
+               nfsm_chain_cleanup(&nmreq);
+               nfsm_chain_cleanup(&nmrep);
+       }
        return (error);
 }
 
 /*
- * Reconnect routine:
- * Called when a connection is broken on a reliable protocol.
- * - clean up the old socket
+ * NFS socket reconnect routine:
+ * Called when a connection is broken.
+ * - disconnect the old socket
  * - nfs_connect() again
  * - set R_MUSTRESEND for all outstanding requests on mount point
  * If this fails the mount point is DEAD!
- * nb: Must be called with the nfs_sndlock() set on the mount point.
  */
-static int
-nfs_reconnect(struct nfsreq *rep)
+int
+nfs_reconnect(struct nfsmount *nmp)
 {
-       struct nfsreq *rp;
-       struct nfsmount *nmp = rep->r_nmp;
-       int error;
+       struct nfsreq *rq;
+       struct timeval now;
+       thread_t thd = current_thread();
+       int error, wentdown = 0, verbose = 1;
+       time_t lastmsg;
+
+       microuptime(&now);
+       lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
 
        nfs_disconnect(nmp);
-       while ((error = nfs_connect(nmp, rep))) {
+
+       while ((error = nfs_connect(nmp, verbose))) {
+               verbose = 0;
+               nfs_disconnect(nmp);
                if (error == EINTR || error == ERESTART)
                        return (EINTR);
                if (error == EIO)
                        return (EIO);
-               nfs_down(rep->r_nmp, rep->r_procp, error, NFSSTA_TIMEO,
-                       "can not connect");
-               rep->r_flags |= R_TPRINTFMSG;
+               microuptime(&now);
+               if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
+                       lastmsg = now.tv_sec;
+                       nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect");
+                       wentdown = 1;
+               }
+               lck_mtx_lock(&nmp->nm_lock);
                if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
                        /* we're not yet completely mounted and */
                        /* we can't reconnect, so we fail */
+                       lck_mtx_unlock(&nmp->nm_lock);
                        return (error);
                }
-               if ((error = nfs_sigintr(rep->r_nmp, rep, rep->r_procp)))
+               nfs_mount_check_dead_timeout(nmp);
+               if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       return (error);
+               }
+               lck_mtx_unlock(&nmp->nm_lock);
+               tsleep(&lbolt, PSOCK, "nfs_reconnect_delay", 0);
+               if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
                        return (error);
-               tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
        }
 
+       if (wentdown)
+               nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
+
        /*
-        * Loop through outstanding request list and fix up all requests
-        * on old socket.
+        * Loop through outstanding request list and mark all requests
+        * as needing a resend.  (Though nfs_need_reconnect() probably
+        * marked them all already.)
         */
-       TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
-               if (rp->r_nmp == nmp)
-                       rp->r_flags |= R_MUSTRESEND;
+       lck_mtx_lock(nfs_request_mutex);
+       TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+               if (rq->r_nmp == nmp) {
+                       lck_mtx_lock(&rq->r_mtx);
+                       if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
+                               rq->r_flags |= R_MUSTRESEND;
+                               rq->r_rtt = -1;
+                               wakeup(rq);
+                               if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+                                       nfs_asyncio_resend(rq);
+                       }
+                       lck_mtx_unlock(&rq->r_mtx);
+               }
        }
+       lck_mtx_unlock(nfs_request_mutex);
        return (0);
 }
 
@@ -545,1443 +487,2989 @@ nfs_disconnect(struct nfsmount *nmp)
 {
        socket_t so;
 
+       lck_mtx_lock(&nmp->nm_lock);
+       if ((nmp->nm_sotype == SOCK_STREAM) && nmp->nm_m) {
+               mbuf_freem(nmp->nm_m);
+               nmp->nm_m = nmp->nm_mlast = NULL;
+       }
        if (nmp->nm_so) {
                so = nmp->nm_so;
-               nmp->nm_so = 0;
-               sock_shutdown(so, 2);
+               nmp->nm_so = NULL;
+               lck_mtx_unlock(&nmp->nm_lock);
+               sock_shutdown(so, SHUT_RDWR);
                sock_close(so);
+       } else {
+               lck_mtx_unlock(&nmp->nm_lock);
        }
 }
 
 /*
- * This is the nfs send routine. For connection based socket types, it
- * must be called with an nfs_sndlock() on the socket.
- * "rep == NULL" indicates that it has been called from a server.
- * For the client side:
- * - return EINTR if the RPC is terminated, 0 otherwise
- * - set R_MUSTRESEND if the send fails for any reason
- * - do any cleanup required by recoverable socket errors (???)
- * For the server side:
- * - return EINTR or ERESTART if interrupted by a signal
- * - return EPIPE if a connection is lost for connection based sockets (TCP...)
- * - do any cleanup required by recoverable socket errors (???)
+ * mark an NFS mount as needing a reconnect/resends.
  */
-int
-nfs_send(so, nam, top, rep)
-       socket_t so;
-       mbuf_t nam;
-       mbuf_t top;
-       struct nfsreq *rep;
+void
+nfs_need_reconnect(struct nfsmount *nmp)
 {
-       struct sockaddr *sendnam;
-       int error, error2, sotype, flags;
-       u_long xidqueued = 0;
-       struct nfsreq *rp;
-       char savenametolog[MAXPATHLEN];
-       struct msghdr msg;
-       
-       if (rep) {
-               error = nfs_sigintr(rep->r_nmp, rep, rep->r_procp);
-               if (error) {
-                       mbuf_freem(top);
-                       return (error);
-               }
-               if ((so = rep->r_nmp->nm_so) == NULL) {
-                       rep->r_flags |= R_MUSTRESEND;
-                       mbuf_freem(top);
-                       return (0);
-               }
-               rep->r_flags &= ~R_MUSTRESEND;
-               TAILQ_FOREACH(rp, &nfs_reqq, r_chain)
-                       if (rp == rep)
-                               break;
-               if (rp)
-                       xidqueued = rp->r_xid;
-       }
-       sock_gettype(so, NULL, &sotype, NULL);
-       if ((sotype == SOCK_STREAM) || (sock_isconnected(so)) ||
-           (nam == 0))
-               sendnam = (struct sockaddr *)0;
-       else
-               sendnam = mbuf_data(nam);
+       struct nfsreq *rq;
 
-       if (sotype == SOCK_SEQPACKET)
-               flags = MSG_EOR;
-       else
-               flags = 0;
+       lck_mtx_lock(&nmp->nm_lock);
+       nmp->nm_sockflags &= ~(NMSOCK_READY|NMSOCK_SETUP);
+       lck_mtx_unlock(&nmp->nm_lock);
 
-       /* 
-        * Save the name here in case mount point goes away if we block.
-        * The name is using local stack and is large, but don't
-        * want to block if we malloc.
+       /*
+        * Loop through outstanding request list and
+        * mark all requests as needing a resend.
         */
-       if (rep)
-               strncpy(savenametolog,
-                       vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname,
-                       MAXPATHLEN - 1);
-       bzero(&msg, sizeof(msg));
-       msg.msg_name = (caddr_t)sendnam;
-       msg.msg_namelen = sendnam == 0 ? 0 : sendnam->sa_len;
-       error = sock_sendmbuf(so, &msg, top, flags, NULL);
-
-       if (error) {
-               if (rep) {
-                       if (xidqueued) {
-                               TAILQ_FOREACH(rp, &nfs_reqq, r_chain)
-                                       if (rp == rep && rp->r_xid == xidqueued)
-                                               break;
-                               if (!rp)
-                                       panic("nfs_send: error %d xid %x gone",
-                                             error, xidqueued);
-                       }
-                       log(LOG_INFO, "nfs send error %d for server %s\n",
-                           error, savenametolog);
-                       /*
-                        * Deal with errors for the client side.
-                        */
-                       error2 = nfs_sigintr(rep->r_nmp, rep, rep->r_procp);
-                       if (error2) {
-                               error = error2;
-                       } else {
-                               rep->r_flags |= R_MUSTRESEND;
+       lck_mtx_lock(nfs_request_mutex);
+       TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+               if (rq->r_nmp == nmp) {
+                       lck_mtx_lock(&rq->r_mtx);
+                       if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
+                               rq->r_flags |= R_MUSTRESEND;
+                               rq->r_rtt = -1;
+                               wakeup(rq);
+                               if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+                                       nfs_asyncio_resend(rq);
                        }
-               } else
-                       log(LOG_INFO, "nfsd send error %d\n", error);
-
-               /*
-                * Handle any recoverable (soft) socket errors here. (???)
-                */
-               if (error != EINTR && error != ERESTART && error != EIO &&
-                       error != EWOULDBLOCK && error != EPIPE) {
-                       error = 0;
+                       lck_mtx_unlock(&rq->r_mtx);
                }
        }
-       return (error);
+       lck_mtx_unlock(nfs_request_mutex);
 }
 
 /*
- * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
- * done by soreceive(), but for SOCK_STREAM we must deal with the Record
- * Mark and consolidate the data into a new mbuf list.
- * nb: Sometimes TCP passes the data up to soreceive() in long lists of
- *     small mbufs.
- * For SOCK_STREAM we must be very careful to read an entire record once
- * we have read any of it, even if the system call has been interrupted.
+ * thread to handle miscellaneous async NFS socket work (reconnects/resends)
  */
-static int
-nfs_receive(struct nfsreq *rep, mbuf_t *mp)
+void
+nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
 {
-       socket_t so;
-       struct iovec_32 aio;
-       mbuf_t m, mlast;
-       u_long len, fraglen;
-       int error, error2, sotype;
-       proc_t p = current_proc();      /* XXX */
-       struct msghdr msg;
-       size_t rcvlen;
-       int lastfragment;
-
-       /*
-        * Set up arguments for soreceive()
-        */
-       *mp = NULL;
-       sotype = rep->r_nmp->nm_sotype;
-
-       /*
-        * For reliable protocols, lock against other senders/receivers
-        * in case a reconnect is necessary.
-        * For SOCK_STREAM, first get the Record Mark to find out how much
-        * more there is to get.
-        * We must lock the socket against other receivers
-        * until we have an entire rpc request/reply.
-        */
-       if (sotype != SOCK_DGRAM) {
-               error = nfs_sndlock(rep);
-               if (error)
-                       return (error);
-tryagain:
-               /*
-                * Check for fatal errors and resending request.
-                */
-               /*
-                * Ugh: If a reconnect attempt just happened, nm_so
-                * would have changed. NULL indicates a failed
-                * attempt that has essentially shut down this
-                * mount point.
-                */
-               if ((error = nfs_sigintr(rep->r_nmp, rep, p)) || rep->r_mrep) {
-                       nfs_sndunlock(rep);
-                       if (error)
-                               return (error);
-                       return (EINTR);
-               }
-               so = rep->r_nmp->nm_so;
-               if (!so) {
-                       error = nfs_reconnect(rep);
-                       if (error) {
-                               nfs_sndunlock(rep);
-                               return (error);
+       struct nfsmount *nmp = arg;
+       struct timespec ts = { 30, 0 };
+       thread_t thd = current_thread();
+       struct nfsreq *req;
+       struct timeval now;
+       int error, dofinish, force;
+       nfsnode_t np;
+       fhandle_t fh;
+       nfs_stateid dstateid;
+
+       lck_mtx_lock(&nmp->nm_lock);
+
+       while (!(nmp->nm_sockflags & NMSOCK_READY) ||
+              !TAILQ_EMPTY(&nmp->nm_resendq) ||
+              nmp->nm_deadto_start ||
+              ((nmp->nm_vers >= NFS_VER4) &&
+                       ((nmp->nm_state & NFSSTA_RECOVER) || !TAILQ_EMPTY(&nmp->nm_recallq))))
+       {
+               if (nmp->nm_sockflags & NMSOCK_UNMOUNT)
+                       break;
+               force = (nmp->nm_state & NFSSTA_FORCE);
+               /* do reconnect, if necessary */
+               if (!(nmp->nm_sockflags & NMSOCK_READY) && !force) {
+                       if (nmp->nm_reconnect_start <= 0) {
+                               microuptime(&now);
+                               nmp->nm_reconnect_start = now.tv_sec;
                        }
-                       goto tryagain;
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       NFS_SOCK_DBG(("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname));
+                       if (nfs_reconnect(nmp) == 0)
+                               nmp->nm_reconnect_start = 0;
+                       lck_mtx_lock(&nmp->nm_lock);
                }
-               while (rep->r_flags & R_MUSTRESEND) {
-                       error = mbuf_copym(rep->r_mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m);
-                       if (!error) {
-                               OSAddAtomic(1, (SInt32*)&nfsstats.rpcretries);
-                               error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
-                       }
-                       /*
-                        * we also hold rcv lock so rep is still
-                        * legit this point
-                        */
-                       if (error) {
-                               if (error == EINTR || error == ERESTART ||
-                                   (error = nfs_reconnect(rep))) {
-                                       nfs_sndunlock(rep);
-                                       return (error);
-                               }
-                               goto tryagain;
+               if ((nmp->nm_sockflags & NMSOCK_READY) &&
+                   (nmp->nm_state & NFSSTA_RECOVER) &&
+                   !(nmp->nm_sockflags & NMSOCK_UNMOUNT) && !force) {
+                       /* perform state recovery */
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       nfs4_recover(nmp);
+                       lck_mtx_lock(&nmp->nm_lock);
+               }
+               /* handle NFSv4 delegation recalls */
+               while ((nmp->nm_vers >= NFS_VER4) && !force &&
+                      (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
+                      ((np = TAILQ_FIRST(&nmp->nm_recallq)))) {
+                       TAILQ_REMOVE(&nmp->nm_recallq, np, n_dlink);
+                       np->n_dlink.tqe_next = NFSNOLIST;
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_lock(&np->n_openlock);
+                       dstateid = np->n_dstateid;
+                       if (np->n_openflags & N_DELEG_MASK) {
+                               fh.fh_len = np->n_fhsize;
+                               bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
+                               np->n_openflags &= ~N_DELEG_MASK;
+                               lck_mtx_unlock(&np->n_openlock);
+                               nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, thd, nmp->nm_mcred);
+                       } else {
+                               lck_mtx_unlock(&np->n_openlock);
                        }
+                       lck_mtx_lock(&nmp->nm_lock);
                }
-               nfs_sndunlock(rep);
-               if (sotype == SOCK_STREAM) {
-                       error = 0;
-                       len = 0;
-                       lastfragment = 0;
-                       mlast = NULL;
-                       while (!error && !lastfragment) {
-                               aio.iov_base = (uintptr_t) &fraglen;
-                               aio.iov_len = sizeof(u_long);
-                               bzero(&msg, sizeof(msg));
-                               msg.msg_iov = (struct iovec *) &aio;
-                               msg.msg_iovlen = 1;
-                               do {
-                                  error = sock_receive(so, &msg, MSG_WAITALL, &rcvlen);
-                                  if (!rep->r_nmp) /* if unmounted then bailout */
-                                       goto shutout;
-                                  if (error == EWOULDBLOCK && rep) {
-                                       error2 = nfs_sigintr(rep->r_nmp, rep, p);
-                                       if (error2)
-                                               error = error2;
-                                  }
-                               } while (error == EWOULDBLOCK);
-                               if (!error && rcvlen < aio.iov_len) {
-                                   /* only log a message if we got a partial word */
-                                   if (rcvlen != 0)
-                                           log(LOG_INFO,
-                                                "short receive (%d/%d) from nfs server %s\n",
-                                                rcvlen, sizeof(u_long),
-                                                vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
-                                   error = EPIPE;
-                               }
-                               if (error)
-                                       goto errout;
-                               lastfragment = ntohl(fraglen) & 0x80000000;
-                               fraglen = ntohl(fraglen) & ~0x80000000;
-                               len += fraglen;
-                               /*
-                                * This is SERIOUS! We are out of sync with the sender
-                                * and forcing a disconnect/reconnect is all I can do.
-                                */
-                               if (len > NFS_MAXPACKET) {
-                                   log(LOG_ERR, "%s (%d) from nfs server %s\n",
-                                       "impossible RPC record length", len,
-                                       vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
-                                   error = EFBIG;
-                                   goto errout;
-                               }
-
-                               m = NULL;
-                               do {
-                                   rcvlen = fraglen;
-                                   error = sock_receivembuf(so, NULL, &m, MSG_WAITALL, &rcvlen);
-                                   if (!rep->r_nmp) /* if unmounted then bailout */ {
-                                       goto shutout;
-                                   }
-                               } while (error == EWOULDBLOCK || error == EINTR ||
-                                        error == ERESTART);
-
-                               if (!error && fraglen > rcvlen) {
-                                   log(LOG_INFO,
-                                       "short receive (%d/%d) from nfs server %s\n",
-                                       rcvlen, fraglen,
-                                       vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
-                                   error = EPIPE;
-                                   mbuf_freem(m);
-                               }
-                               if (!error) {
-                                       if (!*mp) {
-                                               *mp = m;
-                                               mlast = m;
-                                       } else {
-                                               error = mbuf_setnext(mlast, m);
-                                               if (error) {
-                                                       printf("nfs_receive: mbuf_setnext failed %d\n", error);
-                                                       mbuf_freem(m);
-                                               }
-                                       }
-                                       while (mbuf_next(mlast))
-                                               mlast = mbuf_next(mlast);
-                               }
+               /* do resends, if necessary/possible */
+               while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) || force) &&
+                      ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
+                       if (req->r_resendtime)
+                               microuptime(&now);
+                       while (req && !force && req->r_resendtime && (now.tv_sec < req->r_resendtime))
+                               req = TAILQ_NEXT(req, r_rchain);
+                       if (!req)
+                               break;
+                       TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                       req->r_rchain.tqe_next = NFSREQNOLIST;
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_lock(&req->r_mtx);
+                       if (req->r_error || req->r_nmrep.nmc_mhead) {
+                               dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+                               req->r_flags &= ~R_RESENDQ;
+                               wakeup(req);
+                               lck_mtx_unlock(&req->r_mtx);
+                               if (dofinish)
+                                       nfs_asyncio_finish(req);
+                               lck_mtx_lock(&nmp->nm_lock);
+                               continue;
                        }
-               } else {
-                       bzero(&msg, sizeof(msg));
-                       do {
-                           rcvlen = 100000000;
-                           error = sock_receivembuf(so, &msg, mp, 0, &rcvlen);
-                           if (!rep->r_nmp) /* if unmounted then bailout */ {
-                               goto shutout;
-                           }   
-                           if (error == EWOULDBLOCK && rep) {
-                               error2 = nfs_sigintr(rep->r_nmp, rep, p);
-                               if (error2) {
-                                       return (error2);
+                       if ((req->r_flags & R_RESTART) || req->r_gss_ctx) {
+                               req->r_flags &= ~R_RESTART;
+                               req->r_resendtime = 0;
+                               lck_mtx_unlock(&req->r_mtx);
+                               /* async RPCs on GSS mounts need to be rebuilt and resent. */
+                               nfs_reqdequeue(req);
+                               if (req->r_gss_ctx) {
+                                       nfs_gss_clnt_rpcdone(req);
+                                       error = nfs_gss_clnt_args_restore(req);
+                                       if (error == ENEEDAUTH)
+                                               req->r_xid = 0;
                                }
-                           }
-                       } while (error == EWOULDBLOCK);
-
-                       if ((msg.msg_flags & MSG_EOR) == 0)
-                               printf("Egad!!\n");
-                       if (!error && *mp == NULL)
-                               error = EPIPE;
-                       len = rcvlen;
-               }
-errout:
-               if (error && error != EINTR && error != ERESTART) {
-                       mbuf_freem(*mp);
-                       *mp = NULL;
-                       if (error != EPIPE)
-                               log(LOG_INFO,
-                                   "receive error %d from nfs server %s\n", error,
-                                   vfs_statfs(rep->r_nmp->nm_mountp)->f_mntfromname);
-                       error = nfs_sndlock(rep);
-                       if (!error) {
-                               error = nfs_reconnect(rep);
+                               NFS_SOCK_DBG(("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
+                                       req->r_gss_ctx ? " gss" : "", req->r_procnum, req->r_xid,
+                                       req->r_flags, req->r_rtt));
+                               error = !req->r_nmp ? ENXIO : 0;        /* unmounted? */
+                               if (!error)
+                                       error = nfs_sigintr(nmp, req, req->r_thread, 0);
                                if (!error)
-                                       goto tryagain;
-                               nfs_sndunlock(rep);
+                                       error = nfs_request_add_header(req);
+                               if (!error)
+                                       error = nfs_request_send(req, 0);
+                               lck_mtx_lock(&req->r_mtx);
+                               if (req->r_flags & R_RESENDQ)
+                                       req->r_flags &= ~R_RESENDQ;
+                               if (error)
+                                       req->r_error = error;
+                               wakeup(req);
+                               dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+                               lck_mtx_unlock(&req->r_mtx);
+                               if (dofinish)
+                                       nfs_asyncio_finish(req);
+                               lck_mtx_lock(&nmp->nm_lock);
+                               error = 0;
+                               continue;
                        }
-               }
-       } else {
-               /*
-                * We could have failed while rebinding the datagram socket
-                * so we need to attempt to rebind here.
-                */
-               if ((so = rep->r_nmp->nm_so) == NULL) {
-                       error = nfs_sndlock(rep);
+                       NFS_SOCK_DBG(("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
+                               req->r_procnum, req->r_xid, req->r_flags, req->r_rtt));
+                       error = !req->r_nmp ? ENXIO : 0;        /* unmounted? */
+                       if (!error)
+                               error = nfs_sigintr(nmp, req, req->r_thread, 0);
                        if (!error) {
-                               error = nfs_reconnect(rep);
-                               nfs_sndunlock(rep);
-                       }
-                       if (error)
-                               return (error);
-                       if (!rep->r_nmp) /* if unmounted then bailout */
-                               return (ENXIO);
-                       so = rep->r_nmp->nm_so;
-               }
-               bzero(&msg, sizeof(msg));
-               len = 0;
-               do {
-                       rcvlen = 1000000;
-                       error = sock_receivembuf(so, &msg, mp, 0, &rcvlen);
-                       if (!rep->r_nmp) /* if unmounted then bailout */
-                               goto shutout;
-                       if (error) {
-                               error2 = nfs_sigintr(rep->r_nmp, rep, p);
-                               if (error2) {
-                                       error = error2;
-                                       goto shutout;
-                               }
-                       }
-                       /* Reconnect for all errors.  We may be receiving
-                        * soft/hard/blocking errors because of a network
-                        * change.
-                        * XXX: we should rate limit or delay this
-                        * to once every N attempts or something.
-                        * although TCP doesn't seem to.
-                        */
-                       if (error) {
-                               error2 = nfs_sndlock(rep);
-                               if (!error2) {
-                                       error2 = nfs_reconnect(rep);
-                                       if (error2)
-                                               error = error2;
-                                       else if (!rep->r_nmp) /* if unmounted then bailout */
-                                               error = ENXIO;
-                                       else
-                                               so = rep->r_nmp->nm_so;
-                                       nfs_sndunlock(rep);
-                               } else {
-                                       error = error2;
+                               req->r_flags |= R_SENDING;
+                               lck_mtx_unlock(&req->r_mtx);
+                               error = nfs_send(req, 0);
+                               lck_mtx_lock(&req->r_mtx);
+                               if (!error) {
+                                       if (req->r_flags & R_RESENDQ)
+                                               req->r_flags &= ~R_RESENDQ;
+                                       wakeup(req);
+                                       lck_mtx_unlock(&req->r_mtx);
+                                       lck_mtx_lock(&nmp->nm_lock);
+                                       continue;
                                }
                        }
-               } while (error == EWOULDBLOCK);
-       }
-shutout:
-       if (error) {
-               mbuf_freem(*mp);
-               *mp = NULL;
+                       req->r_error = error;
+                       if (req->r_flags & R_RESENDQ)
+                               req->r_flags &= ~R_RESENDQ;
+                       wakeup(req);
+                       dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+                       lck_mtx_unlock(&req->r_mtx);
+                       if (dofinish)
+                               nfs_asyncio_finish(req);
+                       lck_mtx_lock(&nmp->nm_lock);
+               }
+               if (nmp->nm_deadto_start)
+                       nfs_mount_check_dead_timeout(nmp);
+               if (force || (nmp->nm_state & NFSSTA_DEAD))
+                       break;
+               if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & NFSSTA_RECOVER)) {
+                       if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
+                           (nmp->nm_state & NFSSTA_RECOVER))
+                               ts.tv_sec = 1;
+                       else
+                               ts.tv_sec = 30;
+                       msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
+               }
+       }
+
+       /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
+       if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) && (nmp->nm_flag & NFSMNT_CALLUMNT) &&
+           (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
+               lck_mtx_unlock(&nmp->nm_lock);
+               nfs3_umount_rpc(nmp, vfs_context_kernel(),
+                       (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
+               lck_mtx_lock(&nmp->nm_lock);
+       }
+
+       if (nmp->nm_sockthd == thd)
+               nmp->nm_sockthd = NULL;
+       lck_mtx_unlock(&nmp->nm_lock);
+       wakeup(&nmp->nm_sockthd);
+       thread_terminate(thd);
+}
+
+/* start or wake a mount's socket thread */
+void
+nfs_mount_sock_thread_wake(struct nfsmount *nmp)
+{
+       if (nmp->nm_sockthd)
+               wakeup(&nmp->nm_sockthd);
+       else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS)
+               thread_deallocate(nmp->nm_sockthd);
+}
+
+/*
+ * Check if we should mark the mount dead because the
+ * unresponsive mount has reached the dead timeout.
+ * (must be called with nmp locked)
+ */
+void
+nfs_mount_check_dead_timeout(struct nfsmount *nmp)
+{
+       struct timeval now;
+
+       if (!(nmp->nm_flag & NFSMNT_DEADTIMEOUT))
+               return;
+       if (nmp->nm_deadto_start == 0)
+               return;
+       if (nmp->nm_state & NFSSTA_DEAD)
+               return;
+       microuptime(&now);
+       if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_deadtimeout)
+               return;
+       printf("nfs server %s: dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+       nmp->nm_state |= NFSSTA_DEAD;
+       vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_DEAD, 0);
+}
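+/*
+ * For example, with the dead timeout option in effect and nm_deadtimeout set
+ * to 60, a mount whose unresponsiveness was stamped into nm_deadto_start
+ * (presumably by the timer/recovery code elsewhere) is declared dead, and a
+ * VQ_DEAD event signalled, once 60 seconds of uptime have elapsed since that
+ * stamp.  (Illustrative numbers only.)
+ */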
+
+/*
+ * RPC record marker parsing state
+ */
+struct nfs_rpc_record_state
+{
+       uint16_t        nrrs_lastfrag;          /* last fragment of record */
+       uint16_t        nrrs_markerleft;        /* marker bytes remaining */
+       uint32_t        nrrs_fragleft;          /* fragment bytes remaining */
+       uint32_t        nrrs_reclen;            /* length of RPC record */
+       mbuf_t          nrrs_m;                 /* mbufs for current record */
+       mbuf_t          nrrs_mlast;
+};
+int nfs_rpc_record_read(socket_t, struct nfs_rpc_record_state *, int *, mbuf_t *);
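+/*
+ * A sketch of the framing being parsed (standard ONC RPC record marking over
+ * TCP; the value below is illustrative): each fragment is preceded by a
+ * 4-byte marker whose high bit flags the last fragment of the record and
+ * whose low 31 bits give the fragment length.  Reading a marker of
+ * 0x800001f4 into nrrs_fragleft therefore means "last fragment, 500 bytes
+ * follow"; nrrs_reclen accumulates the fragment lengths until the record is
+ * complete.
+ */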
+
+/*
+ * NFS callback channel socket state
+ */
+struct nfs_callback_socket
+{
+       TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
+       socket_t                        ncbs_so;        /* the socket */
+       struct sockaddr_in              ncbs_sin;       /* socket address */
+       struct nfs_rpc_record_state     ncbs_rrs;       /* RPC record parsing state */
+       time_t                          ncbs_stamp;     /* last accessed at */
+       uint32_t                        ncbs_flags;     /* see below */
+};
+#define NCBSOCK_UPCALL         0x0001
+#define NCBSOCK_UPCALLWANT     0x0002
+#define NCBSOCK_DEAD           0x0004
+
+/*
+ * NFS callback channel state
+ *
+ * One listening socket for accepting socket connections from servers and
+ * a list of connected sockets to handle callback requests on.
+ * Mounts registered with the callback channel are assigned IDs and
+ * put on a list so that the callback request handling code can match
+ * the requests up with mounts.
+ */
+socket_t nfs4_cb_so = NULL;
+in_port_t nfs4_cb_port = 0;
+uint32_t nfs4_cb_id = 0;
+uint32_t nfs4_cb_so_usecount = 0;
+TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks;
+TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts;
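+/*
+ * Rough call flow, as implemented below: nfs4_mount_callback_setup() assigns
+ * a mount its callback ID and, for the first mount, creates the listening
+ * socket; nfs4_cb_accept() admits connections from servers and hands them to
+ * nfs4_cb_rcv(), which assembles RPC records and passes each one to
+ * nfs4_cb_handler(); nfs4_callback_timer() reaps idle or dead callback
+ * sockets; nfs4_mount_callback_shutdown() drops a mount's registration and,
+ * on the last reference, tears the sockets down.
+ */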
+
+int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
+
+/*
+ * Set up the callback channel for the NFS mount.
+ *
+ * Initializes the callback channel socket state and
+ * assigns a callback ID to the mount.
+ */
+void
+nfs4_mount_callback_setup(struct nfsmount *nmp)
+{
+       struct sockaddr_in sin;
+       socket_t so = NULL;
+       struct timeval timeo;
+       int error, on = 1;
+
+       lck_mtx_lock(nfs_global_mutex);
+       if (nfs4_cb_id == 0) {
+               TAILQ_INIT(&nfs4_cb_mounts);
+               TAILQ_INIT(&nfs4_cb_socks);
+               nfs4_cb_id++;
+       }
+       nmp->nm_cbid = nfs4_cb_id++;
+       if (nmp->nm_cbid == 0)
+               nmp->nm_cbid = nfs4_cb_id++;
+       nfs4_cb_so_usecount++;
+       TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
+
+       if (nfs4_cb_so) {
+               lck_mtx_unlock(nfs_global_mutex);
+               return;
+       }
+
+       error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
+       if (error) {
+               log(LOG_INFO, "nfs callback setup: error %d creating listening socket\n", error);
+               goto fail;
+       }
+       so = nfs4_cb_so;
+
+       sin.sin_len = sizeof(struct sockaddr_in);
+       sin.sin_family = AF_INET;
+       sin.sin_addr.s_addr = htonl(INADDR_ANY);
+       sin.sin_port = 0;
+       error = sock_bind(so, (struct sockaddr *)&sin);
+       if (error) {
+               log(LOG_INFO, "nfs callback setup: error %d binding listening socket\n", error);
+               goto fail;
+       }
+       error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
+       if (error) {
+               log(LOG_INFO, "nfs callback setup: error %d getting listening socket port\n", error);
+               goto fail;
+       }
+       nfs4_cb_port = ntohs(sin.sin_port);
+
+       error = sock_listen(so, 32);
+       if (error) {
+               log(LOG_INFO, "nfs callback setup: error %d on listen\n", error);
+               goto fail;
+       }
+
+       /* The receive timeout shouldn't matter; if a send times out, we'll want to drop the socket */
+       timeo.tv_usec = 0;
+       timeo.tv_sec = 60;
+       error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+       if (error)
+               log(LOG_INFO, "nfs callback setup: error %d setting socket rx timeout\n", error);
+       error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+       if (error)
+               log(LOG_INFO, "nfs callback setup: error %d setting socket tx timeout\n", error);
+       sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+       sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+       sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+       error = 0;
+
+fail:
+       if (error) {
+               nfs4_cb_so = NULL;
+               lck_mtx_unlock(nfs_global_mutex);
+               if (so) {
+                       sock_shutdown(so, SHUT_RDWR);
+                       sock_close(so);
+               }
+       } else {
+               lck_mtx_unlock(nfs_global_mutex);
+       }
+}
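+/*
+ * Note: binding to port 0 above lets the kernel choose an ephemeral port,
+ * which is recorded in nfs4_cb_port; presumably the client advertises that
+ * port to the server elsewhere (e.g. in its SETCLIENTID callback
+ * information) so the server knows where to connect back.
+ */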
+
+/*
+ * Shut down the callback channel for the NFS mount.
+ *
+ * Clears the mount's callback ID and releases the mount's
+ * reference on the callback socket.  Dropping the last reference
+ * also shuts down the callback socket(s).
+ */
+void
+nfs4_mount_callback_shutdown(struct nfsmount *nmp)
+{
+       struct nfs_callback_socket *ncbsp;
+       socket_t so;
+       struct nfs4_cb_sock_list cb_socks;
+       struct timespec ts = {1,0};
+
+       lck_mtx_lock(nfs_global_mutex);
+       TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
+       /* wait for any callbacks in progress to complete */
+       while (nmp->nm_cbrefs)
+               msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+       if (--nfs4_cb_so_usecount) {
+               lck_mtx_unlock(nfs_global_mutex);
+               return;
+       }
+       so = nfs4_cb_so;
+       nfs4_cb_so = NULL;
+       TAILQ_INIT(&cb_socks);
+       TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
+       lck_mtx_unlock(nfs_global_mutex);
+       if (so) {
+               sock_shutdown(so, SHUT_RDWR);
+               sock_close(so);
+       }
+       while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
+               TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
+               sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+               sock_close(ncbsp->ncbs_so);
+               FREE(ncbsp, M_TEMP);
+       }
+}
+
+/*
+ * Check periodically for stale/unused nfs callback sockets
+ */
+#define NFS4_CB_TIMER_PERIOD   30
+#define NFS4_CB_IDLE_MAX       300
+void
+nfs4_callback_timer(__unused void *param0, __unused void *param1)
+{
+       struct nfs_callback_socket *ncbsp, *nextncbsp;
+       struct timeval now;
+
+loop:
+       lck_mtx_lock(nfs_global_mutex);
+       if (TAILQ_EMPTY(&nfs4_cb_socks)) {
+               nfs4_callback_timer_on = 0;
+               lck_mtx_unlock(nfs_global_mutex);
+               return;
+       }
+       microuptime(&now);
+       TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
+               if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
+                    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX)))
+                       continue;
+               TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
+               lck_mtx_unlock(nfs_global_mutex);
+               sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+               sock_close(ncbsp->ncbs_so);
+               FREE(ncbsp, M_TEMP);
+               goto loop;
+       }
+       nfs4_callback_timer_on = 1;
+       nfs_interval_timer_start(nfs4_callback_timer_call,
+               NFS4_CB_TIMER_PERIOD * 1000);
+       lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Accept a new callback socket.
+ */
+void
+nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
+{
+       socket_t newso = NULL;
+       struct nfs_callback_socket *ncbsp;
+       struct nfsmount *nmp;
+       struct timeval timeo, now;
+       struct sockaddr_in *saddr;
+       int error, on = 1;
+
+       if (so != nfs4_cb_so)
+               return;
+
+       /* allocate/initialize a new nfs_callback_socket */
+       MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
+       if (!ncbsp) {
+               log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
+               return;
+       }
+       bzero(ncbsp, sizeof(*ncbsp));
+       ncbsp->ncbs_sin.sin_len = sizeof(struct sockaddr_in);
+       ncbsp->ncbs_rrs.nrrs_markerleft = sizeof(ncbsp->ncbs_rrs.nrrs_fragleft);
+
+       /* accept a new socket */
+       error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_sin,
+                       ncbsp->ncbs_sin.sin_len, MSG_DONTWAIT,
+                       nfs4_cb_rcv, ncbsp, &newso);
+       if (error) {
+               log(LOG_INFO, "nfs callback accept: error %d accepting socket\n", error);
+               FREE(ncbsp, M_TEMP);
+               return;
+       }
+
+       /* set up the new socket */
+       /* The receive timeout shouldn't matter; if a send times out, we'll want to drop the socket */
+       timeo.tv_usec = 0;
+       timeo.tv_sec = 60;
+       error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+       if (error)
+               log(LOG_INFO, "nfs callback socket: error %d setting socket rx timeout\n", error);
+       error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+       if (error)
+               log(LOG_INFO, "nfs callback socket: error %d setting socket tx timeout\n", error);
+       sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+       sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+       sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+
+       ncbsp->ncbs_so = newso;
+       microuptime(&now);
+       ncbsp->ncbs_stamp = now.tv_sec;
+
+       lck_mtx_lock(nfs_global_mutex);
+
+       /* add it to the list */
+       TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
+
+       /* verify it's from a host we have mounted */
+       TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+               /* check socket's source address matches this mount's server address */
+               saddr = mbuf_data(nmp->nm_nam);
+               if ((ncbsp->ncbs_sin.sin_len == saddr->sin_len) &&
+                   (ncbsp->ncbs_sin.sin_family == saddr->sin_family) &&
+                   (ncbsp->ncbs_sin.sin_addr.s_addr == saddr->sin_addr.s_addr))
+                       break;
+       }
+       if (!nmp) /* we don't want this socket, mark it dead */
+               ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+
+       /* make sure the callback socket cleanup timer is running */
+       /* (shorten the timer if we've got a socket we don't want) */
+       if (!nfs4_callback_timer_on) {
+               nfs4_callback_timer_on = 1;
+               nfs_interval_timer_start(nfs4_callback_timer_call,
+                       !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
+       } else if (!nmp && (nfs4_callback_timer_on < 2)) {
+               nfs4_callback_timer_on = 2;
+               thread_call_cancel(nfs4_callback_timer_call);
+               nfs_interval_timer_start(nfs4_callback_timer_call, 500);
+       }
+
+       lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Receive mbufs from callback sockets into RPC records and process each record.
+ * If the connection has been closed, mark the socket dead for the cleanup timer to reap.
+ */
+void
+nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+       struct nfs_callback_socket *ncbsp = arg;
+       struct timespec ts = {1,0};
+       struct timeval now;
+       mbuf_t m;
+       int error = 0, recv = 1;
+
+       lck_mtx_lock(nfs_global_mutex);
+       while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
+               /* wait if upcall is already in progress */
+               ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
+               msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+       }
+       ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
+       lck_mtx_unlock(nfs_global_mutex);
+
+       /* loop while we make error-free progress */
+       while (!error && recv) {
+               error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, &recv, &m);
+               if (m) /* handle the request */
+                       error = nfs4_cb_handler(ncbsp, m);
+       }
+
+       /* note: no error and no data indicates server closed its end */
+       if ((error != EWOULDBLOCK) && (error || !recv)) {
+               /*
+                * Socket is either being closed or should be.
+                * We can't close the socket in the context of the upcall.
+                * So we mark it as dead and leave it for the cleanup timer to reap.
+                */
+               ncbsp->ncbs_stamp = 0;
+               ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+       } else {
+               microuptime(&now);
+               ncbsp->ncbs_stamp = now.tv_sec;
+       }
+
+       lck_mtx_lock(nfs_global_mutex);
+       ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
+       lck_mtx_unlock(nfs_global_mutex);
+       wakeup(ncbsp);
+}
+
+/*
+ * Handle an NFS callback channel request.
+ */
+int
+nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
+{
+       socket_t so = ncbsp->ncbs_so;
+       struct nfsm_chain nmreq, nmrep;
+       mbuf_t mhead = NULL, mrest = NULL, m;
+       struct sockaddr_in *saddr;
+       struct msghdr msg;
+       struct nfsmount *nmp;
+       fhandle_t fh;
+       nfsnode_t np;
+       nfs_stateid stateid;
+       uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
+       uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
+       uint32_t auth_type, auth_len;
+       uint32_t numres, *pnumres;
+       int error = 0, replen, len;
+       size_t sentlen = 0;
+
+       xid = numops = op = status = procnum = taglen = cbid = 0;
+
+       nfsm_chain_dissect_init(error, &nmreq, mreq);
+       nfsm_chain_get_32(error, &nmreq, xid);          // RPC XID
+       nfsm_chain_get_32(error, &nmreq, val);          // RPC Call
+       nfsm_assert(error, (val == RPC_CALL), EBADRPC);
+       nfsm_chain_get_32(error, &nmreq, val);          // RPC Version
+       nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
+       nfsm_chain_get_32(error, &nmreq, val);          // RPC Program Number
+       nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
+       nfsm_chain_get_32(error, &nmreq, val);          // NFS Callback Program Version Number
+       nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
+       nfsm_chain_get_32(error, &nmreq, procnum);      // NFS Callback Procedure Number
+       nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
+
+       /* Handle authentication */
+       /* XXX just ignore auth for now - handling kerberos may be tricky */
+       nfsm_chain_get_32(error, &nmreq, auth_type);    // RPC Auth Flavor
+       nfsm_chain_get_32(error, &nmreq, auth_len);     // RPC Auth Length
+       nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+       if (!error && (auth_len > 0))
+               nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+       nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED);   // verifier flavor (should be AUTH_NONE)
+       nfsm_chain_get_32(error, &nmreq, auth_len);     // verifier length
+       nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+       if (!error && (auth_len > 0))
+               nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+       if (error) {
+               status = error;
+               error = 0;
+               goto nfsmout;
+       }
+
+       switch (procnum) {
+       case NFSPROC4_CB_NULL:
+               status = NFSERR_RETVOID;
+               break;
+       case NFSPROC4_CB_COMPOUND:
+               /* tag, minorversion, cb ident, numops, op array */
+               nfsm_chain_get_32(error, &nmreq, taglen);       /* tag length */
+               nfsm_assert(error, (taglen <= NFS4_OPAQUE_LIMIT), EBADRPC);
+
+               /* start building the body of the response */
+               nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5*NFSX_UNSIGNED);
+               nfsm_chain_init(&nmrep, mrest);
+
+               /* copy tag from request to response */
+               nfsm_chain_add_32(error, &nmrep, taglen);       /* tag length */
+               for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
+                       nfsm_chain_get_32(error, &nmreq, val);
+                       nfsm_chain_add_32(error, &nmrep, val);
+               }
+
+               /* insert number of results placeholder */
+               numres = 0;
+               nfsm_chain_add_32(error, &nmrep, numres);
+               pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
+
+               nfsm_chain_get_32(error, &nmreq, val);          /* minorversion */
+               nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
+               nfsm_chain_get_32(error, &nmreq, cbid);         /* callback ID */
+               nfsm_chain_get_32(error, &nmreq, numops);       /* number of operations */
+               if (error) {
+                       if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH))
+                               status = error;
+                       else if ((error == ENOBUFS) || (error == ENOMEM))
+                               status = NFSERR_RESOURCE;
+                       else 
+                               status = NFSERR_SERVERFAULT;
+                       error = 0;
+                       nfsm_chain_null(&nmrep);
+                       goto nfsmout;
+               }
+               /* match the callback ID to a registered mount */
+               lck_mtx_lock(nfs_global_mutex);
+               TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+                       if (nmp->nm_cbid != cbid)
+                               continue;
+                       /* verify socket's source address matches this mount's server address */
+                       saddr = mbuf_data(nmp->nm_nam);
+                       if ((ncbsp->ncbs_sin.sin_len != saddr->sin_len) ||
+                           (ncbsp->ncbs_sin.sin_family != saddr->sin_family) ||
+                           (ncbsp->ncbs_sin.sin_addr.s_addr != saddr->sin_addr.s_addr))
+                               continue;
+                       break;
+               }
+               /* mark the NFS mount as busy */
+               if (nmp)
+                       nmp->nm_cbrefs++;
+               lck_mtx_unlock(nfs_global_mutex);
+               if (!nmp) {
+                       /* if no mount match, just drop socket. */
+                       error = EPERM;
+                       nfsm_chain_null(&nmrep);
+                       goto out;
+               }
+
+               /* process ops, adding results to mrest */
+               while (numops > 0) {
+                       numops--;
+                       nfsm_chain_get_32(error, &nmreq, op);
+                       if (error)
+                               break;
+                       switch (op) {
+                       case NFS_OP_CB_GETATTR:
+                               // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
+                               np = NULL;
+                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+                               bmlen = NFS_ATTR_BITMAP_LEN;
+                               nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
+                               if (error) {
+                                       status = error;
+                                       error = 0;
+                                       numops = 0; /* don't process any more ops */
+                               } else {
+                                       /* find the node for the file handle */
+                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, NG_NOCREATE, &np);
+                                       if (error || !np) {
+                                               status = NFSERR_BADHANDLE;
+                                               error = 0;
+                                               np = NULL;
+                                               numops = 0; /* don't process any more ops */
+                                       }
+                               }
+                               nfsm_chain_add_32(error, &nmrep, op);
+                               nfsm_chain_add_32(error, &nmrep, status);
+                               if (!error && (status == EBADRPC))
+                                       error = status;
+                               if (np) {
+                                       /* only allow returning size, change, and mtime attrs */
+                                       NFS_CLEAR_ATTRIBUTES(&rbitmap);
+                                       attrbytes = 0;
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
+                                               NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
+                                               attrbytes += 2 * NFSX_UNSIGNED;
+                                       }
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
+                                               NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
+                                               attrbytes += 2 * NFSX_UNSIGNED;
+                                       }
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+                                               NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
+                                               attrbytes += 3 * NFSX_UNSIGNED;
+                                       }
+                                       nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
+                                       nfsm_chain_add_32(error, &nmrep, attrbytes);
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE))
+                                               nfsm_chain_add_64(error, &nmrep,
+                                                       np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE))
+                                               nfsm_chain_add_64(error, &nmrep, np->n_size);
+                                       if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+                                               nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
+                                               nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
+                                       }
+                                       nfs_node_unlock(np);
+                                       vnode_put(NFSTOV(np));
+                                       np = NULL;
+                               }
+                               /*
+                                * If we hit an error building the reply, we can't easily back up.
+                                * So we'll just update the status and hope the server ignores the
+                                * extra garbage.
+                                */
+                               break;
+                       case NFS_OP_CB_RECALL:
+                               // (STATEID, TRUNCATE, FH) -> (STATUS)
+                               np = NULL;
+                               nfsm_chain_get_stateid(error, &nmreq, &stateid);
+                               nfsm_chain_get_32(error, &nmreq, truncate);
+                               nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+                               if (error) {
+                                       status = error;
+                                       error = 0;
+                                       numops = 0; /* don't process any more ops */
+                               } else {
+                                       /* find the node for the file handle */
+                                       error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, NG_NOCREATE, &np);
+                                       if (error || !np) {
+                                               status = NFSERR_BADHANDLE;
+                                               error = 0;
+                                               np = NULL;
+                                               numops = 0; /* don't process any more ops */
+                                       } else if (!(np->n_openflags & N_DELEG_MASK) ||
+                                                   bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
+                                               /* delegation stateid state doesn't match */
+                                               status = NFSERR_BAD_STATEID;
+                                               numops = 0; /* don't process any more ops */
+                                       }
+                                       if (!status) {
+                                               /* add node to recall queue, and wake socket thread */
+                                               lck_mtx_lock(&nmp->nm_lock);
+                                               if (np->n_dlink.tqe_next == NFSNOLIST)
+                                                       TAILQ_INSERT_TAIL(&nmp->nm_recallq, np, n_dlink);
+                                               nfs_mount_sock_thread_wake(nmp);
+                                               lck_mtx_unlock(&nmp->nm_lock);
+                                       }
+                                       if (np) {
+                                               nfs_node_unlock(np);
+                                               vnode_put(NFSTOV(np));
+                                       }
+                               }
+                               nfsm_chain_add_32(error, &nmrep, op);
+                               nfsm_chain_add_32(error, &nmrep, status);
+                               if (!error && (status == EBADRPC))
+                                       error = status;
+                               break;
+                       case NFS_OP_CB_ILLEGAL:
+                       default:
+                               nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
+                               status = NFSERR_OP_ILLEGAL;
+                               nfsm_chain_add_32(error, &nmrep, status);
+                               numops = 0; /* don't process any more ops */
+                               break;
+                       }
+                       numres++;
+               }
+
+               if (!status && error) {
+                       if (error == EBADRPC)
+                               status = error;
+                       else if ((error == ENOBUFS) || (error == ENOMEM))
+                               status = NFSERR_RESOURCE;
+                       else 
+                               status = NFSERR_SERVERFAULT;
+                       error = 0;
+               }
+
+               /* Now, set the numres field */
+               *pnumres = txdr_unsigned(numres);
+               nfsm_chain_build_done(error, &nmrep);
+               nfsm_chain_null(&nmrep);
+
+               /* drop the callback reference on the mount */
+               lck_mtx_lock(nfs_global_mutex);
+               nmp->nm_cbrefs--;
+               if (!nmp->nm_cbid)
+                       wakeup(&nmp->nm_cbrefs);
+               lck_mtx_unlock(nfs_global_mutex);
+               break;
+       }
+
+nfsmout:
+       if (status == EBADRPC)
+               OSAddAtomic(1, &nfsstats.rpcinvalid);
+
+       /* build reply header */
+       error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
+       nfsm_chain_init(&nmrep, mhead);
+       nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
+       nfsm_chain_add_32(error, &nmrep, xid);
+       nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
+       if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
+               nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
+               if (status & NFSERR_AUTHERR) {
+                       nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
+                       nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
+               } else {
+                       nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
+                       nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+                       nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+               }
+       } else {
+               /* reply status */
+               nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
+               /* XXX RPCAUTH_NULL verifier */
+               nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
+               nfsm_chain_add_32(error, &nmrep, 0);
+               /* accepted status */
+               switch (status) {
+               case EPROGUNAVAIL:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
+                       break;
+               case EPROGMISMATCH:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
+                       nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+                       nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+                       break;
+               case EPROCUNAVAIL:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
+                       break;
+               case EBADRPC:
+                       nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
+                       break;
+               default:
+                       nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
+                       if (status != NFSERR_RETVOID)
+                               nfsm_chain_add_32(error, &nmrep, status);
+                       break;
+               }
+       }
+       nfsm_chain_build_done(error, &nmrep);
+       if (error) {
+               nfsm_chain_null(&nmrep);
+               goto out;
+       }
+       error = mbuf_setnext(nmrep.nmc_mcur, mrest);
+       if (error) {
+               printf("nfs cb: mbuf_setnext failed %d\n", error);
+               goto out;
+       }
+       mrest = NULL;
+       /* Calculate the size of the reply */
+       replen = 0;
+       for (m = nmrep.nmc_mhead; m; m = mbuf_next(m))
+               replen += mbuf_len(m);
+       mbuf_pkthdr_setlen(mhead, replen);
+       error = mbuf_pkthdr_setrcvif(mhead, NULL);
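+       /* record mark: high bit flags the last fragment, low 31 bits carry the length (excluding the 4-byte mark itself) */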
+       nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
+       nfsm_chain_null(&nmrep);
+
+       /* send the reply */
+       bzero(&msg, sizeof(msg));
+       error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
+       mhead = NULL;
+       if (!error && ((int)sentlen != replen))
+               error = EWOULDBLOCK;
+       if (error == EWOULDBLOCK) /* inability to send response is considered fatal */
+               error = ETIMEDOUT;
+out:
+       if (error)
+               nfsm_chain_cleanup(&nmrep);
+       if (mhead)
+               mbuf_freem(mhead);
+       if (mrest)
+               mbuf_freem(mrest);
+       if (mreq)
+               mbuf_freem(mreq);
+       return (error);
+}
+
+
+/*
+ * Read the next (marked) RPC record from the socket.
+ *
+ * *recvp returns whether any data was received.
+ * *mp returns the next complete RPC record, if any.
+ */
+int
+nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int *recvp, mbuf_t *mp)
+{
+       struct iovec aio;
+       struct msghdr msg;
+       size_t rcvlen;
+       int error = 0;
+       mbuf_t m;
+
+       *recvp = 0;
+       *mp = NULL;
+
+       /* read the TCP RPC record marker */
+       while (!error && nrrsp->nrrs_markerleft) {
+               aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
+                               sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
+               aio.iov_len = nrrsp->nrrs_markerleft;
+               bzero(&msg, sizeof(msg));
+               msg.msg_iov = &aio;
+               msg.msg_iovlen = 1;
+               error = sock_receive(so, &msg, MSG_DONTWAIT, &rcvlen);
+               if (error || !rcvlen)
+                       break;
+               *recvp = 1;
+               nrrsp->nrrs_markerleft -= rcvlen;
+               if (nrrsp->nrrs_markerleft)
+                       continue;
+               /* record marker complete */
+               nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
+               if (nrrsp->nrrs_fragleft & 0x80000000) {
+                       nrrsp->nrrs_lastfrag = 1;
+                       nrrsp->nrrs_fragleft &= ~0x80000000;
+               }
+               nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
+               if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
+                       /*
+                        * This is SERIOUS! We are out of sync with the sender
+                        * and forcing a disconnect/reconnect is all I can do.
+                        */
+                       log(LOG_ERR, "impossible RPC record length (%d) on callback\n", nrrsp->nrrs_reclen);
+                       error = EFBIG;
+               }
+       }
+
+       /* read the TCP RPC record fragment */
+       while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
+               m = NULL;
+               rcvlen = nrrsp->nrrs_fragleft;
+               error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
+               if (error || !rcvlen || !m)
+                       break;
+               *recvp = 1;
+               /* append mbufs to list */
+               nrrsp->nrrs_fragleft -= rcvlen;
+               if (!nrrsp->nrrs_m) {
+                       nrrsp->nrrs_m = m;
+               } else {
+                       error = mbuf_setnext(nrrsp->nrrs_mlast, m);
+                       if (error) {
+                               printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
+                               mbuf_freem(m);
+                               break;
+                       }
+               }
+               while (mbuf_next(m))
+                       m = mbuf_next(m);
+               nrrsp->nrrs_mlast = m;
+       }
+
+       /* done reading fragment? */
+       if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
+               /* reset socket fragment parsing state */
+               nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
+               if (nrrsp->nrrs_lastfrag) {
+                       /* RPC record complete */
+                       *mp = nrrsp->nrrs_m;
+                       /* reset socket record parsing state */
+                       nrrsp->nrrs_reclen = 0;
+                       nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
+                       nrrsp->nrrs_lastfrag = 0;
+               }
+       }
+
+       return (error);
+}
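+/*
+ * Minimal usage sketch (handle_record() is a hypothetical stand-in for a
+ * real consumer such as nfs4_cb_handler() above): a caller keeps draining
+ * the socket while it is making error-free progress and processes each
+ * complete record as it appears:
+ *
+ *     recv = 1;
+ *     while (!error && recv) {
+ *             error = nfs_rpc_record_read(so, &rrs, &recv, &m);
+ *             if (m)
+ *                     error = handle_record(m);
+ *     }
+ */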
+
+
+
+/*
+ * The NFS client send routine.
+ *
+ * Send the given NFS request out the mount's socket.
+ * Holds nfs_sndlock() for the duration of this call.
+ *
+ * - check for request termination (sigintr)
+ * - wait for reconnect, if necessary
+ * - UDP: check the congestion window
+ * - make a copy of the request to send
+ * - UDP: update the congestion window
+ * - send the request
+ *
+ * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
+ * rexmit count is also updated if this isn't the first send.
+ *
+ * If the send is not successful, make sure R_MUSTRESEND is set.
+ * If this wasn't the first transmit, set R_RESENDERR.
+ * (UDP congestion window changes are deliberately not undone; the request
+ * still counts as sent for congestion window purposes.)
+ *
+ * If the error appears to indicate that the socket should
+ * be reconnected, mark the socket for reconnection.
+ *
+ * Only return errors when the request should be aborted.
+ */
+int
+nfs_send(struct nfsreq *req, int wait)
+{
+       struct nfsmount *nmp;
+       socket_t so;
+       int error, error2, sotype, rexmit, slpflag = 0, needrecon;
+       struct msghdr msg;
+       struct sockaddr *sendnam;
+       mbuf_t mreqcopy;
+       size_t sentlen = 0;
+       struct timespec ts = { 2, 0 };
+
+again:
+       error = nfs_sndlock(req);
+       if (error) {
+               lck_mtx_lock(&req->r_mtx);
+               req->r_error = error;
+               req->r_flags &= ~R_SENDING;
+               lck_mtx_unlock(&req->r_mtx);
+               return (error);
+       }
+
+       error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
+       if (error) {
+               nfs_sndunlock(req);
+               lck_mtx_lock(&req->r_mtx);
+               req->r_error = error;
+               req->r_flags &= ~R_SENDING;
+               lck_mtx_unlock(&req->r_mtx);
+               return (error);
+       }
+       nmp = req->r_nmp;
+       sotype = nmp->nm_sotype;
+
+       /*
+        * If it's a setup RPC but we're not in SETUP... must need reconnect.
+        * If it's a recovery RPC but the socket's not ready... must need reconnect.
+        */
+       if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
+           ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
+               error = ETIMEDOUT;
+               nfs_sndunlock(req);
+               lck_mtx_lock(&req->r_mtx);
+               req->r_error = error;
+               req->r_flags &= ~R_SENDING;
+               lck_mtx_unlock(&req->r_mtx);
+               return (error);
+       }
+
+       /* If the socket needs reconnection, do that now. */
+       /* wait until socket is ready - unless this request is part of setup */
+       lck_mtx_lock(&nmp->nm_lock);
+       if (!(nmp->nm_sockflags & NMSOCK_READY) &&
+           !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
+               if (nmp->nm_flag & NFSMNT_INT)
+                       slpflag |= PCATCH;
+               lck_mtx_unlock(&nmp->nm_lock);
+               nfs_sndunlock(req);
+               if (!wait) {
+                       lck_mtx_lock(&req->r_mtx);
+                       req->r_flags &= ~R_SENDING;
+                       req->r_flags |= R_MUSTRESEND;
+                       req->r_rtt = 0;
+                       lck_mtx_unlock(&req->r_mtx);
+                       return (0);
+               }
+               NFS_SOCK_DBG(("nfs_send: 0x%llx wait reconnect\n", req->r_xid));
+               lck_mtx_lock(&req->r_mtx);
+               req->r_flags &= ~R_MUSTRESEND;
+               req->r_rtt = 0;
+               lck_mtx_unlock(&req->r_mtx);
+               lck_mtx_lock(&nmp->nm_lock);
+               while (!(nmp->nm_sockflags & NMSOCK_READY)) {
+                       /* don't bother waiting if the socket thread won't be reconnecting it */
+                       if (nmp->nm_state & NFSSTA_FORCE) {
+                               error = EIO;
+                               break;
+                       }
+                       if ((nmp->nm_flag & NFSMNT_SOFT) && (nmp->nm_reconnect_start > 0)) {
+                               struct timeval now;
+                               microuptime(&now);
+                               if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
+                                       /* soft mount in reconnect for a while... terminate ASAP */
+                                       OSAddAtomic(1, &nfsstats.rpctimeouts);
+                                       req->r_flags |= R_SOFTTERM;
+                                       req->r_error = error = ETIMEDOUT;
+                                       break;
+                               }
+                       }
+                       /* make sure socket thread is running, then wait */
+                       nfs_mount_sock_thread_wake(nmp);
+                       if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
+                               break;
+                       msleep(req, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectwait", &ts);
+                       slpflag = 0;
+               }
+               lck_mtx_unlock(&nmp->nm_lock);
+               if (error) {
+                       lck_mtx_lock(&req->r_mtx);
+                       req->r_error = error;
+                       req->r_flags &= ~R_SENDING;
+                       lck_mtx_unlock(&req->r_mtx);
+                       return (error);
+               }
+               goto again;
+       }
+       so = nmp->nm_so;
+       lck_mtx_unlock(&nmp->nm_lock);
+       if (!so) {
+               nfs_sndunlock(req);
+               lck_mtx_lock(&req->r_mtx);
+               req->r_flags &= ~R_SENDING;
+               req->r_flags |= R_MUSTRESEND;
+               req->r_rtt = 0;
+               lck_mtx_unlock(&req->r_mtx);
+               return (0);
+       }
+
+       lck_mtx_lock(&req->r_mtx);
+       rexmit = (req->r_flags & R_SENT);
+
+       if (sotype == SOCK_DGRAM) {
+               lck_mtx_lock(&nmp->nm_lock);
+               if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
+                       /* if we can't send this out yet, wait on the cwnd queue */
+                       slpflag = ((nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       nfs_sndunlock(req);
+                       req->r_flags &= ~R_SENDING;
+                       req->r_flags |= R_MUSTRESEND;
+                       lck_mtx_unlock(&req->r_mtx);
+                       if (!wait) {
+                               req->r_rtt = 0;
+                               return (0);
+                       }
+                       lck_mtx_lock(&nmp->nm_lock);
+                       while (nmp->nm_sent >= nmp->nm_cwnd) {
+                               if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
+                                       break;
+                               TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
+                               msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
+                               slpflag = 0;
+                               if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
+                                       TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
+                                       req->r_cchain.tqe_next = NFSREQNOLIST;
+                               }
+                       }
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       goto again;
+               }
+               /*
+                * We update these *before* the send to avoid racing
+                * against others who may be looking to send requests.
+                */
+               if (!rexmit) {
+                       /* first transmit */
+                       req->r_flags |= R_CWND;
+                       nmp->nm_sent += NFS_CWNDSCALE;
+               } else {
+                       /*
+                        * When retransmitting, turn timing off
+                        * and divide congestion window by 2. 
+                        */
+                       req->r_flags &= ~R_TIMING;
+                       nmp->nm_cwnd >>= 1;
+                       if (nmp->nm_cwnd < NFS_CWNDSCALE)
+                               nmp->nm_cwnd = NFS_CWNDSCALE;
+               }
+               lck_mtx_unlock(&nmp->nm_lock);
+       }
+
+       req->r_flags &= ~R_MUSTRESEND;
+       lck_mtx_unlock(&req->r_mtx);
+
+       error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
+                       wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
+       if (error) {
+               if (wait)
+                       log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
+               nfs_sndunlock(req);
+               lck_mtx_lock(&req->r_mtx);
+               req->r_flags &= ~R_SENDING;
+               req->r_flags |= R_MUSTRESEND;
+               req->r_rtt = 0;
+               lck_mtx_unlock(&req->r_mtx);
+               return (0);
+       }
+
+       bzero(&msg, sizeof(msg));
+       if (nmp->nm_nam && (sotype != SOCK_STREAM) && !sock_isconnected(so)) {
+               if ((sendnam = mbuf_data(nmp->nm_nam))) {
+                       msg.msg_name = (caddr_t)sendnam;
+                       msg.msg_namelen = sendnam->sa_len;
+               }
+       }
+       error = sock_sendmbuf(so, &msg, mreqcopy, 0, &sentlen);
+#ifdef NFS_SOCKET_DEBUGGING
+       if (error || (sentlen != req->r_mreqlen))
+               NFS_SOCK_DBG(("nfs_send: 0x%llx sent %d/%d error %d\n",
+                       req->r_xid, (int)sentlen, (int)req->r_mreqlen, error));
+#endif
+       if (!error && (sentlen != req->r_mreqlen))
+               error = EWOULDBLOCK;
+       needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));
+
+       lck_mtx_lock(&req->r_mtx);
+       req->r_flags &= ~R_SENDING;
+       req->r_rtt = 0;
+       if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT))
+               req->r_rexmit = NFS_MAXREXMIT;
+
+       if (!error) {
+               /* SUCCESS */
+               req->r_flags &= ~R_RESENDERR;
+               if (rexmit)
+                       OSAddAtomic(1, &nfsstats.rpcretries);
+               req->r_flags |= R_SENT;
+               if (req->r_flags & R_WAITSENT) {
+                       req->r_flags &= ~R_WAITSENT;
+                       wakeup(req);
+               }
+               nfs_sndunlock(req);
+               lck_mtx_unlock(&req->r_mtx);
+               return (0);
+       }
+
+       /* send failed */
+       req->r_flags |= R_MUSTRESEND;
+       if (rexmit)
+               req->r_flags |= R_RESENDERR;
+       if ((error == EINTR) || (error == ERESTART))
+               req->r_error = error;
+       lck_mtx_unlock(&req->r_mtx);
+
+       if (sotype == SOCK_DGRAM) {
+               /*
+                * Note: even though a first send may fail, we consider
+                * the request sent for congestion window purposes.
+                * So we don't need to undo any of the changes made above.
+                */
+               /*
+                * Socket errors ignored for connectionless sockets??
+                * For now, ignore them all
+                */
+               if ((error != EINTR) && (error != ERESTART) &&
+                   (error != EWOULDBLOCK) && (error != EIO)) {
+                       int clearerror = 0, optlen = sizeof(clearerror);
+                       sock_getsockopt(so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
+#ifdef NFS_SOCKET_DEBUGGING
+                       if (clearerror)
+                               NFS_SOCK_DBG(("nfs_send: ignoring UDP socket error %d so %d\n",
+                                       error, clearerror));
+#endif
+               }
+       }
+
+       /* check if it appears we should reconnect the socket */
+       switch (error) {
+       case EWOULDBLOCK:
+               /* if send timed out, reconnect if on TCP */
+               if (sotype != SOCK_STREAM)
+                       break;
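+               /* FALLTHROUGH */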
+       case EPIPE:
+       case EADDRNOTAVAIL:
+       case ENETDOWN:
+       case ENETUNREACH:
+       case ENETRESET:
+       case ECONNABORTED:
+       case ECONNRESET:
+       case ENOTCONN:
+       case ESHUTDOWN:
+       case ECONNREFUSED:
+       case EHOSTDOWN:
+       case EHOSTUNREACH:
+               needrecon = 1;
+               break;
+       }
+       if (needrecon) { /* mark socket as needing reconnect */
+               NFS_SOCK_DBG(("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error));
+               nfs_need_reconnect(nmp);
+       }
+
+       nfs_sndunlock(req);
+
+       /*
+        * Don't log some errors:
+        * EPIPE errors may be common with servers that drop idle connections.
+        * EADDRNOTAVAIL may occur on network transitions.
+        * ENOTCONN may occur under some network conditions.
+        */
+       if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN))
+               error = 0;
+       if (error && (error != EINTR) && (error != ERESTART))
+               log(LOG_INFO, "nfs send error %d for server %s\n", error,
+                       !req->r_nmp ? "<unmounted>" :
+                       vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
+
+       /* prefer request termination error over other errors */
+       error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
+       if (error2)
+               error = error2;
+
+       /* only allow the following errors to be returned */
+       if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
+           (error != ENXIO) && (error != ETIMEDOUT))
+               error = 0;
+       return (error);
+}
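+/*
+ * Calling-convention note: the mount's socket thread resends queued requests
+ * with nfs_send(req, 0) (see the resend loop above), so a send that cannot
+ * proceed simply leaves R_MUSTRESEND set and returns 0; callers that pass a
+ * nonzero wait instead block here for reconnection and, for UDP, for
+ * congestion window space before transmitting.
+ */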
+
+/*
+ * NFS client socket upcalls
+ *
+ * Pull RPC replies out of an NFS mount's socket and match them
+ * up with the pending request.
+ *
+ * The datagram code is simple because we always get whole
+ * messages out of the socket.
+ *
+ * The stream code is more involved because we have to parse
+ * the RPC records out of the stream.
+ */
+
+/* NFS client UDP socket upcall */
+void
+nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+       struct nfsmount *nmp = arg;
+       size_t rcvlen;
+       mbuf_t m;
+       int error = 0;
+
+       if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
+               wakeup(&nmp->nm_so);
+               return;
+       }
+
+       /* make sure we're on the current socket */
+       if (nmp->nm_so != so)
+               return;
+
+       do {
+               m = NULL;
+               rcvlen = 1000000;
+               error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
+               if (m)
+                       nfs_request_match_reply(nmp, m);
+       } while (m && !error);
+
+       if (error && (error != EWOULDBLOCK)) {
+               /* problems with the socket... mark for reconnection */
+               NFS_SOCK_DBG(("nfs_udp_rcv: need reconnect %d\n", error));
+               nfs_need_reconnect(nmp);
+       }
+}
+
+/* NFS client TCP socket upcall */
+void
+nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+       struct nfsmount *nmp = arg;
+       struct iovec aio;
+       struct msghdr msg;
+       size_t rcvlen;
+       mbuf_t m;
+       int error = 0;
+       int recv;
+
+       if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
+               wakeup(&nmp->nm_so);
+               return;
+       }
+
+       /* make sure we're on the current socket */
+       if (nmp->nm_so != so)
+               return;
+
+       lck_mtx_lock(&nmp->nm_lock);
+       if (nmp->nm_sockflags & NMSOCK_UPCALL) {
+               /* upcall is already receiving data - just return */
+               lck_mtx_unlock(&nmp->nm_lock);
+               return;
+       }
+       nmp->nm_sockflags |= NMSOCK_UPCALL;
+
+nextfrag:
+       recv = 0;
+
+       /* read the TCP RPC record marker */
+       while (!error && nmp->nm_markerleft) {
+               aio.iov_base = ((char*)&nmp->nm_fragleft +
+                               sizeof(nmp->nm_fragleft) - nmp->nm_markerleft);
+               aio.iov_len = nmp->nm_markerleft;
+               bzero(&msg, sizeof(msg));
+               msg.msg_iov = &aio;
+               msg.msg_iovlen = 1;
+               lck_mtx_unlock(&nmp->nm_lock);
+               error = sock_receive(so, &msg, MSG_DONTWAIT, &rcvlen);
+               lck_mtx_lock(&nmp->nm_lock);
+               if (error || !rcvlen)
+                       break;
+               recv = 1;
+               nmp->nm_markerleft -= rcvlen;
+               if (nmp->nm_markerleft)
+                       continue;
+               /* record marker complete */
+               nmp->nm_fragleft = ntohl(nmp->nm_fragleft);
+               if (nmp->nm_fragleft & 0x80000000) {
+                       nmp->nm_sockflags |= NMSOCK_LASTFRAG;
+                       nmp->nm_fragleft &= ~0x80000000;
+               }
+               nmp->nm_reclen += nmp->nm_fragleft;
+               if (nmp->nm_reclen > NFS_MAXPACKET) {
+                       /*
+                        * This is SERIOUS! We are out of sync with the sender
+                        * and forcing a disconnect/reconnect is all I can do.
+                        */
+                       log(LOG_ERR, "%s (%d) from nfs server %s\n",
+                               "impossible RPC record length", nmp->nm_reclen,
+                               vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+                       error = EFBIG;
+               }
+       }
+
+       /* read the TCP RPC record fragment */
+       while (!error && !nmp->nm_markerleft && nmp->nm_fragleft) {
+               m = NULL;
+               rcvlen = nmp->nm_fragleft;
+               lck_mtx_unlock(&nmp->nm_lock);
+               error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
+               lck_mtx_lock(&nmp->nm_lock);
+               if (error || !rcvlen || !m)
+                       break;
+               recv = 1;
+               /* append mbufs to list */
+               nmp->nm_fragleft -= rcvlen;
+               if (!nmp->nm_m) {
+                       nmp->nm_m = m;
+               } else {
+                       error = mbuf_setnext(nmp->nm_mlast, m);
+                       if (error) {
+                               printf("nfs_tcp_rcv: mbuf_setnext failed %d\n", error);
+                               mbuf_freem(m);
+                               break;
+                       }
+               }
+               while (mbuf_next(m))
+                       m = mbuf_next(m);
+               nmp->nm_mlast = m;
+       }
+
+       /* done reading fragment? */
+       m = NULL;
+       if (!error && !nmp->nm_markerleft && !nmp->nm_fragleft) {
+               /* reset socket fragment parsing state */
+               nmp->nm_markerleft = sizeof(nmp->nm_fragleft);
+               if (nmp->nm_sockflags & NMSOCK_LASTFRAG) {
+                       /* RPC record complete */
+                       m = nmp->nm_m;
+                       /* reset socket record parsing state */
+                       nmp->nm_reclen = 0;
+                       nmp->nm_m = nmp->nm_mlast = NULL;
+                       nmp->nm_sockflags &= ~NMSOCK_LASTFRAG;
+               }
+       }
+
+       if (m) { /* match completed response with request */
+               lck_mtx_unlock(&nmp->nm_lock);
+               nfs_request_match_reply(nmp, m);
+               lck_mtx_lock(&nmp->nm_lock);
+       }
+
+       /* loop if we've been making error-free progress */
+       if (!error && recv)
+               goto nextfrag;
+
+       nmp->nm_sockflags &= ~NMSOCK_UPCALL;
+       lck_mtx_unlock(&nmp->nm_lock);
+#ifdef NFS_SOCKET_DEBUGGING
+       if (!recv && (error != EWOULDBLOCK))
+               NFS_SOCK_DBG(("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error));
+#endif
+       /* note: no error and no data indicates server closed its end */
+       if ((error != EWOULDBLOCK) && (error || !recv)) {
+               /* problems with the socket... mark for reconnection */
+               NFS_SOCK_DBG(("nfs_tcp_rcv: need reconnect %d\n", error));
+               nfs_need_reconnect(nmp);
+       }
+}
+
+/*
+ * "poke" a socket to try to provoke any pending errors
+ */
+void
+nfs_sock_poke(struct nfsmount *nmp)
+{
+       struct iovec aio;
+       struct msghdr msg;
+       size_t len;
+       int error = 0;
+       int dummy;
+
+       lck_mtx_lock(&nmp->nm_lock);
+       if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) || !nmp->nm_so) {
+               lck_mtx_unlock(&nmp->nm_lock);
+               return;
+       }
+       lck_mtx_unlock(&nmp->nm_lock);
+       aio.iov_base = &dummy;
+       aio.iov_len = 0;
+       len = 0;
+       bzero(&msg, sizeof(msg));
+       msg.msg_iov = &aio;
+       msg.msg_iovlen = 1;
+       error = sock_send(nmp->nm_so, &msg, MSG_DONTWAIT, &len);
+       NFS_SOCK_DBG(("nfs_sock_poke: error %d\n", error));
+}
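
The zero-length, non-blocking send above exists only to surface any error already latched on the socket (for example, a connection reset) so the reconnection logic can react promptly. A rough user-space equivalent of the same trick (illustrative; poke_socket is a made-up name):

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

/* Zero-length MSG_DONTWAIT send: returns any error pending on the socket. */
static int
poke_socket(int fd)
{
        char dummy;
        struct iovec iov = { .iov_base = &dummy, .iov_len = 0 };
        struct msghdr msg;

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        if (sendmsg(fd, &msg, MSG_DONTWAIT) < 0)
                return (errno);         /* e.g. EPIPE if the peer is gone */
        return (0);
}

int
main(void)
{
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return (1);
        /* healthy socket pair, so no pending error: prints 0 */
        printf("poke error: %d\n", poke_socket(sv[0]));
        return (0);
}
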
+
+/*
+ * Match an RPC reply with the corresponding request
+ */
+void
+nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
+{
+       struct nfsreq *req;
+       struct nfsm_chain nmrep;
+       u_int32_t reply = 0, rxid = 0;
+       int error = 0, asyncioq, t1;
+
+       /* Get the xid and check that it is an rpc reply */
+       nfsm_chain_dissect_init(error, &nmrep, mrep);
+       nfsm_chain_get_32(error, &nmrep, rxid);
+       nfsm_chain_get_32(error, &nmrep, reply);
+       if (error || (reply != RPC_REPLY)) {
+               OSAddAtomic(1, &nfsstats.rpcinvalid);
+               mbuf_freem(mrep);
+               return;
+       }
+
+       /*
+        * Loop through the request list to match up the reply
+        * Iff no match, just drop it.
+        */
+       lck_mtx_lock(nfs_request_mutex);
+       TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
+               if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid)))
+                       continue;
+               /* looks like we have it, grab lock and double check */
+               lck_mtx_lock(&req->r_mtx);
+               if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
+                       lck_mtx_unlock(&req->r_mtx);
+                       continue;
+               }
+               /* Found it.. */
+               req->r_nmrep = nmrep;
+               lck_mtx_lock(&nmp->nm_lock);
+               if (nmp->nm_sotype == SOCK_DGRAM) {
+                       /*
+                        * Update congestion window.
+                        * Do the additive increase of one rpc/rtt.
+                        */
+                       FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
+                       if (nmp->nm_cwnd <= nmp->nm_sent) {
+                               nmp->nm_cwnd +=
+                                  ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
+                                   (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
+                               if (nmp->nm_cwnd > NFS_MAXCWND)
+                                       nmp->nm_cwnd = NFS_MAXCWND;
+                       }
+                       if (req->r_flags & R_CWND) {
+                               nmp->nm_sent -= NFS_CWNDSCALE;
+                               req->r_flags &= ~R_CWND;
+                       }
+                       if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
+                               /* congestion window is open, poke the cwnd queue */
+                               struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
+                               TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
+                               req2->r_cchain.tqe_next = NFSREQNOLIST;
+                               wakeup(req2);
+                       }
+               }
+               /*
+                * Update rtt using a gain of 0.125 on the mean
+                * and a gain of 0.25 on the deviation.
+                */
+               if (req->r_flags & R_TIMING) {
+                       /*
+                        * Since the timer resolution of
+                        * NFS_HZ is so coarse, it can often
+                        * result in r_rtt == 0. Since
+                        * r_rtt == N means that the actual
+                        * rtt is between N+dt and N+2-dt ticks,
+                        * add 1.
+                        */
+                       if (proct[req->r_procnum] == 0)
+                               panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
+                       t1 = req->r_rtt + 1;
+                       t1 -= (NFS_SRTT(req) >> 3);
+                       NFS_SRTT(req) += t1;
+                       if (t1 < 0)
+                               t1 = -t1;
+                       t1 -= (NFS_SDRTT(req) >> 2);
+                       NFS_SDRTT(req) += t1;
+               }
+               nmp->nm_timeouts = 0;
+               lck_mtx_unlock(&nmp->nm_lock);
+               /* signal anyone waiting on this request */
+               wakeup(req);
+               asyncioq = (req->r_callback.rcb_func != NULL);
+               if (req->r_gss_ctx != NULL)
+                       nfs_gss_clnt_rpcdone(req);
+               lck_mtx_unlock(&req->r_mtx);
+               lck_mtx_unlock(nfs_request_mutex);
+               /* if it's an async RPC with a callback, queue it up */
+               if (asyncioq)
+                       nfs_asyncio_finish(req);
+               break;
+       }
+
+       if (!req) {
+               /* not matched to a request, so drop it. */
+               lck_mtx_unlock(nfs_request_mutex);
+               OSAddAtomic(1, &nfsstats.rpcunexpected);
+               mbuf_freem(mrep);
+       }
+}
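
The R_TIMING block above keeps the smoothed RTT and its deviation in fixed point: the stored SRTT is roughly 8x the mean (gain 1/8) and the stored SDRTT roughly 4x the deviation (gain 1/4), both updated from the per-request tick count r_rtt. The same arithmetic pulled out into a standalone sketch, with plain ints in place of the per-mount arrays:

#include <stdio.h>

/*
 * Same update as the R_TIMING block above: srtt is kept scaled by 8
 * and sdrtt by 4, giving gains of 0.125 on the mean and 0.25 on the
 * deviation.
 */
static void
update_rtt(int sample_ticks, int *srtt, int *sdrtt)
{
        int t1 = sample_ticks + 1;      /* +1 for the coarse timer resolution */

        t1 -= (*srtt >> 3);
        *srtt += t1;
        if (t1 < 0)
                t1 = -t1;
        t1 -= (*sdrtt >> 2);
        *sdrtt += t1;
}

int
main(void)
{
        int samples[] = { 2, 3, 2, 8, 2 };
        int srtt = 0, sdrtt = 0, i;

        for (i = 0; i < 5; i++) {
                update_rtt(samples[i], &srtt, &sdrtt);
                printf("sample=%d  srtt/8=%d  sdrtt/4=%d\n",
                    samples[i], srtt >> 3, sdrtt >> 2);
        }
        return (0);
}
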
+
+/*
+ * Wait for the reply for a given request...
+ * ...potentially resending the request if necessary.
+ */
+int
+nfs_wait_reply(struct nfsreq *req)
+{
+       struct timespec ts = { 2, 0 };
+       int error = 0, slpflag;
+
+       if (req->r_nmp && (req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread)
+               slpflag = PCATCH;
+       else
+               slpflag = 0;
+
+       lck_mtx_lock(&req->r_mtx);
+       while (!req->r_nmrep.nmc_mhead) {
+               if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
+                       break;
+               if (((error = req->r_error)) || req->r_nmrep.nmc_mhead)
+                       break;
+               /* check if we need to resend */
+               if (req->r_flags & R_MUSTRESEND) {
+                       NFS_SOCK_DBG(("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
+                               req->r_procnum, req->r_xid, req->r_flags, req->r_rtt));
+                       req->r_flags |= R_SENDING;
+                       lck_mtx_unlock(&req->r_mtx);
+                       if (req->r_gss_ctx) {
+                               /*
+                                * It's an RPCSEC_GSS mount.
+                                * Can't just resend the original request
+                                * without bumping the cred sequence number.
+                                * Go back and re-build the request.
+                                */
+                               lck_mtx_lock(&req->r_mtx);
+                               req->r_flags &= ~R_SENDING;
+                               lck_mtx_unlock(&req->r_mtx);
+                               return (EAGAIN);
+                       }
+                       error = nfs_send(req, 1);
+                       lck_mtx_lock(&req->r_mtx);
+                       NFS_SOCK_DBG(("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
+                               req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error));
+                       if (error)
+                               break;
+                       if (((error = req->r_error)) || req->r_nmrep.nmc_mhead)
+                               break;
+               }
+               /* need to poll if we're P_NOREMOTEHANG */
+               if (nfs_noremotehang(req->r_thread))
+                       ts.tv_sec = 1;
+               msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
+               slpflag = 0;
+       }
+       lck_mtx_unlock(&req->r_mtx);
+
+       return (error);
+}
+
+/*
+ * An NFS request goes something like this:
+ * (nb: always frees up mreq mbuf list)
+ * nfs_request_create()
+ *     - allocates a request struct if one is not provided
+ *     - initial fill-in of the request struct
+ * nfs_request_add_header()
+ *     - add the RPC header
+ * nfs_request_send()
+ *     - link it into list
+ *     - call nfs_send() for first transmit
+ * nfs_request_wait()
+ *     - call nfs_wait_reply() to wait for the reply
+ * nfs_request_finish()
+ *     - break down rpc header and return with error or nfs reply
+ *       pointed to by nmrep.
+ * nfs_request_rele()
+ * nfs_request_destroy()
+ *      - clean up the request struct
+ *      - free the request struct if it was allocated by nfs_request_create()
+ */
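
Condensed, this sequence is exactly what the synchronous path in nfs_request2() (later in this file) does; a skeleton of that flow, with the error handling and flag bookkeeping trimmed for readability (not standalone code; it uses the kernel-internal nfsreq API):

struct nfsreq rq, *req = &rq;
int error, status;

if ((error = nfs_request_create(np, NULL, &nmrest, procnum, thd, cred, &req)))
        return (error);
do {
        req->r_error = 0;
        req->r_flags &= ~R_RESTART;
        if ((error = nfs_request_add_header(req)))      /* RPC header + auth */
                break;
        if ((error = nfs_request_send(req, 1)))         /* queue and transmit */
                break;
        nfs_request_wait(req);                          /* wait (and resend) */
        if ((error = nfs_request_finish(req, &nmrep, &status)))
                break;
} while (req->r_flags & R_RESTART);                     /* e.g. jukebox/GSS retry */
nfs_request_rele(req);                                  /* last reference frees it */
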
+
+/*
+ * Set up an NFS request struct (allocating if no request passed in).
+ */
+int
+nfs_request_create(
+       nfsnode_t np,
+       mount_t mp,     /* used only if !np */
+       struct nfsm_chain *nmrest,
+       int procnum,
+       thread_t thd,
+       kauth_cred_t cred,
+       struct nfsreq **reqp)
+{
+       struct nfsreq *req, *newreq = NULL;
+       struct nfsmount *nmp;
+
+       req = *reqp;
+       if (!req) {
+               /* allocate a new NFS request structure */
+               MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
+               if (!newreq) {
+                       mbuf_freem(nmrest->nmc_mhead);
+                       nmrest->nmc_mhead = NULL;
+                       return (ENOMEM);
+               }
+               req = newreq;
+       }
+
+       bzero(req, sizeof(*req));
+       if (req == newreq)
+               req->r_flags = R_ALLOCATED;
+
+       nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
+       if (!nmp) {
+               if (newreq)
+                       FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+               return (ENXIO);
+       }
+       lck_mtx_lock(&nmp->nm_lock);
+       if ((nmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) ==
+           (NFSSTA_FORCE|NFSSTA_TIMEO)) {
+               lck_mtx_unlock(&nmp->nm_lock);
+               mbuf_freem(nmrest->nmc_mhead);
+               nmrest->nmc_mhead = NULL;
+               if (newreq)
+                       FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+               return (ENXIO);
+       }
+
+       if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS))
+               OSAddAtomic(1, &nfsstats.rpccnt[procnum]);
+       if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL))
+               panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
+
+       lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
+       req->r_nmp = nmp;
+       req->r_np = np;
+       req->r_thread = thd;
+       if (IS_VALID_CRED(cred)) {
+               kauth_cred_ref(cred);
+               req->r_cred = cred;
        }
-       return (error);
+       req->r_procnum = procnum;
+       if (proct[procnum] > 0)
+               req->r_flags |= R_TIMING;
+       req->r_nmrep.nmc_mhead = NULL;
+       SLIST_INIT(&req->r_gss_seqlist);
+       req->r_achain.tqe_next = NFSREQNOLIST;
+       req->r_rchain.tqe_next = NFSREQNOLIST;
+       req->r_cchain.tqe_next = NFSREQNOLIST;
+
+       lck_mtx_unlock(&nmp->nm_lock);
+
+       /* move the request mbuf chain to the nfsreq */
+       req->r_mrest = nmrest->nmc_mhead;
+       nmrest->nmc_mhead = NULL;
+
+       req->r_flags |= R_INITTED;
+       req->r_refs = 1;
+       if (newreq)
+               *reqp = req;
+       return (0);
 }
 
 /*
- * Implement receipt of reply on a socket.
- * We must search through the list of received datagrams matching them
- * with outstanding requests using the xid, until ours is found.
+ * Clean up and free an NFS request structure.
  */
-/* ARGSUSED */
-int
-nfs_reply(myrep)
-       struct nfsreq *myrep;
+void
+nfs_request_destroy(struct nfsreq *req)
 {
-       struct nfsreq *rep;
-       struct nfsmount *nmp = myrep->r_nmp;
-       long t1;
-       mbuf_t mrep, md;
-       u_long rxid, *tl;
-       caddr_t dpos, cp2;
-       int error;
-
-       /*
-        * Loop around until we get our own reply
-        */
-       for (;;) {
-               /*
-                * Lock against other receivers so that I don't get stuck in
-                * sbwait() after someone else has received my reply for me.
-                * Also necessary for connection based protocols to avoid
-                * race conditions during a reconnect.
-                * If nfs_rcvlock() returns EALREADY, that means that
-                * the reply has already been recieved by another
-                * process and we can return immediately.  In this
-                * case, the lock is not taken to avoid races with
-                * other processes.
-                */
-               error = nfs_rcvlock(myrep);
-               if (error == EALREADY)
-                       return (0);
-               if (error)
-                       return (error);
-               
-               /*
-                * If we slept after putting bits otw, then reply may have
-                * arrived.  In which case returning is required, or we
-                * would hang trying to nfs_receive an already received reply.
-                */
-               if (myrep->r_mrep != NULL) {
-                       nfs_rcvunlock(myrep);
-                       FSDBG(530, myrep->r_xid, myrep, myrep->r_nmp, -1);
-                       return (0);
-               }
-               /*
-                * Get the next Rpc reply off the socket. Assume myrep->r_nmp
-                * is still intact by checks done in nfs_rcvlock.
-                */
-               error = nfs_receive(myrep, &mrep);
-               /*
-                * Bailout asap if nfsmount struct gone (unmounted). 
-                */
-               if (!myrep->r_nmp) {
-                       FSDBG(530, myrep->r_xid, myrep, nmp, -2);
-                       if (mrep)
-                               mbuf_freem(mrep);
-                       return (ENXIO);
-               }
-               if (error) {
-                       FSDBG(530, myrep->r_xid, myrep, nmp, error);
-                       nfs_rcvunlock(myrep);
-
-                       /* Bailout asap if nfsmount struct gone (unmounted). */
-                       if (!myrep->r_nmp) {
-                               if (mrep)
-                                       mbuf_freem(mrep);
-                               return (ENXIO);
-                       }
+       struct nfsmount *nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+       struct gss_seq *gsp, *ngsp;
+       struct timespec ts = { 1, 0 };
+       int clearjbtimeo = 0;
 
-                       /*
-                        * Ignore routing errors on connectionless protocols??
-                        */
-                       if (NFSIGNORE_SOERROR(nmp->nm_sotype, error)) {
-                               if (nmp->nm_so) {
-                                       int clearerror;
-                                       int optlen = sizeof(clearerror);
-                                       sock_getsockopt(nmp->nm_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
-                               }
-                               continue;
-                       }
-                       if (mrep)
-                               mbuf_freem(mrep);
-                       return (error);
+       if (!req || !(req->r_flags & R_INITTED))
+               return;
+       req->r_flags &= ~R_INITTED;
+       if (req->r_lflags & RL_QUEUED)
+               nfs_reqdequeue(req);
+       if (req->r_achain.tqe_next != NFSREQNOLIST) {
+               /* still on an async I/O queue? */
+               lck_mtx_lock(nfsiod_mutex);
+               if (nmp && (req->r_achain.tqe_next != NFSREQNOLIST)) {
+                       TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
+                       req->r_achain.tqe_next = NFSREQNOLIST;
                }
-
-               /*
-                * We assume all is fine, but if we did not have an error
-                 * and mrep is 0, better not dereference it. nfs_receive
-                 * calls soreceive which carefully sets error=0 when it got
-                 * errors on sbwait (tsleep). In most cases, I assume that's 
-                 * so we could go back again. In tcp case, EPIPE is returned.
-                 * In udp, case nfs_receive gets back here with no error and no
-                 * mrep. Is the right fix to have soreceive check for process
-                 * aborted after sbwait and return something non-zero? Should
-                 * nfs_receive give an EPIPE?  Too risky to play with those
-                 * two this late in game for a shutdown problem. Instead,
-                 * just check here and get out. (ekn)
-                */
-               if (!mrep) {
-                       nfs_rcvunlock(myrep);
-                        FSDBG(530, myrep->r_xid, myrep, nmp, -3);
-                        return (ENXIO); /* sounds good */
-                }
-                        
-               /*
-                * Get the xid and check that it is an rpc reply
-                */
-               md = mrep;
-               dpos = mbuf_data(md);
-               nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
-               rxid = *tl++;
-               if (*tl != rpc_reply) {
-                       OSAddAtomic(1, (SInt32*)&nfsstats.rpcinvalid);
-                       mbuf_freem(mrep);
-nfsmout:
-                       if (nmp->nm_state & NFSSTA_RCVLOCK)
-                               nfs_rcvunlock(myrep);
-                       continue;
+               lck_mtx_unlock(nfsiod_mutex);
+       }
+       lck_mtx_lock(&req->r_mtx);
+       if (nmp) {
+               lck_mtx_lock(&nmp->nm_lock);
+               if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+                       TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+                       req->r_rchain.tqe_next = NFSREQNOLIST;
+                       if (req->r_flags & R_RESENDQ)
+                               req->r_flags &= ~R_RESENDQ;
                }
-
-               /*
-                * Loop through the request list to match up the reply
-                * Iff no match, just drop the datagram
-                */
-               TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
-                       if (rep->r_mrep == NULL && rxid == rep->r_xid) {
-                               /* Found it.. */
-                               rep->r_mrep = mrep;
-                               rep->r_md = md;
-                               rep->r_dpos = dpos;
-                               /*
-                                * If we're tracking the round trip time
-                                * then we update the circular log here
-                                * with the stats from our current request.
-                                */
-                               if (nfsrtton) {
-                                       struct rttl *rt;
-
-                                       rt = &nfsrtt.rttl[nfsrtt.pos];
-                                       rt->proc = rep->r_procnum;
-                                       rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
-                                       rt->sent = nmp->nm_sent;
-                                       rt->cwnd = nmp->nm_cwnd;
-                                       if (proct[rep->r_procnum] == 0)
-                                               panic("nfs_reply: proct[%d] is zero", rep->r_procnum);
-                                       rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
-                                       rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
-                                       rt->fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
-                                       microtime(&rt->tstamp); // XXX unused
-                                       if (rep->r_flags & R_TIMING)
-                                               rt->rtt = rep->r_rtt;
-                                       else
-                                               rt->rtt = 1000000;
-                                       nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
-                               }
-                               /*
-                                * Update congestion window.
-                                * Do the additive increase of
-                                * one rpc/rtt.
-                                */
-                               FSDBG(530, rep->r_xid, rep, nmp->nm_sent,
-                                     nmp->nm_cwnd);
-                               if (nmp->nm_cwnd <= nmp->nm_sent) {
-                                       nmp->nm_cwnd +=
-                                          (NFS_CWNDSCALE * NFS_CWNDSCALE +
-                                          (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
-                                       if (nmp->nm_cwnd > NFS_MAXCWND)
-                                               nmp->nm_cwnd = NFS_MAXCWND;
-                               }
-                                if (rep->r_flags & R_SENT) {
-                                    rep->r_flags &= ~R_SENT;
-                                    nmp->nm_sent -= NFS_CWNDSCALE;
-                               }
-                               /*
-                                * Update rtt using a gain of 0.125 on the mean
-                                * and a gain of 0.25 on the deviation.
-                                */
-                               if (rep->r_flags & R_TIMING) {
-                                       /*
-                                        * Since the timer resolution of
-                                        * NFS_HZ is so course, it can often
-                                        * result in r_rtt == 0. Since
-                                        * r_rtt == N means that the actual
-                                        * rtt is between N+dt and N+2-dt ticks,
-                                        * add 1.
-                                        */
-                                       if (proct[rep->r_procnum] == 0)
-                                               panic("nfs_reply: proct[%d] is zero", rep->r_procnum);
-                                       t1 = rep->r_rtt + 1;
-                                       t1 -= (NFS_SRTT(rep) >> 3);
-                                       NFS_SRTT(rep) += t1;
-                                       if (t1 < 0)
-                                               t1 = -t1;
-                                       t1 -= (NFS_SDRTT(rep) >> 2);
-                                       NFS_SDRTT(rep) += t1;
-                               }
-                               nmp->nm_timeouts = 0;
-                               break;
-                       }
+               if (req->r_cchain.tqe_next != NFSREQNOLIST) {
+                       TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
+                       req->r_cchain.tqe_next = NFSREQNOLIST;
                }
-               nfs_rcvunlock(myrep);
-               /*
-                * If not matched to a request, drop it.
-                * If it's mine, get out.
-                */
-               if (rep == 0) {
-                       OSAddAtomic(1, (SInt32*)&nfsstats.rpcunexpected);
-                       mbuf_freem(mrep);
-               } else if (rep == myrep) {
-                       if (rep->r_mrep == NULL)
-                               panic("nfs_reply: nil r_mrep");
-                       return (0);
+               if (req->r_flags & R_JBTPRINTFMSG) {
+                       req->r_flags &= ~R_JBTPRINTFMSG;
+                       nmp->nm_jbreqs--;
+                       clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
                }
-               FSDBG(530, myrep->r_xid, myrep, rep,
-                     rep ? rep->r_xid : myrep->r_flags);
+               lck_mtx_unlock(&nmp->nm_lock);
        }
+       while (req->r_flags & R_RESENDQ)
+               msleep(req, &req->r_mtx, (PZERO - 1), "nfsresendqwait", &ts);
+       lck_mtx_unlock(&req->r_mtx);
+       if (clearjbtimeo)
+               nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
+       if (req->r_mhead)
+               mbuf_freem(req->r_mhead);
+       else if (req->r_mrest)
+               mbuf_freem(req->r_mrest);
+       if (req->r_nmrep.nmc_mhead)
+               mbuf_freem(req->r_nmrep.nmc_mhead);
+       if (IS_VALID_CRED(req->r_cred))
+               kauth_cred_unref(&req->r_cred);
+       if (req->r_gss_ctx)
+               nfs_gss_clnt_rpcdone(req);
+       SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
+               FREE(gsp, M_TEMP);
+       if (req->r_gss_ctx)
+               nfs_gss_clnt_ctx_unref(req);
+
+       lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
+       if (req->r_flags & R_ALLOCATED)
+               FREE_ZONE(req, sizeof(*req), M_NFSREQ);
+}
+
+void
+nfs_request_ref(struct nfsreq *req, int locked)
+{
+       if (!locked)
+               lck_mtx_lock(&req->r_mtx);
+       if (req->r_refs <= 0)
+               panic("nfsreq reference error");
+       req->r_refs++;
+       if (!locked)
+               lck_mtx_unlock(&req->r_mtx);
+}
+
+void
+nfs_request_rele(struct nfsreq *req)
+{
+       int destroy;
+
+       lck_mtx_lock(&req->r_mtx);
+       if (req->r_refs <= 0)
+               panic("nfsreq reference underflow");
+       req->r_refs--;
+       destroy = (req->r_refs == 0);
+       lck_mtx_unlock(&req->r_mtx);
+       if (destroy)
+               nfs_request_destroy(req);
 }
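
nfs_request_ref()/nfs_request_rele() are a plain mutex-protected reference count: the count starts at 1 in nfs_request_create(), and the release that drops it to zero is the one that calls nfs_request_destroy(). A minimal user-space model of the same pattern (a pthread mutex stands in for r_mtx; all names here are invented):

#include <pthread.h>
#include <stdlib.h>
#include <assert.h>

struct ref_obj {
        pthread_mutex_t lock;
        int refs;               /* starts at 1, as in nfs_request_create() */
};

static void
obj_ref(struct ref_obj *o)
{
        pthread_mutex_lock(&o->lock);
        assert(o->refs > 0);    /* mirrors the "reference error" panic */
        o->refs++;
        pthread_mutex_unlock(&o->lock);
}

static void
obj_rele(struct ref_obj *o)
{
        int destroy;

        pthread_mutex_lock(&o->lock);
        assert(o->refs > 0);    /* mirrors the "reference underflow" panic */
        destroy = (--o->refs == 0);
        pthread_mutex_unlock(&o->lock);
        if (destroy) {
                pthread_mutex_destroy(&o->lock);
                free(o);
        }
}

int
main(void)
{
        struct ref_obj *o = malloc(sizeof(*o));

        if (!o)
                return (1);
        pthread_mutex_init(&o->lock, NULL);
        o->refs = 1;            /* creator's reference */
        obj_ref(o);             /* e.g. handed to an async callback */
        obj_rele(o);            /* callback done */
        obj_rele(o);            /* creator done: object is freed here */
        return (0);
}
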
 
+
 /*
- * nfs_request - goes something like this
- *     - fill in request struct
- *     - links it into list
- *     - calls nfs_send() for first transmit
- *     - calls nfs_receive() to get reply
- *     - break down rpc header and return with nfs reply pointed to
- *       by mrep or error
- * nb: always frees up mreq mbuf list
+ * Add an (updated) RPC header with authorization to an NFS request.
  */
 int
-nfs_request(vp, mp, mrest, procnum, procp, cred, mrp, mdp, dposp, xidp)
-       vnode_t vp;
-       mount_t mp;
-       mbuf_t mrest;
-       int procnum;
-       proc_t procp;
-       kauth_cred_t cred;
-       mbuf_t *mrp;
-       mbuf_t *mdp;
-       caddr_t *dposp;
-       u_int64_t *xidp;
+nfs_request_add_header(struct nfsreq *req)
 {
-       mbuf_t m, mrep, m2;
-       struct nfsreq re, *rep;
-       u_long *tl;
-       int i;
        struct nfsmount *nmp;
-       mbuf_t md, mheadend;
-       char nickv[RPCX_NICKVERF];
-       time_t waituntil;
-       caddr_t dpos, cp2;
-       int t1, error = 0, mrest_len, auth_len, auth_type;
-       int trylater_delay = NFS_TRYLATERDEL, failed_auth = 0;
-       int verf_len, verf_type;
-       u_long xid;
-       char *auth_str, *verf_str;
-       NFSKERBKEY_T key;               /* save session key */
-       int nmsotype;
-       struct timeval now;
+       int error = 0, auth_len = 0;
+       mbuf_t m;
 
-       if (mrp)
-               *mrp = NULL;
-       if (xidp)
-               *xidp = 0;
-       nmp = VFSTONFS(mp);
+       /* free up any previous header */
+       if ((m = req->r_mhead)) {
+               while (m && (m != req->r_mrest))
+                       m = mbuf_free(m);
+               req->r_mhead = NULL;
+       }
+
+       nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+       if (!nmp)
+               return (ENXIO);
 
-       rep = &re;
+       if (!req->r_cred) /* RPCAUTH_NULL */
+               auth_len = 0;
+       else switch (nmp->nm_auth) {
+               case RPCAUTH_UNIX:
+                       if (req->r_cred->cr_ngroups < 1)
+                               return (EINVAL);
+                       auth_len = ((((req->r_cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
+                               nmp->nm_numgrps : (req->r_cred->cr_ngroups - 1)) << 2) +
+                               5 * NFSX_UNSIGNED;
+                       break;
+               case RPCAUTH_KRB5:
+               case RPCAUTH_KRB5I:
+               case RPCAUTH_KRB5P:
+                       auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now
+                       break;
+               }
 
-       if (vp)
-               nmp = VFSTONFS(vnode_mount(vp));
-       if (nmp == NULL ||
-           (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) ==
-           (NFSSTA_FORCE|NFSSTA_TIMEO)) {
-               mbuf_freem(mrest);
+       error = nfsm_rpchead(req, auth_len, req->r_mrest, &req->r_xid, &req->r_mhead);
+       if (error)
+               return (error);
+
+       req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
+       nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+       if (!nmp)
                return (ENXIO);
-       }
-       nmsotype = nmp->nm_sotype;
+       lck_mtx_lock(&nmp->nm_lock);
+       if (nmp->nm_flag & NFSMNT_SOFT)
+               req->r_retry = nmp->nm_retry;
+       else
+               req->r_retry = NFS_MAXREXMIT + 1;       /* past clip limit */
+       lck_mtx_unlock(&nmp->nm_lock);
 
-       FSDBG_TOP(531, vp, procnum, nmp, rep);
+       return (error);
+}
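
For RPCAUTH_UNIX the credential length computed above is five XDR words plus one word per supplementary group, with the group count clipped to the mount's nm_numgrps (the actual credential body is built by nfsm_rpchead(), which is not part of this hunk). A worked version of just that arithmetic, assuming NFSX_UNSIGNED is the 4-byte XDR word size:

#include <stdio.h>

#define NFSX_UNSIGNED   4       /* one XDR word (assumed here) */

/* Same clip-and-count arithmetic as the RPCAUTH_UNIX case above. */
static int
authunix_len(int cr_ngroups, int nm_numgrps)
{
        int ngroups = cr_ngroups - 1;   /* supplementary groups only */

        if (ngroups > nm_numgrps)
                ngroups = nm_numgrps;
        return ((ngroups << 2) + 5 * NFSX_UNSIGNED);
}

int
main(void)
{
        /* a credential with 8 groups on a mount allowing 16 of them */
        printf("auth_len = %d bytes\n", authunix_len(8, 16));   /* 7*4 + 20 = 48 */
        return (0);
}
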
 
-       rep->r_nmp = nmp;
-       rep->r_vp = vp;
-       rep->r_procp = procp;
-       rep->r_procnum = procnum;
-       microuptime(&now);
-       rep->r_lastmsg = now.tv_sec -
-           ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
-       i = 0;
-       m = mrest;
-       while (m) {
-               i += mbuf_len(m);
-               m = mbuf_next(m);
-       }
-       mrest_len = i;
 
-       /*
-        * Get the RPC header with authorization.
-        */
-kerbauth:
-       nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
+/*
+ * Queue an NFS request up and send it out.
+ */
+int
+nfs_request_send(struct nfsreq *req, int wait)
+{
+       struct nfsmount *nmp;
+       struct timeval now;
+
+       lck_mtx_lock(&req->r_mtx);
+       req->r_flags |= R_SENDING;
+       lck_mtx_unlock(&req->r_mtx);
+
+       lck_mtx_lock(nfs_request_mutex);
+
+       nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
        if (!nmp) {
-               FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-               mbuf_freem(mrest);
+               lck_mtx_unlock(nfs_request_mutex);
                return (ENXIO);
        }
-       verf_str = auth_str = (char *)0;
-       if (nmp->nm_flag & NFSMNT_KERB) {
-               verf_str = nickv;
-               verf_len = sizeof (nickv);
-               auth_type = RPCAUTH_KERB4;
-               bzero((caddr_t)key, sizeof (key));
-               if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
-                       &auth_len, verf_str, verf_len)) {
-                       nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
-                       if (!nmp) {
-                               FSDBG_BOT(531, 2, vp, error, rep);
-                               mbuf_freem(mrest);
-                               return (ENXIO);
-                       }
-                       error = nfs_getauth(nmp, rep, cred, &auth_str,
-                               &auth_len, verf_str, &verf_len, key);
-                       nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
-                       if (!error && !nmp)
-                               error = ENXIO;
-                       if (error) {
-                               FSDBG_BOT(531, 2, vp, error, rep);
-                               mbuf_freem(mrest);
-                               return (error);
-                       }
-               }
-       } else {
-               auth_type = RPCAUTH_UNIX;
-               if (cred->cr_ngroups < 1)
-                       panic("nfsreq nogrps");
-               auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
-                       nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
-                       5 * NFSX_UNSIGNED;
-       }
-       error = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
-            auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid, &m);
-       if (auth_str)
-               _FREE(auth_str, M_TEMP);
-       if (error) {
-               mbuf_freem(mrest);
-               FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-               return (error);
-       }
-       if (xidp)
-               *xidp = ntohl(xid) + ((u_int64_t)nfs_xidwrap << 32);
 
-       /*
-        * For stream protocols, insert a Sun RPC Record Mark.
-        */
-       if (nmsotype == SOCK_STREAM) {
-               error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
-               if (error) {
-                       mbuf_freem(m);
-                       FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-                       return (error);
-               }
-               *((u_long*)mbuf_data(m)) =
-                       htonl(0x80000000 | (mbuf_pkthdr_len(m) - NFSX_UNSIGNED));
+       microuptime(&now);
+       if (!req->r_start) {
+               req->r_start = now.tv_sec;
+               req->r_lastmsg = now.tv_sec -
+                   ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
        }
-       rep->r_mreq = m;
-       rep->r_xid = xid;
-tryagain:
-       nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
-       if (nmp && (nmp->nm_flag & NFSMNT_SOFT))
-               rep->r_retry = nmp->nm_retry;
-       else
-               rep->r_retry = NFS_MAXREXMIT + 1;       /* past clip limit */
-       rep->r_rtt = rep->r_rexmit = 0;
-       if (proct[procnum] > 0)
-               rep->r_flags = R_TIMING;
-       else
-               rep->r_flags = 0;
-       rep->r_mrep = NULL;
 
-       /*
-        * Do the client side RPC.
-        */
-       OSAddAtomic(1, (SInt32*)&nfsstats.rpcrequests);
+       OSAddAtomic(1, &nfsstats.rpcrequests);
+
        /*
         * Chain request into list of outstanding requests. Be sure
         * to put it LAST so timer finds oldest requests first.
+        * Make sure that the request queue timer is running
+        * to check for possible request timeout.
         */
-       TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
+       TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
+       req->r_lflags |= RL_QUEUED;
+       if (!nfs_request_timer_on) {
+               nfs_request_timer_on = 1;
+               nfs_interval_timer_start(nfs_request_timer_call,
+                       NFS_REQUESTDELAY);
+       }
+       lck_mtx_unlock(nfs_request_mutex);
 
-       /*
-        * If backing off another request or avoiding congestion, don't
-        * send this one now but let timer do it. If not timing a request,
-        * do it now.
-        */
-       if (nmp && nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
-                          (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
-                          nmp->nm_sent < nmp->nm_cwnd)) {
-               int connrequired = (nmp->nm_sotype == SOCK_STREAM);
+       /* Send the request... */
+       return (nfs_send(req, wait));
+}
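
Note the lazy timer start above: the periodic request timer is armed only when a request is actually queued, so an idle client carries no timer. A small user-space sketch of that pattern (names are invented; nothing here is the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the interval timer start; hypothetical. */
static void
start_interval_timer(void)
{
        printf("request timer armed\n");
}

static pthread_mutex_t reqq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_on = false;

/* Arm the timer only when the first piece of work shows up. */
static void
enqueue_request(void)
{
        pthread_mutex_lock(&reqq_lock);
        /* ...insert the request at the tail of the queue here... */
        if (!timer_on) {
                timer_on = true;
                start_interval_timer();
        }
        pthread_mutex_unlock(&reqq_lock);
}

int
main(void)
{
        enqueue_request();
        enqueue_request();      /* timer is armed only once */
        return (0);
}
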
 
-               if (connrequired)
-                       error = nfs_sndlock(rep);
+/*
+ * Call nfs_wait_reply() to wait for the reply.
+ */
+void
+nfs_request_wait(struct nfsreq *req)
+{
+       req->r_error = nfs_wait_reply(req);
+}
 
-               /*
-                * Set the R_SENT before doing the send in case another thread
-                * processes the reply before the nfs_send returns here
-                */
-               if (!error) {
-                       if ((rep->r_flags & R_MUSTRESEND) == 0) {
-                               FSDBG(531, rep->r_xid, rep, nmp->nm_sent,
-                                     nmp->nm_cwnd);
-                               nmp->nm_sent += NFS_CWNDSCALE;
-                               rep->r_flags |= R_SENT;
-                       }
+/*
+ * Finish up an NFS request by dequeueing it and
+ * doing the initial NFS request reply processing.
+ */
+int
+nfs_request_finish(
+       struct nfsreq *req,
+       struct nfsm_chain *nmrepp,
+       int *status)
+{
+       struct nfsmount *nmp;
+       mbuf_t mrep;
+       int verf_type = 0;
+       uint32_t verf_len = 0;
+       uint32_t reply_status = 0;
+       uint32_t rejected_status = 0;
+       uint32_t auth_status = 0;
+       uint32_t accepted_status = 0;
+       struct nfsm_chain nmrep;
+       int error, auth, clearjbtimeo;
 
-                       error = mbuf_copym(m, 0, MBUF_COPYALL, MBUF_WAITOK, &m2);
-                       if (!error)
-                               error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
-                       if (connrequired)
-                               nfs_sndunlock(rep);
-               }
-               nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
-               if (error) {
-                       if (nmp)
-                               nmp->nm_sent -= NFS_CWNDSCALE;
-                       rep->r_flags &= ~R_SENT;
-               }
-       } else {
-               rep->r_rtt = -1;
-       }
+       error = req->r_error;
 
-       /*
-        * Wait for the reply from our send or the timer's.
-        */
-       if (!error || error == EPIPE)
-               error = nfs_reply(rep);
+       if (nmrepp)
+               nmrepp->nmc_mhead = NULL;
 
-       /*
-        * RPC done, unlink the request.
-        */
-       nfs_repdequeue(rep);
+       /* RPC done, unlink the request. */
+       nfs_reqdequeue(req);
 
-       nmp = vp ? VFSTONFS(vnode_mount(vp)) : rep->r_nmp;
+       mrep = req->r_nmrep.nmc_mhead;
+
+       nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
 
        /*
         * Decrement the outstanding request count.
         */
-       if (rep->r_flags & R_SENT) {
-               rep->r_flags &= ~R_SENT;        /* paranoia */
-               if (nmp) {
-                       FSDBG(531, rep->r_xid, rep, nmp->nm_sent, nmp->nm_cwnd);
-                       nmp->nm_sent -= NFS_CWNDSCALE;
+       if ((req->r_flags & R_CWND) && nmp) {
+               req->r_flags &= ~R_CWND;
+               lck_mtx_lock(&nmp->nm_lock);
+               FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
+               nmp->nm_sent -= NFS_CWNDSCALE;
+               if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
+                       /* congestion window is open, poke the cwnd queue */
+                       struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
+                       TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
+                       req2->r_cchain.tqe_next = NFSREQNOLIST;
+                       wakeup(req2);
                }
+               lck_mtx_unlock(&nmp->nm_lock);
        }
 
-       /*
-        * If there was a successful reply and a tprintf msg.
-        * tprintf a response.
-        */
-       if (!error)
-               nfs_up(nmp, procp, NFSSTA_TIMEO,
-                       (rep->r_flags & R_TPRINTFMSG) ? "is alive again" : NULL);
-       mrep = rep->r_mrep;
-       md = rep->r_md;
-       dpos = rep->r_dpos;
-       if (!error && !nmp)
-               error = ENXIO;
-       if (error) {
-               mbuf_freem(rep->r_mreq);
-               FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-               return (error);
+       if (req->r_gss_ctx) {   // Using gss cred ?
+               /*
+                * If the request had an RPCSEC_GSS credential
+                * then reset its sequence number bit in the
+                * request window.
+                */
+               nfs_gss_clnt_rpcdone(req);
+
+               /*
+                * If we need to re-send, go back and re-build the
+                * request based on a new sequence number.
+                * Note that we're using the original XID.
+                */
+               if (error == EAGAIN) {
+                       req->r_error = 0;
+                       if (mrep)
+                               mbuf_freem(mrep);
+                       error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
+                       req->r_nmrep.nmc_mhead = NULL;
+                       req->r_flags |= R_RESTART;
+                       if (error == ENEEDAUTH) {
+                               req->r_xid = 0;         // get a new XID
+                               error = 0;
+                       }
+                       goto nfsmout;
+               }
        }
 
        /*
-        * break down the rpc header and check if ok
+        * If there was a successful reply, make sure to mark the mount as up.
+        * If a tprintf message was given (or if this is a timed-out soft mount)
+        * then post a tprintf message indicating the server is alive again.
         */
-       nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
-       if (*tl++ == rpc_msgdenied) {
-               if (*tl == rpc_mismatch)
-                       error = EOPNOTSUPP;
-               else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
-                       if (!failed_auth) {
-                               failed_auth++;
-                               error = mbuf_setnext(mheadend, NULL);
-                               mbuf_freem(mrep);
-                               mbuf_freem(rep->r_mreq);
-                               if (!error)
-                                       goto kerbauth;
-                               printf("nfs_request: mbuf_setnext failed\n");
-                       } else
-                               error = EAUTH;
-               } else
-                       error = EACCES;
-               mbuf_freem(mrep);
-               mbuf_freem(rep->r_mreq);
-               FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-               return (error);
+       if (!error) {
+               if ((req->r_flags & R_TPRINTFMSG) ||
+                   (nmp && (nmp->nm_flag & NFSMNT_SOFT) &&
+                    ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_FORCE)) == NFSSTA_TIMEO)))
+                       nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
+               else
+                       nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
        }
+       if (!error && !nmp)
+               error = ENXIO;
+       nfsmout_if(error);
 
        /*
-        * Grab any Kerberos verifier, otherwise just throw it away.
+        * break down the RPC header and check if ok
         */
-       verf_type = fxdr_unsigned(int, *tl++);
-       i = fxdr_unsigned(int, *tl);
-       if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
-               error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
-               if (error)
+       nmrep = req->r_nmrep;
+       nfsm_chain_get_32(error, &nmrep, reply_status);
+       nfsmout_if(error);
+       if (reply_status == RPC_MSGDENIED) {
+               nfsm_chain_get_32(error, &nmrep, rejected_status);
+               nfsmout_if(error);
+               if (rejected_status == RPC_MISMATCH) {
+                       error = ENOTSUP;
                        goto nfsmout;
-       } else if (i > 0)
-               nfsm_adv(nfsm_rndup(i));
-       nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
-       /* 0 == ok */
-       if (*tl == 0) {
-               nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
-               if (*tl != 0) {
-                       error = fxdr_unsigned(int, *tl);
-                       if ((nmp->nm_flag & NFSMNT_NFSV3) &&
-                               error == NFSERR_TRYLATER) {
-                               mbuf_freem(mrep);
-                               error = 0;
-                               microuptime(&now);
-                               waituntil = now.tv_sec + trylater_delay;
-                               while (now.tv_sec < waituntil) {
-                                       tsleep((caddr_t)&lbolt, PSOCK, "nfstrylater", 0);
-                                       microuptime(&now);
-                               }
-                               trylater_delay *= 2;
-                               if (trylater_delay > 60)
-                                       trylater_delay = 60;
-                               goto tryagain;
+               }
+               nfsm_chain_get_32(error, &nmrep, auth_status);
+               nfsmout_if(error);
+               switch (auth_status) {
+               case RPCSEC_GSS_CREDPROBLEM:
+               case RPCSEC_GSS_CTXPROBLEM:
+                       /*
+                        * An RPCSEC_GSS cred or context problem.
+                        * We can't use it anymore.
+                        * Restore the args, renew the context
+                        * and set up for a resend.
+                        */
+                       error = nfs_gss_clnt_args_restore(req);
+                       if (error && error != ENEEDAUTH)
+                               break;
+
+                       if (!error) {
+                               error = nfs_gss_clnt_ctx_renew(req);
+                               if (error)
+                                       break;
                        }
+                       mbuf_freem(mrep);
+                       req->r_nmrep.nmc_mhead = NULL;
+                       req->r_xid = 0;         // get a new XID
+                       req->r_flags |= R_RESTART;
+                       goto nfsmout;
+               default:
+                       error = EACCES;
+                       break;
+               }
+               goto nfsmout;
+       }
+
+       /* Now check the verifier */
+       nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
+       nfsm_chain_get_32(error, &nmrep, verf_len);  // verifier length
+       nfsmout_if(error);
+
+       auth = !req->r_cred ? RPCAUTH_NULL : nmp->nm_auth;
+       switch (auth) {
+       case RPCAUTH_NULL:
+       case RPCAUTH_UNIX:
+               /* Any AUTH_UNIX verifier is ignored */
+               if (verf_len > 0)
+                       nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
+               nfsm_chain_get_32(error, &nmrep, accepted_status);
+               break;
+       case RPCAUTH_KRB5:
+       case RPCAUTH_KRB5I:
+       case RPCAUTH_KRB5P:
+               error = nfs_gss_clnt_verf_get(req, &nmrep,
+                       verf_type, verf_len, &accepted_status);
+               break;
+       }
+       nfsmout_if(error);
+
+       switch (accepted_status) {
+       case RPC_SUCCESS:
+               if (req->r_procnum == NFSPROC_NULL) {
+                       /*
+                        * The NFS null procedure is unique
+                        * in that it does not return an NFS status.
+                        */
+                       *status = NFS_OK;
+               } else {
+                       nfsm_chain_get_32(error, &nmrep, *status);
+                       nfsmout_if(error);
+               }
 
+               if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
                        /*
-                        * If the File Handle was stale, invalidate the
-                        * lookup cache, just in case.
+                        * It's a JUKEBOX error - delay and try again
                         */
-                       if ((error == ESTALE) && vp)
-                               cache_purge(vp);
-                       if (nmp->nm_flag & NFSMNT_NFSV3) {
-                               *mrp = mrep;
-                               *mdp = md;
-                               *dposp = dpos;
-                               error |= NFSERR_RETERR;
+                       int delay, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
+
+                       mbuf_freem(mrep);
+                       req->r_nmrep.nmc_mhead = NULL;
+                       if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
+                               /* we're not yet completely mounted and */
+                               /* we can't complete an RPC, so we fail */
+                               OSAddAtomic(1, &nfsstats.rpctimeouts);
+                               nfs_softterm(req);
+                               error = req->r_error;
+                               goto nfsmout;
+                       }
+                       req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
+                       if (req->r_delay > 30)
+                               req->r_delay = 30;
+                       if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
+                               if (!(req->r_flags & R_JBTPRINTFMSG)) {
+                                       req->r_flags |= R_JBTPRINTFMSG;
+                                       lck_mtx_lock(&nmp->nm_lock);
+                                       nmp->nm_jbreqs++;
+                                       lck_mtx_unlock(&nmp->nm_lock);
+                               }
+                               nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
+                                       "resource temporarily unavailable (jukebox)");
+                       }
+                       if ((nmp->nm_flag & NFSMNT_SOFT) && (req->r_delay == 30)) {
+                               /* for soft mounts, just give up after a short while */
+                               OSAddAtomic(1, &nfsstats.rpctimeouts);
+                               nfs_softterm(req);
+                               error = req->r_error;
+                               goto nfsmout;
+                       }
+                       delay = req->r_delay;
+                       if (req->r_callback.rcb_func) {
+                               struct timeval now;
+                               microuptime(&now);
+                               req->r_resendtime = now.tv_sec + delay;
                        } else {
-                               mbuf_freem(mrep);
-                               error &= ~NFSERR_RETERR;
+                               do {
+                                       if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
+                                               goto nfsmout;
+                                       tsleep(&lbolt, PSOCK|slpflag, "nfs_jukebox_trylater", 0);
+                               } while (--delay > 0);
                        }
-                       mbuf_freem(rep->r_mreq);
-                       FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
-                       return (error);
+                       req->r_xid = 0;                 // get a new XID
+                       req->r_flags |= R_RESTART;
+                       req->r_start = 0;
+                       FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
+                       return (0);
+               }
+
+               if (req->r_flags & R_JBTPRINTFMSG) {
+                       req->r_flags &= ~R_JBTPRINTFMSG;
+                       lck_mtx_lock(&nmp->nm_lock);
+                       nmp->nm_jbreqs--;
+                       clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
+               }
+
+               if (*status == NFS_OK) {
+                       /*
+                        * Successful NFS request
+                        */
+                       *nmrepp = nmrep;
+                       req->r_nmrep.nmc_mhead = NULL;
+                       break;
                }
+               /* Got an NFS error of some kind */
+
+               /*
+                * If the File Handle was stale, invalidate the
+                * lookup cache, just in case.
+                */
+               if ((*status == ESTALE) && req->r_np)
+                       cache_purge(NFSTOV(req->r_np));
+               if (nmp->nm_vers == NFS_VER2)
+                       mbuf_freem(mrep);
+               else
+                       *nmrepp = nmrep;
+               req->r_nmrep.nmc_mhead = NULL;
+               error = 0;
+               break;
+       case RPC_PROGUNAVAIL:
+               error = EPROGUNAVAIL;
+               break;
+       case RPC_PROGMISMATCH:
+               error = ERPCMISMATCH;
+               break;
+       case RPC_PROCUNAVAIL:
+               error = EPROCUNAVAIL;
+               break;
+       case RPC_GARBAGE:
+               error = EBADRPC;
+               break;
+       case RPC_SYSTEM_ERR:
+       default:
+               error = EIO;
+               break;
+       }
+nfsmout:
+       if (req->r_flags & R_JBTPRINTFMSG) {
+               req->r_flags &= ~R_JBTPRINTFMSG;
+               lck_mtx_lock(&nmp->nm_lock);
+               nmp->nm_jbreqs--;
+               clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
+               lck_mtx_unlock(&nmp->nm_lock);
+               if (clearjbtimeo)
+                       nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
+       }
+       FSDBG(273, R_XID32(req->r_xid), nmp, req,
+               (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
+       return (error);
+}
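
The NFSERR_TRYLATER (jukebox) handling above retries with an exponentially growing delay: r_delay starts at NFS_TRYLATERDEL, doubles on every retry, and is capped at 30 seconds; soft mounts give up once the cap is reached, and a jukebox tprintf message is posted once the delay passes nm_tprintf_initial_delay. The delay schedule on its own (the value of NFS_TRYLATERDEL below is an assumption):

#include <stdio.h>

#define NFS_TRYLATERDEL 4       /* initial delay in seconds (assumed value) */

int
main(void)
{
        int delay = 0, attempt;

        for (attempt = 1; attempt <= 6; attempt++) {
                /* same update as nfs_request_finish(): start at
                 * NFS_TRYLATERDEL, double each time, cap at 30s */
                delay = !delay ? NFS_TRYLATERDEL : (delay * 2);
                if (delay > 30)
                        delay = 30;
                printf("retry %d after %d seconds\n", attempt, delay);
        }
        return (0);     /* prints 4, 8, 16, 30, 30, 30 */
}
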
+
+
+/*
+ * Perform an NFS request synchronously.
+ */
+
+int
+nfs_request(
+       nfsnode_t np,
+       mount_t mp,     /* used only if !np */
+       struct nfsm_chain *nmrest,
+       int procnum,
+       vfs_context_t ctx,
+       struct nfsm_chain *nmrepp,
+       u_int64_t *xidp,
+       int *status)
+{
+       return nfs_request2(np, mp, nmrest, procnum,
+               vfs_context_thread(ctx), vfs_context_ucred(ctx),
+               0, nmrepp, xidp, status);
+}
+
+int
+nfs_request2(
+       nfsnode_t np,
+       mount_t mp,     /* used only if !np */
+       struct nfsm_chain *nmrest,
+       int procnum,
+       thread_t thd,
+       kauth_cred_t cred,
+       int flags,
+       struct nfsm_chain *nmrepp,
+       u_int64_t *xidp,
+       int *status)
+{
+       struct nfsreq rq, *req = &rq;
+       int error;
+
+       if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req)))
+               return (error);
+       req->r_flags |= (flags & R_OPTMASK);
 
-               *mrp = mrep;
-               *mdp = md;
-               *dposp = dpos;
-               mbuf_freem(rep->r_mreq);
-               FSDBG_BOT(531, 0xf0f0f0f0, rep->r_xid, nmp, rep);
-               return (0);
-       }
-       mbuf_freem(mrep);
-       error = EPROTONOSUPPORT;
-nfsmout:
-       mbuf_freem(rep->r_mreq);
-       FSDBG_BOT(531, error, rep->r_xid, nmp, rep);
+       FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
+       do {
+               req->r_error = 0;
+               req->r_flags &= ~R_RESTART;
+               if ((error = nfs_request_add_header(req)))
+                       break;
+               if (xidp)
+                       *xidp = req->r_xid;
+               if ((error = nfs_request_send(req, 1)))
+                       break;
+               nfs_request_wait(req);
+               if ((error = nfs_request_finish(req, nmrepp, status)))
+                       break;
+       } while (req->r_flags & R_RESTART);
+
+       FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
+       nfs_request_rele(req);
        return (error);
 }
 
-#ifndef NFS_NOSERVER
+
 /*
- * Generate the rpc reply header
- * siz arg. is used to decide if adding a cluster is worthwhile
+ * Set up a new null proc request to exchange GSS context tokens with the
+ * server. Associate the context that we are setting up with the request that we
+ * are sending.
  */
 int
-nfs_rephead(siz, nd, slp, err, mrq, mbp, bposp)
-       int siz;
-       struct nfsrv_descript *nd;
-       struct nfssvc_sock *slp;
-       int err;
-       mbuf_t *mrq;
-       mbuf_t *mbp;
-       caddr_t *bposp;
+nfs_request_gss(
+               mount_t mp,
+               struct nfsm_chain *nmrest,
+               thread_t thd,
+               kauth_cred_t cred,
+               int flags,
+               struct nfs_gss_clnt_ctx *cp,   /* Set to gss context to renew or setup */
+               struct nfsm_chain *nmrepp,
+               int *status)
 {
-       u_long *tl;
-       mbuf_t mreq;
-       caddr_t bpos;
-       mbuf_t mb, mb2;
-       int error, mlen;
+       struct nfsreq rq, *req = &rq;
+       int error;
 
-       /*
-        * If this is a big reply, use a cluster else
-        * try and leave leading space for the lower level headers.
-        */
-       siz += RPC_REPLYSIZ;
-       if (siz >= nfs_mbuf_minclsize) {
-               error = mbuf_getpacket(MBUF_WAITOK, &mreq);
-       } else {
-               error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
-       }
-       if (error) {
-               /* unable to allocate packet */
-               /* XXX nfsstat? */
+       if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req)))
                return (error);
+       req->r_flags |= (flags & R_OPTMASK);
+       
+       if (cp == NULL) {
+               printf("nfs_request_gss request has no context\n");
+               nfs_request_rele(req);
+               return (NFSERR_EAUTH);
        }
-       mb = mreq;
-       tl = mbuf_data(mreq);
-       mlen = 6 * NFSX_UNSIGNED;
-       if (siz < nfs_mbuf_minclsize) {
-               /* leave space for lower level headers */
-               tl += 80/sizeof(*tl);  /* XXX max_hdr? XXX */
-               mbuf_setdata(mreq, tl, mlen);
-       } else {
-               mbuf_setlen(mreq, mlen);
-       }
-       bpos = ((caddr_t)tl) + mlen;
-       *tl++ = txdr_unsigned(nd->nd_retxid);
-       *tl++ = rpc_reply;
-       if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
-               *tl++ = rpc_msgdenied;
-               if (err & NFSERR_AUTHERR) {
-                       *tl++ = rpc_autherr;
-                       *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
-                       mlen -= NFSX_UNSIGNED;
-                       mbuf_setlen(mreq, mlen);
-                       bpos -= NFSX_UNSIGNED;
-               } else {
-                       *tl++ = rpc_mismatch;
-                       *tl++ = txdr_unsigned(RPC_VER2);
-                       *tl = txdr_unsigned(RPC_VER2);
-               }
-       } else {
-               *tl++ = rpc_msgaccepted;
-
-               /*
-                * For Kerberos authentication, we must send the nickname
-                * verifier back, otherwise just RPCAUTH_NULL.
-                */
-               if (nd->nd_flag & ND_KERBFULL) {
-                   struct nfsuid *nuidp;
-                   struct timeval ktvin, ktvout;
-                   uid_t uid = kauth_cred_getuid(nd->nd_cr);
-
-                   lck_rw_lock_shared(&slp->ns_rwlock);
-                   for (nuidp = NUIDHASH(slp, uid)->lh_first;
-                       nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
-                       if (kauth_cred_getuid(nuidp->nu_cr) == uid &&
-                           (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
-                            &nuidp->nu_haddr, nd->nd_nam2)))
-                           break;
-                   }
-                   if (nuidp) {
-                       ktvin.tv_sec =
-                           txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
-                       ktvin.tv_usec =
-                           txdr_unsigned(nuidp->nu_timestamp.tv_usec);
-
-                       /*
-                        * Encrypt the timestamp in ecb mode using the
-                        * session key.
-                        */
-#if NFSKERB
-                       XXX
-#endif
+       nfs_gss_clnt_ctx_ref(req, cp);
 
-                       *tl++ = rpc_auth_kerb;
-                       *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
-                       *tl = ktvout.tv_sec;
-                       nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED);
-                       *tl++ = ktvout.tv_usec;
-                       *tl++ = txdr_unsigned(kauth_cred_getuid(nuidp->nu_cr));
-                   } else {
-                       *tl++ = 0;
-                       *tl++ = 0;
-                   }
-                   lck_rw_done(&slp->ns_rwlock);
-               } else {
-                       *tl++ = 0;
-                       *tl++ = 0;
-               }
-               switch (err) {
-               case EPROGUNAVAIL:
-                       *tl = txdr_unsigned(RPC_PROGUNAVAIL);
-                       break;
-               case EPROGMISMATCH:
-                       *tl = txdr_unsigned(RPC_PROGMISMATCH);
-                       nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
-                       // XXX hard coded versions
-                       *tl++ = txdr_unsigned(2);
-                       *tl = txdr_unsigned(3);
+       FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
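+       /* same add-header/send/wait/finish loop as nfs_request2(), repeated on restart */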
+       do {
+               req->r_error = 0;
+               req->r_flags &= ~R_RESTART;
+               if ((error = nfs_request_add_header(req)))
                        break;
-               case EPROCUNAVAIL:
-                       *tl = txdr_unsigned(RPC_PROCUNAVAIL);
+
+               if ((error = nfs_request_send(req, 1)))
                        break;
-               case EBADRPC:
-                       *tl = txdr_unsigned(RPC_GARBAGE);
+               nfs_request_wait(req);
+               if ((error = nfs_request_finish(req, nmrepp, status)))
                        break;
-               default:
-                       *tl = 0;
-                       if (err != NFSERR_RETVOID) {
-                               nfsm_build(tl, u_long *, NFSX_UNSIGNED);
-                               if (err)
-                                   *tl = txdr_unsigned(nfsrv_errmap(nd, err));
-                               else
-                                   *tl = 0;
+       } while (req->r_flags & R_RESTART);
+
+       FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
+       nfs_request_rele(req);
+       return (error);
+}
+       
+/*
+ * Create and start an asynchronous NFS request.
+ */
+int
+nfs_request_async(
+       nfsnode_t np,
+       mount_t mp,     /* used only if !np */
+       struct nfsm_chain *nmrest,
+       int procnum,
+       thread_t thd,
+       kauth_cred_t cred,
+       struct nfsreq_cbinfo *cb,
+       struct nfsreq **reqp)
+{
+       struct nfsreq *req;
+       int error, sent;
+
+       error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
+       req = *reqp;
+       FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
+       if (error)
+               return (error);
+       req->r_flags |= R_ASYNC;
+       if (cb)
+               req->r_callback = *cb;
+       error = nfs_request_add_header(req);
+       if (!error) {
+               req->r_flags |= R_WAITSENT;
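+               /* requests with a completion callback take an extra reference, */
+               /* dropped below if the send fails before the request goes out */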
+               if (req->r_callback.rcb_func)
+                       nfs_request_ref(req, 0);
+               error = nfs_request_send(req, 1);
+               lck_mtx_lock(&req->r_mtx);
+               if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
+                       /* make sure to wait until this async I/O request gets sent */
+                       int slpflag = (req->r_nmp && (req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
+                       struct timespec ts = { 2, 0 };
+                       while (!(req->r_flags & R_SENT)) {
+                               if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
+                                       break;
+                               msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
+                               slpflag = 0;
                        }
-                       break;
                }
+               sent = req->r_flags & R_SENT;
+               lck_mtx_unlock(&req->r_mtx);
+               if (error && req->r_callback.rcb_func && !sent)
+                       nfs_request_rele(req);
        }
+       FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
+       if (error || req->r_callback.rcb_func)
+               nfs_request_rele(req);
+       return (error);
+}
 
-       if (mrq != NULL)
-               *mrq = mreq;
-       *mbp = mb;
-       *bposp = bpos;
-       if (err != 0 && err != NFSERR_RETVOID) {
-               OSAddAtomic(1, (SInt32*)&nfsstats.srvrpc_errs);
+/*
+ * Wait for and finish an asynchronous NFS request.
+ */
+int
+nfs_request_async_finish(
+       struct nfsreq *req,
+       struct nfsm_chain *nmrepp,
+       u_int64_t *xidp,
+       int *status)
+{
+       int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
+
+       lck_mtx_lock(&req->r_mtx);
+       if (!asyncio)
+               req->r_flags |= R_ASYNCWAIT;
+       while (req->r_flags & R_RESENDQ) {  /* wait until the request is off the resend queue */
+               struct timespec ts = { 2, 0 };
+               if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
+                       break;
+               msleep(req, &req->r_mtx, PZERO-1, "nfsresendqwait", &ts);
        }
-       return (0);
-}
+       lck_mtx_unlock(&req->r_mtx);
 
+       if (!error) {
+               nfs_request_wait(req);
+               error = nfs_request_finish(req, nmrepp, status);
+       }
 
-#endif /* NFS_NOSERVER */
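+       /*
+        * Handle restarts: an async request with a resend time is queued for a
+        * later resend (EINPROGRESS); otherwise rebuild and resend it here.
+        */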
+       while (!error && (req->r_flags & R_RESTART)) {
+               if (asyncio && req->r_resendtime) {  /* send later */
+                       lck_mtx_lock(&req->r_mtx);
+                       nfs_asyncio_resend(req);
+                       lck_mtx_unlock(&req->r_mtx);
+                       return (EINPROGRESS);
+               }
+               req->r_error = 0;
+               req->r_flags &= ~R_RESTART;
+               if ((error = nfs_request_add_header(req)))
+                       break;
+               if ((error = nfs_request_send(req, !asyncio)))
+                       break;
+               if (asyncio)
+                       return (EINPROGRESS);
+               nfs_request_wait(req);
+               if ((error = nfs_request_finish(req, nmrepp, status)))
+                       break;
+       }
+       if (xidp)
+               *xidp = req->r_xid;
 
+       FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
+       nfs_request_rele(req);
+       return (error);
+}
 
 /*
- * From FreeBSD 1.58, a Matt Dillon fix...
- * Flag a request as being about to terminate.
- * The nm_sent count is decremented now to avoid deadlocks when the process
- * in soreceive() hasn't yet managed to send its own request.
+ * Cancel a pending asynchronous NFS request.
  */
-static void
-nfs_softterm(struct nfsreq *rep)
+void
+nfs_request_async_cancel(struct nfsreq *req)
 {
-
-       rep->r_flags |= R_SOFTTERM;
-       if (rep->r_flags & R_SENT) {
-               FSDBG(532, rep->r_xid, rep, rep->r_nmp->nm_sent,
-                     rep->r_nmp->nm_cwnd);
-               rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
-               rep->r_flags &= ~R_SENT;
-       }
+       nfs_reqdequeue(req);
+       FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
+       nfs_request_rele(req);
 }
 
+/*
+ * Flag a request as being terminated.
+ */
 void
-nfs_timer_funnel(void * arg)
+nfs_softterm(struct nfsreq *req)
 {
-       (void) thread_funnel_set(kernel_flock, TRUE);
-       nfs_timer(arg);
-       (void) thread_funnel_set(kernel_flock, FALSE);
-
+       struct nfsmount *nmp = req->r_nmp;
+       req->r_flags |= R_SOFTTERM;
+       req->r_error = ETIMEDOUT;
+       if (!(req->r_flags & R_CWND) || !nmp)
+               return;
+       /* update congestion window */
+       req->r_flags &= ~R_CWND;
+       lck_mtx_lock(&nmp->nm_lock);
+       FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
+       nmp->nm_sent -= NFS_CWNDSCALE;
+       if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
+               /* congestion window is open, poke the cwnd queue */
+               struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
+               TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
+               req2->r_cchain.tqe_next = NFSREQNOLIST;
+               wakeup(req2);
+       }
+       lck_mtx_unlock(&nmp->nm_lock);
 }
 
 /*
- * Ensure rep isn't in use by the timer, then dequeue it.
+ * Ensure req isn't in use by the timer, then dequeue it.
  */
-static void
-nfs_repdequeue(struct nfsreq *rep)
+void
+nfs_reqdequeue(struct nfsreq *req)
 {
-
-       while ((rep->r_flags & R_BUSY)) {
-               rep->r_flags |= R_WAITING;
-               tsleep(rep, PSOCK, "repdeq", 0);
+       lck_mtx_lock(nfs_request_mutex);
+       while (req->r_lflags & RL_BUSY) {
+               req->r_lflags |= RL_WAITING;
+               msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
+       }
+       if (req->r_lflags & RL_QUEUED) {
+               TAILQ_REMOVE(&nfs_reqq, req, r_chain);
+               req->r_lflags &= ~RL_QUEUED;
        }
-       TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
+       lck_mtx_unlock(nfs_request_mutex);
 }
 
 /*
  * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
  * free()'d out from under it.
  */
-static void
-nfs_repbusy(struct nfsreq *rep)
+void
+nfs_reqbusy(struct nfsreq *req)
 {
-
-       if ((rep->r_flags & R_BUSY))
-               panic("rep locked");
-       rep->r_flags |= R_BUSY;
+       if (req->r_lflags & RL_BUSY)
+               panic("req locked");
+       req->r_lflags |= RL_BUSY;
 }
 
 /*
  * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
  */
-static struct nfsreq *
-nfs_repnext(struct nfsreq *rep)
+struct nfsreq *
+nfs_reqnext(struct nfsreq *req)
 {
-       struct nfsreq * nextrep;
+       struct nfsreq * nextreq;
 
-       if (rep == NULL)
+       if (req == NULL)
                return (NULL);
        /*
         * We need to get and busy the next req before signalling the
         * current one, otherwise wakeup() may block us and we'll race to
         * grab the next req.
         */
-       nextrep = TAILQ_NEXT(rep, r_chain);
-       if (nextrep != NULL)
-               nfs_repbusy(nextrep);
+       nextreq = TAILQ_NEXT(req, r_chain);
+       if (nextreq != NULL)
+               nfs_reqbusy(nextreq);
        /* unbusy and signal. */
-       rep->r_flags &= ~R_BUSY;
-       if ((rep->r_flags & R_WAITING)) {
-               rep->r_flags &= ~R_WAITING;
-               wakeup(rep);
+       req->r_lflags &= ~RL_BUSY;
+       if (req->r_lflags & RL_WAITING) {
+               req->r_lflags &= ~RL_WAITING;
+               wakeup(&req->r_lflags);
        }
-       return (nextrep);
+       return (nextreq);
 }
 
 /*
- * Nfs timer routine
- * Scan the nfsreq list and retranmit any requests that have timed out
- * To avoid retransmission attempts on STREAM sockets (in the future) make
- * sure to set the r_retry field to 0 (implies nm_retry == 0).
+ * NFS request queue timer routine
+ *
+ * Scan the NFS request queue for any requests that have timed out.
+ *
+ * Alert the system to unresponsive servers.
+ * Mark expired requests on soft mounts as terminated.
+ * For UDP, mark/signal requests for retransmission.
  */
 void
-nfs_timer(__unused void *arg)
+nfs_request_timer(__unused void *param0, __unused void *param1)
 {
-       struct nfsreq *rep;
-       mbuf_t m;
-       socket_t so;
+       struct nfsreq *req;
        struct nfsmount *nmp;
-       int timeo;
-       int error;
-#ifndef NFS_NOSERVER
-       struct nfssvc_sock *slp;
-       u_quad_t cur_usec;
-#endif /* NFS_NOSERVER */
-       int flags, rexmit, cwnd, sent;
-       u_long xid;
+       int timeo, maxtime, finish_asyncio, error;
        struct timeval now;
+       TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
+
+       lck_mtx_lock(nfs_request_mutex);
+       req = TAILQ_FIRST(&nfs_reqq);
+       if (req == NULL) {      /* no requests - turn timer off */
+               nfs_request_timer_on = 0;
+               lck_mtx_unlock(nfs_request_mutex);
+               return;
+       }
+
+       nfs_reqbusy(req);
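+       /* mounts with timed-out requests on TCP sockets are queued here to be poked after the scan */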
+       TAILQ_INIT(&nfs_mount_poke_queue);
 
-       rep = TAILQ_FIRST(&nfs_reqq);
-       if (rep != NULL)
-               nfs_repbusy(rep);
        microuptime(&now);
-       for ( ; rep != NULL ; rep = nfs_repnext(rep)) {
-               nmp = rep->r_nmp;
+       for ( ; req != NULL ; req = nfs_reqnext(req)) {
+               nmp = req->r_nmp;
                if (!nmp) /* unmounted */
-                   continue;
-               if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
                        continue;
-               if (nfs_sigintr(nmp, rep, rep->r_procp))
+               if (req->r_error || req->r_nmrep.nmc_mhead)
+                       continue;
+               if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
+                       if (req->r_callback.rcb_func != NULL) {
+                               /* async I/O RPC needs to be finished */
+                               lck_mtx_lock(&req->r_mtx);
+                               req->r_error = error;
+                               finish_asyncio = !(req->r_flags & R_WAITSENT);
+                               wakeup(req);
+                               lck_mtx_unlock(&req->r_mtx);
+                               if (finish_asyncio)
+                                       nfs_asyncio_finish(req);
+                       }
                        continue;
-               if (nmp->nm_tprintf_initial_delay != 0 &&
-                   (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
-                   rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
-                       rep->r_lastmsg = now.tv_sec;
-                       nfs_down(rep->r_nmp, rep->r_procp, 0, NFSSTA_TIMEO,
+               }
+
+               lck_mtx_lock(&req->r_mtx);
+
+               if (nmp->nm_tprintf_initial_delay &&
+                   ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
+                   ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
+                       req->r_lastmsg = now.tv_sec;
+                       nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
                                "not responding");
-                       rep->r_flags |= R_TPRINTFMSG;
+                       req->r_flags |= R_TPRINTFMSG;
+                       lck_mtx_lock(&nmp->nm_lock);
                        if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
+                               lck_mtx_unlock(&nmp->nm_lock);
                                /* we're not yet completely mounted and */
                                /* we can't complete an RPC, so we fail */
-                               OSAddAtomic(1, (SInt32*)&nfsstats.rpctimeouts);
-                               nfs_softterm(rep);
+                               OSAddAtomic(1, &nfsstats.rpctimeouts);
+                               nfs_softterm(req);
+                               finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
+                               wakeup(req);
+                               lck_mtx_unlock(&req->r_mtx);
+                               if (finish_asyncio)
+                                       nfs_asyncio_finish(req);
                                continue;
                        }
+                       lck_mtx_unlock(&nmp->nm_lock);
                }
-               if (rep->r_rtt >= 0) {
-                       rep->r_rtt++;
-                       if (nmp->nm_flag & NFSMNT_DUMBTIMR)
-                               timeo = nmp->nm_timeo;
-                       else
-                               timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
-                       /* ensure 62.5 ms floor */
-                       while (16 * timeo < hz)
-                           timeo *= 2;
-                       if (nmp->nm_timeouts > 0)
-                               timeo *= nfs_backoff[nmp->nm_timeouts - 1];
-                       if (rep->r_rtt <= timeo)
-                               continue;
-                       if (nmp->nm_timeouts < 8)
-                               nmp->nm_timeouts++;
-               }
+
                /*
-                * Check for too many retransmits.  This is never true for
-                * 'hard' mounts because we set r_retry to NFS_MAXREXMIT + 1
-                * and never allow r_rexmit to be more than NFS_MAXREXMIT.
+                * Put a reasonable limit on the maximum timeout,
+                * and reduce that limit when soft mounts get timeouts or are in reconnect.
                 */
-               if (rep->r_rexmit >= rep->r_retry) {    /* too many */
-                       OSAddAtomic(1, (SInt32*)&nfsstats.rpctimeouts);
-                       nfs_softterm(rep);
-                       continue;
-               }
-               if (nmp->nm_sotype != SOCK_DGRAM) {
-                       if (++rep->r_rexmit > NFS_MAXREXMIT)
-                               rep->r_rexmit = NFS_MAXREXMIT;
-                       continue;
-               }
-               if ((so = nmp->nm_so) == NULL)
-                       continue;
+               if (!(nmp->nm_flag & NFSMNT_SOFT))
+                       maxtime = NFS_MAXTIMEO;
+               else if ((req->r_flags & (R_SETUP|R_RECOVER)) ||
+                        ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8)))
+                       maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts+1))/2;
+               else
+                       maxtime = NFS_MINTIMEO/4;
 
                /*
-                * If there is enough space and the window allows..
-                *      Resend it
-                * Set r_rtt to -1 in case we fail to send it now.
+                * Check for request timeout.
                 */
-               rep->r_rtt = -1;
-               if (((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
-                   (rep->r_flags & R_SENT) ||
-                   nmp->nm_sent < nmp->nm_cwnd) &&
-                  (mbuf_copym(rep->r_mreq, 0, MBUF_COPYALL, MBUF_DONTWAIT, &m) == 0)){
-                       struct msghdr   msg;
-                       /*
-                        * Iff first send, start timing
-                        * else turn timing off, backoff timer
-                        * and divide congestion window by 2.
-                        * We update these *before* the send to avoid
-                        * racing against receiving the reply.
-                        * We save them so we can restore them on send error.
-                        */
-                       flags = rep->r_flags;
-                       rexmit = rep->r_rexmit;
-                       cwnd = nmp->nm_cwnd;
-                       sent = nmp->nm_sent;
-                       xid = rep->r_xid;
-                       if (rep->r_flags & R_SENT) {
-                               rep->r_flags &= ~R_TIMING;
-                               if (++rep->r_rexmit > NFS_MAXREXMIT)
-                                       rep->r_rexmit = NFS_MAXREXMIT;
-                               nmp->nm_cwnd >>= 1;
-                               if (nmp->nm_cwnd < NFS_CWNDSCALE)
-                                       nmp->nm_cwnd = NFS_CWNDSCALE;
-                               OSAddAtomic(1, (SInt32*)&nfsstats.rpcretries);
+               if (req->r_rtt >= 0) {
+                       req->r_rtt++;
+                       lck_mtx_lock(&nmp->nm_lock);
+                       if (req->r_flags & R_RESENDERR) {
+                               /* with resend errors, retry every few seconds */
+                               timeo = 4*hz;
                        } else {
-                               rep->r_flags |= R_SENT;
-                               nmp->nm_sent += NFS_CWNDSCALE;
-                       }
-                       FSDBG(535, xid, rep, nmp->nm_sent, nmp->nm_cwnd);
+                               if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL)
+                                       timeo = NFS_MINIDEMTIMEO; // gss context setup
+                               else if (nmp->nm_flag & NFSMNT_DUMBTIMR)
+                                       timeo = nmp->nm_timeo;
+                               else
+                                       timeo = NFS_RTO(nmp, proct[req->r_procnum]);
 
-                       bzero(&msg, sizeof(msg));
-                       if ((nmp->nm_flag & NFSMNT_NOCONN) == NFSMNT_NOCONN) {
-                               msg.msg_name = mbuf_data(nmp->nm_nam);
-                               msg.msg_namelen = mbuf_len(nmp->nm_nam);
+                               /* ensure 62.5 ms floor */
+                               while (16 * timeo < hz)
+                                       timeo *= 2;
+                               if (nmp->nm_timeouts > 0)
+                                       timeo *= nfs_backoff[nmp->nm_timeouts - 1];
+                       }
+                       /* limit timeout to max */
+                       if (timeo > maxtime)
+                               timeo = maxtime;
+                       if (req->r_rtt <= timeo) {
+                               lck_mtx_unlock(&nmp->nm_lock);
+                               lck_mtx_unlock(&req->r_mtx);
+                               continue;
                        }
-                       error = sock_sendmbuf(so, &msg, m, MSG_DONTWAIT, NULL);
+                       /* The request has timed out */
+                       NFS_SOCK_DBG(("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
+                               req->r_procnum, proct[req->r_procnum],
+                               req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
+                               (now.tv_sec - req->r_start)*NFS_HZ, maxtime));
+                       if (nmp->nm_timeouts < 8)
+                               nmp->nm_timeouts++;
+                       nfs_mount_check_dead_timeout(nmp);
+                       /* if it's been a few seconds, try poking the socket */
+                       if ((nmp->nm_sotype == SOCK_STREAM) &&
+                           ((now.tv_sec - req->r_start) >= 3) &&
+                           !(nmp->nm_sockflags & NMSOCK_POKE)) {
+                               nmp->nm_sockflags |= NMSOCK_POKE;
+                               TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
+                       }
+                       lck_mtx_unlock(&nmp->nm_lock);
+               }
 
-                       FSDBG(535, xid, error, sent, cwnd);
+               /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
+               if (((nmp->nm_flag & NFSMNT_SOFT) || (req->r_flags & (R_SETUP|R_RECOVER))) &&
+                   ((req->r_rexmit >= req->r_retry) || /* too many */
+                    ((now.tv_sec - req->r_start)*NFS_HZ > maxtime))) { /* too long */
+                       OSAddAtomic(1, &nfsstats.rpctimeouts);
+                       lck_mtx_lock(&nmp->nm_lock);
+                       if (!(nmp->nm_state & NFSSTA_TIMEO)) {
+                               lck_mtx_unlock(&nmp->nm_lock);
+                               /* make sure we note the unresponsive server */
+                               /* (maxtime may be less than tprintf delay) */
+                               nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
+                                       "not responding");
+                               req->r_lastmsg = now.tv_sec;
+                               req->r_flags |= R_TPRINTFMSG;
+                       } else {
+                               lck_mtx_unlock(&nmp->nm_lock);
+                       }
+                       NFS_SOCK_DBG(("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
+                               req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
+                               now.tv_sec - req->r_start));
+                       nfs_softterm(req);
+                       finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
+                       wakeup(req);
+                       lck_mtx_unlock(&req->r_mtx);
+                       if (finish_asyncio)
+                               nfs_asyncio_finish(req);
+                       continue;
+               }
 
-                       if (error) {
-                               if (error == EWOULDBLOCK) {
-                                       rep->r_flags = flags;
-                                       rep->r_rexmit = rexmit;
-                                       nmp->nm_cwnd = cwnd;
-                                       nmp->nm_sent = sent;
-                                       rep->r_xid = xid;
-                               }
-                               else {
-                                       if (NFSIGNORE_SOERROR(nmp->nm_sotype, error)) {
-                                               int clearerror;
-                                               int optlen = sizeof(clearerror);
-                                               sock_getsockopt(nmp->nm_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
-                                       }
-                                       rep->r_flags  = flags | R_RESENDERR;
-                                       rep->r_rexmit = rexmit;
-                                       nmp->nm_cwnd = cwnd;
-                                       nmp->nm_sent = sent;
-                                       if (flags & R_SENT)
-                                               OSAddAtomic(-1, (SInt32*)&nfsstats.rpcretries);
-                               }
-                       } else
-                               rep->r_rtt = 0;
+               /* for TCP, only resend if explicitly requested */
+               if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
+                       if (++req->r_rexmit > NFS_MAXREXMIT)
+                               req->r_rexmit = NFS_MAXREXMIT;
+                       req->r_rtt = 0;
+                       lck_mtx_unlock(&req->r_mtx);
+                       continue;
                }
-       }
-       microuptime(&now);
-#ifndef NFS_NOSERVER
-       /*
-        * Scan the write gathering queues for writes that need to be
-        * completed now.
-        */
-       cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec;
-       lck_mtx_lock(nfsd_mutex);
-       TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
-           if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec))
-               nfsrv_wakenfsd(slp);
-       }
-       while ((slp = TAILQ_FIRST(&nfssvc_deadsockhead))) {
-               if ((slp->ns_timestamp + 5) > now.tv_sec)
-                       break;
-               TAILQ_REMOVE(&nfssvc_deadsockhead, slp, ns_chain);
-               nfsrv_slpfree(slp);
-       }
-       lck_mtx_unlock(nfsd_mutex);
-#endif /* NFS_NOSERVER */
 
-       if (nfsbuffreeuptimestamp + 30 <= now.tv_sec) {
                /*
-                * We haven't called nfs_buf_freeup() in a little while.
-                * So, see if we can free up any stale/unused bufs now.
+                * The request needs to be (re)sent.  Kick the requester to resend it.
+                * (unless it's already marked as needing a resend)
                 */
-               nfs_buf_freeup(1);
+               if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
+                       lck_mtx_unlock(&req->r_mtx);
+                       continue;
+               }
+               NFS_SOCK_DBG(("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
+                       req->r_procnum, req->r_xid, req->r_flags, req->r_rtt));
+               req->r_flags |= R_MUSTRESEND;
+               req->r_rtt = -1;
+               wakeup(req);
+               if ((req->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+                       nfs_asyncio_resend(req);
+               lck_mtx_unlock(&req->r_mtx);
        }
 
-       timeout(nfs_timer_funnel, (void *)0, nfs_ticks);
+       lck_mtx_unlock(nfs_request_mutex);
+
+       /* poke any sockets */
+       while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
+               TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
+               nfs_sock_poke(nmp);
+               lck_mtx_lock(&nmp->nm_lock);
+               nmp->nm_sockflags &= ~NMSOCK_POKE;
+               if (!(nmp->nm_state & NFSSTA_MOUNTED))
+                       wakeup(&nmp->nm_sockflags);
+               lck_mtx_unlock(&nmp->nm_lock);
+       }
 
+       nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
 }
 
+/*
+ * check a thread's proc for the "noremotehang" flag.
+ */
+int
+nfs_noremotehang(thread_t thd)
+{
+       proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
+       return (p && proc_noremotehang(p));
+}
 
 /*
  * Test for a termination condition pending on the process.
  * This is used to determine if we need to bail on a mount.
- * EIO is returned if there has been a soft timeout.
+ * ETIMEDOUT is returned if there has been a soft timeout.
  * EINTR is returned if there is a signal pending that is not being ignored
 * and the mount is interruptible, or if we are a thread that is in the process
  * of cancellation (also SIGKILL posted).
  */
 int
-nfs_sigintr(nmp, rep, p)
-       struct nfsmount *nmp;
-       struct nfsreq *rep;
-       proc_t p;
+nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
 {
-       sigset_t pending_sigs;
-       int context_good = 0;
-       struct nfsmount *repnmp;
-       extern proc_t kernproc;
+       int error = 0;
 
        if (nmp == NULL)
                return (ENXIO);
-       if (rep != NULL) {
-               repnmp = rep->r_nmp;
-               /* we've had a forced unmount. */
-               if (repnmp == NULL)
-                       return (ENXIO);
-               /* request has timed out on a 'soft' mount. */
-               if (rep->r_flags & R_SOFTTERM)
-                       return (EIO);
-               /*
-                * We're in the progress of a force unmount and there's
-                * been a timeout we're dead and fail IO.
-                */
-               if ((repnmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) ==
-                  (NFSSTA_FORCE|NFSSTA_TIMEO))
-                       return (EIO);
+
+       if (req && (req->r_flags & R_SOFTTERM))
+               return (ETIMEDOUT); /* request has been terminated. */
+
+       /*
+        * If we're in the process of a force unmount and there's
+        * been a timeout, we're dead and fail I/O.
+        */
+       if (!nmplocked)
+               lck_mtx_lock(&nmp->nm_lock);
+       if ((nmp->nm_state & NFSSTA_FORCE) &&
+           (nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_JUKEBOXTIMEO|NFSSTA_LOCKTIMEO))) {
+               error = EIO;
+       } else if (nmp->nm_mountp->mnt_kern_flag & MNTK_FRCUNMOUNT) {
                /* Someone is unmounting us, go soft and mark it. */
-               if (repnmp->nm_mountp->mnt_kern_flag & MNTK_FRCUNMOUNT) {
-                       repnmp->nm_flag |= NFSMNT_SOFT;
-                       nmp->nm_state |= NFSSTA_FORCE;
-               }
-               /*
-                * If the mount is hung and we've requested not to hang
-                * on remote filesystems, then bail now.
-                */
-               if (p != NULL && (proc_noremotehang(p)) != 0 &&
-                   (repnmp->nm_state & NFSSTA_TIMEO) != 0)
-                       return (EIO);
+               nmp->nm_flag |= NFSMNT_SOFT;
+               nmp->nm_state |= NFSSTA_FORCE;
        }
-       /* XXX: is this valid?  this probably should be an assertion. */
-       if (p == NULL)
+
+       /* Check if the mount is marked dead. */
+       if (!error && (nmp->nm_state & NFSSTA_DEAD))
+               error = ENXIO;
+
+       /*
+        * If the mount is hung and we've requested not to hang
+        * on remote filesystems, then bail now.
+        */
+       if (!error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd))
+               error = EIO;
+
+       if (!nmplocked)
+               lck_mtx_unlock(&nmp->nm_lock);
+       if (error)
+               return (error);
+
+       /* may not have a thread for async I/O */
+       if (thd == NULL)
                return (0);
 
-       /* Is this thread belongs to kernel task; then abort check  is not needed */
-       if ((current_proc() != kernproc) && current_thread_aborted()) {
+       /* If this thread belongs to the kernel task, the abort check is not needed */
+       if ((current_proc() != kernproc) && current_thread_aborted())
                return (EINTR);
-       }
-       /* mask off thread and process blocked signals. */
 
-       pending_sigs = proc_pendingsignals(p, NFSINT_SIGMASK);
-       if (pending_sigs && (nmp->nm_flag & NFSMNT_INT) != 0)
+       /* mask off thread and process blocked signals. */
+       if ((nmp->nm_flag & NFSMNT_INT) &&
+           proc_pendingsignals(get_bsdthreadtask_info(thd), NFSINT_SIGMASK))
                return (EINTR);
        return (0);
 }
@@ -1993,162 +3481,445 @@ nfs_sigintr(nmp, rep, p)
  * in progress when a reconnect is necessary.
  */
 int
-nfs_sndlock(rep)
-       struct nfsreq *rep;
+nfs_sndlock(struct nfsreq *req)
 {
+       struct nfsmount *nmp = req->r_nmp;
        int *statep;
-       proc_t p;
-       int error, slpflag = 0, slptimeo = 0;
+       int error = 0, slpflag = 0;
+       struct timespec ts = { 0, 0 };
+
+       if (nmp == NULL)
+               return (ENXIO);
+
+       lck_mtx_lock(&nmp->nm_lock);
+       statep = &nmp->nm_state;
+
+       if ((nmp->nm_flag & NFSMNT_INT) && req->r_thread)
+               slpflag = PCATCH;
+       while (*statep & NFSSTA_SNDLOCK) {
+               if ((error = nfs_sigintr(nmp, req, req->r_thread, 1)))
+                       break;
+               *statep |= NFSSTA_WANTSND;
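+               /* poll every second for "noremotehang" threads so nfs_sigintr() is rechecked */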
+               if (nfs_noremotehang(req->r_thread))
+                       ts.tv_sec = 1;
+               msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
+               if (slpflag == PCATCH) {
+                       slpflag = 0;
+                       ts.tv_sec = 2;
+               }
+       }
+       if (!error)
+               *statep |= NFSSTA_SNDLOCK;
+       lck_mtx_unlock(&nmp->nm_lock);
+       return (error);
+}
+
+/*
+ * Unlock the stream socket for others.
+ */
+void
+nfs_sndunlock(struct nfsreq *req)
+{
+       struct nfsmount *nmp = req->r_nmp;
+       int *statep, wake = 0;
+
+       if (nmp == NULL)
+               return;
+       lck_mtx_lock(&nmp->nm_lock);
+       statep = &nmp->nm_state;
+       if ((*statep & NFSSTA_SNDLOCK) == 0)
+               panic("nfs sndunlock");
+       *statep &= ~NFSSTA_SNDLOCK;
+       if (*statep & NFSSTA_WANTSND) {
+               *statep &= ~NFSSTA_WANTSND;
+               wake = 1;
+       }
+       lck_mtx_unlock(&nmp->nm_lock);
+       if (wake)
+               wakeup(statep);
+}
+
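+/*
+ * Send an RPC request over a private UDP socket, retransmitting
+ * periodically until a reply arrives or the timeout expires, and
+ * dissect the RPC reply header into *nmrep.
+ */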
+int
+nfs_aux_request(
+       struct nfsmount *nmp,
+       thread_t thd,
+       struct sockaddr_in *saddr,
+       mbuf_t mreq,
+       uint32_t xid,
+       int bindresv,
+       int timeo,
+       struct nfsm_chain *nmrep)
+{
+       int error = 0, on = 1, try, sendat = 2;
+       socket_t so = NULL;
+       struct sockaddr_in sin;
+       struct timeval tv = { 1, 0 };
+       mbuf_t m, mrep = NULL;
+       struct msghdr msg;
+       uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
+       uint32_t verf_type, verf_len, accepted_status;
+       size_t readlen;
+
+       /* create socket and set options */
+       if (((error = sock_socket(saddr->sin_family, SOCK_DGRAM, IPPROTO_UDP, NULL, NULL, &so))) ||
+           ((error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
+           ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
+           ((error = sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on)))))
+               goto nfsmout;
+       if (bindresv) {
+               int portrange = IP_PORTRANGE_LOW;
+               error = sock_setsockopt(so, IPPROTO_IP, IP_PORTRANGE, &portrange, sizeof(portrange));
+               nfsmout_if(error);
+               /* bind now to check for failure */
+               sin.sin_len = sizeof (struct sockaddr_in);
+               sin.sin_family = AF_INET;
+               sin.sin_addr.s_addr = INADDR_ANY;
+               sin.sin_port = 0;
+               error = sock_bind(so, (struct sockaddr *) &sin);
+               nfsmout_if(error);
+       }
+
+       for (try=0; try < timeo; try++) {
+               if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
+                       break;
+               if (!try || (try == sendat)) {
+                       /* send the request (resending periodically) */
+                       if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m)))
+                               goto nfsmout;
+                       bzero(&msg, sizeof(msg));
+                       msg.msg_name = saddr;
+                       msg.msg_namelen = saddr->sin_len;
+                       if ((error = sock_sendmbuf(so, &msg, m, 0, NULL)))
+                               goto nfsmout;
+                       sendat *= 2;
+                       if (sendat > 30)
+                               sendat = 30;
+               }
+               /* wait for the response */
+               readlen = 1<<18;
+               bzero(&msg, sizeof(msg));
+               error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
+               if (error == EWOULDBLOCK)
+                       continue;
+               nfsmout_if(error);
+               /* parse the response */
+               nfsm_chain_dissect_init(error, nmrep, mrep);
+               nfsm_chain_get_32(error, nmrep, rxid);
+               nfsm_chain_get_32(error, nmrep, reply);
+               nfsmout_if(error);
+               if ((rxid != xid) || (reply != RPC_REPLY))
+                       error = EBADRPC;
+               nfsm_chain_get_32(error, nmrep, reply_status);
+               nfsmout_if(error);
+               if (reply_status == RPC_MSGDENIED) {
+                       nfsm_chain_get_32(error, nmrep, rejected_status);
+                       nfsmout_if(error);
+                       error = (rejected_status == RPC_MISMATCH) ? ENOTSUP : EACCES;
+                       goto nfsmout;
+               }
+               nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
+               nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
+               nfsmout_if(error);
+               if (verf_len)
+                       nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
+               nfsm_chain_get_32(error, nmrep, accepted_status);
+               nfsm_assert(error, (accepted_status == RPC_SUCCESS), EIO);
+               break;
+       }
+nfsmout:
+       if (so) {
+               sock_shutdown(so, SHUT_RDWR);
+               sock_close(so);
+       }
+       mbuf_freem(mreq);
+       return (error);
+}
+
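+/*
+ * Print a message about the given NFS server to the requesting
+ * thread's terminal via tprintf(), including the error code if any.
+ */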
+int
+nfs_msg(thread_t thd,
+       const char *server,
+       const char *msg,
+       int error)
+{
+       proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
+       tpr_t tpr;
+
+       if (p)
+               tpr = tprintf_open(p);
+       else
+               tpr = NULL;
+       if (error)
+               tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
+       else
+               tprintf(tpr, "nfs server %s: %s\n", server, msg);
+       tprintf_close(tpr);
+       return (0);
+}
+
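+/*
+ * Mark an NFS mount as unresponsive: record the timeout state given in
+ * "flags", track the dead timeout on NFSMNT_DEADTIMEOUT mounts, post a
+ * VQ_DEAD or VQ_NOTRESP vfs event when warranted, and print the
+ * supplied message via nfs_msg().
+ */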
+void
+nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg)
+{
+       int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
+       uint32_t do_vfs_signal;
+       struct timeval now;
 
-       if (rep->r_nmp == NULL)
-               return (ENXIO);
-       statep = &rep->r_nmp->nm_state;
+       if (nmp == NULL)
+               return;
 
-       p = rep->r_procp;
-       if (rep->r_nmp->nm_flag & NFSMNT_INT)
-               slpflag = PCATCH;
-       while (*statep & NFSSTA_SNDLOCK) {
-               error = nfs_sigintr(rep->r_nmp, rep, p);
-               if (error)
-                       return (error);
-               *statep |= NFSSTA_WANTSND;
-               if (p != NULL && (proc_noremotehang(p)) != 0)
-                       slptimeo = hz;
-               tsleep((caddr_t)statep, slpflag | (PZERO - 1), "nfsndlck", slptimeo);
-               if (slpflag == PCATCH) {
-                       slpflag = 0;
-                       slptimeo = 2 * hz;
+       lck_mtx_lock(&nmp->nm_lock);
+
+       timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
+       if (nmp->nm_flag & NFSMNT_MUTEJUKEBOX) /* jukebox timeouts don't count as unresponsive if muted */
+                  timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
+       wasunresponsive = (nmp->nm_state & timeoutmask);
+
+       /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
+       softnobrowse = ((nmp->nm_flag & NFSMNT_SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
+
+       if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO))
+               nmp->nm_state |= NFSSTA_TIMEO;
+       if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO))
+               nmp->nm_state |= NFSSTA_LOCKTIMEO;
+       if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO))
+               nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
+
+       unresponsive = (nmp->nm_state & timeoutmask);
+
+       if (unresponsive && (nmp->nm_flag & NFSMNT_DEADTIMEOUT)) {
+               microuptime(&now);
+               if (!wasunresponsive) {
+                       nmp->nm_deadto_start = now.tv_sec;
+                       nfs_mount_sock_thread_wake(nmp);
+               } else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_deadtimeout) {
+                       if (!(nmp->nm_state & NFSSTA_DEAD))
+                               printf("nfs server %s: dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+                       nmp->nm_state |= NFSSTA_DEAD;
                }
-               /*
-                * Make sure while we slept that the mountpoint didn't go away.
-                * nfs_sigintr and callers expect it in tact.
-                */
-               if (!rep->r_nmp) 
-                       return (ENXIO); /* don't have lock until out of loop */
        }
-       *statep |= NFSSTA_SNDLOCK;
-       return (0);
+       lck_mtx_unlock(&nmp->nm_lock);
+
+       if (nmp->nm_state & NFSSTA_DEAD)
+               do_vfs_signal = VQ_DEAD;
+       else if (softnobrowse || wasunresponsive || !unresponsive)
+               do_vfs_signal = 0;
+       else
+               do_vfs_signal = VQ_NOTRESP;
+       if (do_vfs_signal)
+               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
+
+       nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
 }
 
-/*
- * Unlock the stream socket for others.
- */
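+/*
+ * An NFS server is responding again: print the optional message, clear
+ * the timeout state given in "flags", and post a VQ_NOTRESP event if
+ * the mount is no longer unresponsive.
+ */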
 void
-nfs_sndunlock(rep)
-       struct nfsreq *rep;
+nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
 {
-       int *statep;
+       int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
+       int do_vfs_signal;
 
-       if (rep->r_nmp == NULL)
+       if (nmp == NULL)
                return;
-       statep = &rep->r_nmp->nm_state;
-       if ((*statep & NFSSTA_SNDLOCK) == 0)
-               panic("nfs sndunlock");
-       *statep &= ~NFSSTA_SNDLOCK;
-       if (*statep & NFSSTA_WANTSND) {
-               *statep &= ~NFSSTA_WANTSND;
-               wakeup((caddr_t)statep);
-       }
+
+       if (msg)
+               nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
+
+       lck_mtx_lock(&nmp->nm_lock);
+
+       timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
+       if (nmp->nm_flag & NFSMNT_MUTEJUKEBOX) /* jukebox timeouts don't count as unresponsive if muted */
+                  timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
+       wasunresponsive = (nmp->nm_state & timeoutmask);
+
+       /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
+       softnobrowse = ((nmp->nm_flag & NFSMNT_SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
+
+       if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO))
+               nmp->nm_state &= ~NFSSTA_TIMEO;
+       if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO))
+               nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
+       if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO))
+               nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
+
+       unresponsive = (nmp->nm_state & timeoutmask);
+
+       if (nmp->nm_deadto_start)
+               nmp->nm_deadto_start = 0;
+       lck_mtx_unlock(&nmp->nm_lock);
+
+       if (softnobrowse)
+               do_vfs_signal = 0;
+       else
+               do_vfs_signal = (wasunresponsive && !unresponsive);
+       if (do_vfs_signal)
+               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
 }
 
-static int
-nfs_rcvlock(struct nfsreq *rep)
+
+#endif /* NFSCLIENT */
+
+#if NFSSERVER
+
+/*
+ * Generate the rpc reply header
+ * siz arg. is used to decide if adding a cluster is worthwhile
+ */
+int
+nfsrv_rephead(
+       struct nfsrv_descript *nd,
+       __unused struct nfsrv_sock *slp,
+       struct nfsm_chain *nmrepp,
+       size_t siz)
 {
-       int *statep;
-       int error, slpflag, slptimeo = 0;
+       mbuf_t mrep;
+       u_int32_t *tl;
+       struct nfsm_chain nmrep;
+       int err, error;
 
-       /* make sure we still have our mountpoint */
-       if (!rep->r_nmp) {
-               if (rep->r_mrep != NULL)
-                       return (EALREADY);
-               return (ENXIO);
-       }
+       err = nd->nd_repstat;
+       if (err && (nd->nd_vers == NFS_VER2))
+               siz = 0;
 
-       statep = &rep->r_nmp->nm_state;
-       FSDBG_TOP(534, rep->r_xid, rep, rep->r_nmp, *statep);
-       if (rep->r_nmp->nm_flag & NFSMNT_INT)
-               slpflag = PCATCH;
-       else
-               slpflag = 0;
-       while (*statep & NFSSTA_RCVLOCK) {
-               if ((error = nfs_sigintr(rep->r_nmp, rep, rep->r_procp))) {
-                       FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, 0x100);
-                       return (error);
-               } else if (rep->r_mrep != NULL) {
-                       /*
-                        * Don't bother sleeping if reply already arrived
-                        */
-                       FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, 0x101);
-                       return (EALREADY);
+       /*
+        * If this is a big reply, use a cluster; otherwise try to leave
+        * leading space for the lower level headers.
+        */
+       siz += RPC_REPLYSIZ;
+       if (siz >= nfs_mbuf_minclsize) {
+               error = mbuf_getpacket(MBUF_WAITOK, &mrep);
+       } else {
+               error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
+       }
+       if (error) {
+               /* unable to allocate packet */
+               /* XXX should we keep statistics for these errors? */
+               return (error);
+       }
+       if (siz < nfs_mbuf_minclsize) {
+               /* leave space for lower level headers */
+               tl = mbuf_data(mrep);
+               tl += 80/sizeof(*tl);  /* XXX max_hdr? XXX */
+               mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
+       }
+       nfsm_chain_init(&nmrep, mrep);
+       nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
+       nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
+       if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
+               nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
+               if (err & NFSERR_AUTHERR) {
+                       nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
+                       nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
+               } else {
+                       nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
+                       nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+                       nfsm_chain_add_32(error, &nmrep, RPC_VER2);
                }
-               FSDBG(534, rep->r_xid, rep, rep->r_nmp, 0x102);
-               *statep |= NFSSTA_WANTRCV;
-               /*
-                * We need to poll if we're P_NOREMOTEHANG so that we
-                * call nfs_sigintr periodically above.
-                */
-               if (rep->r_procp != NULL &&
-                   (proc_noremotehang(rep->r_procp)) != 0)
-                       slptimeo = hz;
-               tsleep((caddr_t)statep, slpflag | (PZERO - 1), "nfsrcvlk", slptimeo);
-               if (slpflag == PCATCH) {
-                       slpflag = 0;
-                       slptimeo = 2 * hz;
+       } else {
+               /* reply status */
+               nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
+               if (nd->nd_gss_context != NULL) {
+                       /* RPCSEC_GSS verifier */
+                       error = nfs_gss_svc_verf_put(nd, &nmrep);
+                       if (error) {
+                               nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
+                               goto done;
+                       }
+               } else {
+                       /* RPCAUTH_NULL verifier */
+                       nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
+                       nfsm_chain_add_32(error, &nmrep, 0);
                }
-               /*
-                * Make sure while we slept that the mountpoint didn't go away.
-                * nfs_sigintr and caller nfs_reply expect it intact.
-                */
-               if (!rep->r_nmp)  {
-                       FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, 0x103);
-                       return (ENXIO); /* don't have lock until out of loop */
+               /* accepted status */
+               switch (err) {
+               case EPROGUNAVAIL:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
+                       break;
+               case EPROGMISMATCH:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
+                       /* XXX hard coded versions? */
+                       nfsm_chain_add_32(error, &nmrep, NFS_VER2);
+                       nfsm_chain_add_32(error, &nmrep, NFS_VER3);
+                       break;
+               case EPROCUNAVAIL:
+                       nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
+                       break;
+               case EBADRPC:
+                       nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
+                       break;
+               default:
+                       nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
+                       if (nd->nd_gss_context != NULL)
+                               error = nfs_gss_svc_prepare_reply(nd, &nmrep);
+                       if (err != NFSERR_RETVOID)
+                               nfsm_chain_add_32(error, &nmrep,
+                                       (err ? nfsrv_errmap(nd, err) : 0));
+                       break;
                }
        }
-       /*
-        * nfs_reply will handle it if reply already arrived.
-        * (We may have slept or been preempted).
-        */
-       FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, *statep);
-       *statep |= NFSSTA_RCVLOCK;
+
+done:
+       nfsm_chain_build_done(error, &nmrep);
+       if (error) {
+               /* error composing reply header */
+               /* XXX should we keep statistics for these errors? */
+               mbuf_freem(mrep);
+               return (error);
+       }
+
+       *nmrepp = nmrep;
+       if ((err != 0) && (err != NFSERR_RETVOID))
+               OSAddAtomic(1, &nfsstats.srvrpc_errs);
        return (0);
 }
 
 /*
- * Unlock the stream socket for others.
+ * The nfs server send routine.
+ *
+ * - return EINTR or ERESTART if interrupted by a signal
+ * - return EPIPE if a connection is lost for connection based sockets (TCP...)
+ * - do any cleanup required by recoverable socket errors (???)
  */
-static void
-nfs_rcvunlock(struct nfsreq *rep)
+int
+nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
 {
-       int *statep;
-       
-       if (rep->r_nmp == NULL)
-               return;
-       statep = &rep->r_nmp->nm_state;
+       int error;
+       socket_t so = slp->ns_so;
+       struct sockaddr *sendnam;
+       struct msghdr msg;
 
-       FSDBG(533, statep, *statep, 0, 0);
-       if ((*statep & NFSSTA_RCVLOCK) == 0)
-               panic("nfs rcvunlock");
-       *statep &= ~NFSSTA_RCVLOCK;
-       if (*statep & NFSSTA_WANTRCV) {
-               *statep &= ~NFSSTA_WANTRCV;
-               wakeup((caddr_t)statep);
+       bzero(&msg, sizeof(msg));
+       if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
+               if ((sendnam = mbuf_data(nam))) {
+                       msg.msg_name = (caddr_t)sendnam;
+                       msg.msg_namelen = sendnam->sa_len;
+               }
        }
-}
+       error = sock_sendmbuf(so, &msg, top, 0, NULL);
+       if (!error)
+               return (0);
+       log(LOG_INFO, "nfsd send error %d\n", error);
+
+       if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM))
+               error = EPIPE;  /* zap TCP sockets if they time out on send */
 
+       /* Handle any recoverable (soft) socket errors here. (???) */
+       if (error != EINTR && error != ERESTART && error != EIO &&
+               error != EWOULDBLOCK && error != EPIPE)
+               error = 0;
+
+       return (error);
+}
 
-#ifndef NFS_NOSERVER
 /*
  * Socket upcall routine for the nfsd sockets.
- * The caddr_t arg is a pointer to the "struct nfssvc_sock".
+ * The caddr_t arg is a pointer to the "struct nfsrv_sock".
  * Essentially do as much as possible non-blocking, else punt and it will
  * be called with MBUF_WAITOK from an nfsd.
  */
 void
 nfsrv_rcv(socket_t so, caddr_t arg, int waitflag)
 {
-       struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
+       struct nfsrv_sock *slp = (struct nfsrv_sock *)arg;
 
-       if (!nfs_numnfsd || !(slp->ns_flag & SLP_VALID))
+       if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID))
                return;
 
        lck_rw_lock_exclusive(&slp->ns_rwlock);
@@ -2156,7 +3927,7 @@ nfsrv_rcv(socket_t so, caddr_t arg, int waitflag)
        /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
 }
 void
-nfsrv_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
+nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
 {
        mbuf_t m, mp, mhck, m2;
        int ns_flag=0, error;
@@ -2181,10 +3952,10 @@ nfsrv_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
        if (slp->ns_sotype == SOCK_STREAM) {
                /*
                 * If there are already records on the queue, defer soreceive()
-                * to an nfsd so that there is feedback to the TCP layer that
+                * to an(other) nfsd so that there is feedback to the TCP layer that
                 * the nfs servers are heavily loaded.
                 */
-               if (slp->ns_rec && waitflag == MBUF_DONTWAIT) {
+               if (slp->ns_rec) {
                        ns_flag = SLP_NEEDQ;
                        goto dorecs;
                }
@@ -2196,7 +3967,7 @@ nfsrv_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
                error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
                if (error || mp == NULL) {
                        if (error == EWOULDBLOCK)
-                               ns_flag = SLP_NEEDQ;
+                               ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
                        else
                                ns_flag = SLP_DISCONN;
                        goto dorecs;
@@ -2226,6 +3997,12 @@ nfsrv_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
                }
        } else {
                struct sockaddr_storage nam;
+
+               if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
+                       /* already have max # RPC records queued on this socket */
+                       ns_flag = SLP_NEEDQ;
+                       goto dorecs;
+               }
                
                bzero(&msg, sizeof(msg));
                msg.msg_name = (caddr_t)&nam;
@@ -2250,29 +4027,14 @@ nfsrv_rcv_locked(socket_t so, struct nfssvc_sock *slp, int waitflag)
                                }
                                if (slp->ns_recend)
                                        mbuf_setnextpkt(slp->ns_recend, m);
-                               else
+                               else {
                                        slp->ns_rec = m;
+                                       slp->ns_flag |= SLP_DOREC;
+                               }
                                slp->ns_recend = m;
                                mbuf_setnextpkt(m, NULL);
+                               slp->ns_reccnt++;
                        }
-#if 0
-                       if (error) {
-                               /*
-                                * This may be needed in the future to support
-                                * non-byte-stream connection-oriented protocols
-                                * such as SCTP.
-                                */
-                               /*
-                                * This (slp->ns_sotype == SOCK_STREAM) should really
-                                * be a check for PR_CONNREQUIRED.
-                                */
-                               if ((slp->ns_sotype == SOCK_STREAM)
-                                       && error != EWOULDBLOCK) {
-                                       ns_flag = SLP_DISCONN;
-                                       goto dorecs;
-                               }
-                       }
-#endif
                } while (mp);
        }
 
@@ -2283,9 +4045,9 @@ dorecs:
        if (ns_flag)
                slp->ns_flag |= ns_flag;
        if (waitflag == MBUF_DONTWAIT) {
-               int wake = (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)));
+               int wake = (slp->ns_flag & SLP_WORKTODO);
                lck_rw_done(&slp->ns_rwlock);
-               if (wake && nfs_numnfsd) {
+               if (wake && nfsd_thread_count) {
                        lck_mtx_lock(nfsd_mutex);
                        nfsrv_wakenfsd(slp);
                        lck_mtx_unlock(nfsd_mutex);
@@ -2298,16 +4060,14 @@ dorecs:
  * stream socket. The "waitflag" argument indicates whether or not it
  * can sleep.
  */
-static int
-nfsrv_getstream(slp, waitflag)
-       struct nfssvc_sock *slp;
-       int waitflag;
+int
+nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
 {
        mbuf_t m;
        char *cp1, *cp2, *mdata;
        int len, mlen, error;
        mbuf_t om, m2, recm;
-       u_long recmark;
+       u_int32_t recmark;
 
        if (slp->ns_flag & SLP_GETSTREAM)
                panic("nfs getstream");
@@ -2347,7 +4107,7 @@ nfsrv_getstream(slp, waitflag)
                        slp->ns_flag |= SLP_LASTFRAG;
                else
                        slp->ns_flag &= ~SLP_LASTFRAG;
-               if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) {
+               if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
                        slp->ns_flag &= ~SLP_GETSTREAM;
                        return (EPERM);
                }
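
For context on the record-mark handling in this hunk: RPC over TCP (RFC 5531) prefixes every fragment with a 4-byte big-endian word whose high bit flags the last fragment of a record and whose low 31 bits give the fragment length; those are the bits nfsrv_getstream() folds into SLP_LASTFRAG and ns_reclen before rejecting implausible lengths. A stand-alone decoder is sketched below; the struct and function names are illustrative, not from the source.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define RPC_LASTFRAG    0x80000000u     /* high bit of the record mark */

struct rpc_record_mark {
        bool            last_frag;      /* this fragment ends the RPC record   */
        uint32_t        frag_len;       /* length of the fragment that follows */
};

/* Decode the 4-byte record mark that precedes each RPC-over-TCP fragment. */
static struct rpc_record_mark
rpc_decode_record_mark(const unsigned char wire[4])
{
        uint32_t raw;
        struct rpc_record_mark rm;

        memcpy(&raw, wire, sizeof(raw));
        raw = ntohl(raw);                       /* record marks are big-endian */
        rm.last_frag = (raw & RPC_LASTFRAG) != 0;
        rm.frag_len = raw & ~RPC_LASTFRAG;
        return (rm);
}
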
@@ -2435,8 +4195,10 @@ nfsrv_getstream(slp, waitflag)
            if (slp->ns_flag & SLP_LASTFRAG) {
                if (slp->ns_recend)
                    mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
-               else
+               else {
                    slp->ns_rec = slp->ns_frag;
+                   slp->ns_flag |= SLP_DOREC;
+               }
                slp->ns_recend = slp->ns_frag;
                slp->ns_frag = NULL;
            }
@@ -2447,18 +4209,18 @@ nfsrv_getstream(slp, waitflag)
  * Parse an RPC header.
  */
 int
-nfsrv_dorec(slp, nfsd, ndp)
-       struct nfssvc_sock *slp;
-       struct nfsd *nfsd;
-       struct nfsrv_descript **ndp;
+nfsrv_dorec(
+       struct nfsrv_sock *slp,
+       struct nfsd *nfsd,
+       struct nfsrv_descript **ndp)
 {
        mbuf_t m;
        mbuf_t nam;
        struct nfsrv_descript *nd;
-       int error;
+       int error = 0;
 
        *ndp = NULL;
-       if ((slp->ns_flag & SLP_VALID) == 0 || (slp->ns_rec == NULL))
+       if (!(slp->ns_flag & (SLP_VALID|SLP_DOREC)) || (slp->ns_rec == NULL))
                return (ENOBUFS);
        MALLOC_ZONE(nd, struct nfsrv_descript *,
                        sizeof (struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
@@ -2468,8 +4230,11 @@ nfsrv_dorec(slp, nfsd, ndp)
        slp->ns_rec = mbuf_nextpkt(m);
        if (slp->ns_rec)
                mbuf_setnextpkt(m, NULL);
-       else
+       else {
+               slp->ns_flag &= ~SLP_DOREC;
                slp->ns_recend = NULL;
+       }
+       slp->ns_reccnt--;
        if (mbuf_type(m) == MBUF_TYPE_SONAME) {
                nam = m;
                m = mbuf_next(m);
@@ -2477,16 +4242,17 @@ nfsrv_dorec(slp, nfsd, ndp)
                        panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
        } else
                nam = NULL;
-       nd->nd_md = nd->nd_mrep = m;
        nd->nd_nam2 = nam;
-       nd->nd_dpos = mbuf_data(m);
-       error = nfs_getreq(nd, nfsd, TRUE);
+       nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
+       if (!error)
+               error = nfsrv_getreq(nd);
        if (error) {
                if (nam)
                        mbuf_freem(nam);
-               FREE_ZONE((caddr_t)nd,  sizeof *nd, M_NFSRVDESC);
+               FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC);
                return (error);
        }
+       nd->nd_mrep = NULL;
        *ndp = nd;
        nfsd->nfsd_nd = nd;
        return (0);
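
The dequeue at the top of nfsrv_dorec() bundles three pieces of bookkeeping: advance the head of the record queue, clear the tail pointer and the SLP_DOREC flag when the last record is taken, and decrement ns_reccnt. A sketch of the same step over a plain linked list, with illustrative names (rec, rec_queue) standing in for the mbuf packet chain:

#include <stdbool.h>
#include <stddef.h>

struct rec {
        struct rec *next;               /* analogue of mbuf_nextpkt() chaining */
};

struct rec_queue {
        struct rec *head;               /* analogue of ns_rec    */
        struct rec *tail;               /* analogue of ns_recend */
        int count;                      /* analogue of ns_reccnt */
        bool has_work;                  /* analogue of SLP_DOREC */
};

/* Take the first queued record, fixing up tail and flags if it was the last. */
static struct rec *
rec_dequeue(struct rec_queue *q)
{
        struct rec *r = q->head;

        if (r == NULL)
                return (NULL);
        q->head = r->next;
        r->next = NULL;
        if (q->head == NULL) {          /* queue drained */
                q->tail = NULL;
                q->has_work = false;
        }
        q->count--;
        return (r);
}
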
@@ -2498,269 +4264,149 @@ nfsrv_dorec(slp, nfsd, ndp)
  * - fill in the cred struct.
  */
 int
-nfs_getreq(nd, nfsd, has_header)
-       struct nfsrv_descript *nd;
-       struct nfsd *nfsd;
-       int has_header;
+nfsrv_getreq(struct nfsrv_descript *nd)
 {
+       struct nfsm_chain *nmreq;
        int len, i;
-       u_long *tl;
-       long t1;
-       uio_t uiop;
-       caddr_t dpos, cp2, cp;
-       u_long nfsvers, auth_type;
-       uid_t nickuid;
-       int error = 0, ticklen;
-       mbuf_t mrep, md;
-       struct nfsuid *nuidp;
+       u_int32_t nfsvers, auth_type;
+       int error = 0;
        uid_t user_id;
        gid_t group_id;
        int ngroups;
        struct ucred temp_cred;
-       struct timeval tvin, tvout, now;
-       char uio_buf[ UIO_SIZEOF(1) ];
-#if 0                          /* until encrypted keys are implemented */
-       NFSKERBKEYSCHED_T keys; /* stores key schedule */
-#endif
+       uint32_t val;
 
        nd->nd_cr = NULL;
-
-       mrep = nd->nd_mrep;
-       md = nd->nd_md;
-       dpos = nd->nd_dpos;
-       if (has_header) {
-               nfsm_dissect(tl, u_long *, 10 * NFSX_UNSIGNED);
-               nd->nd_retxid = fxdr_unsigned(u_long, *tl++);
-               if (*tl++ != rpc_call) {
-                       mbuf_freem(mrep);
-                       return (EBADRPC);
-               }
-       } else
-               nfsm_dissect(tl, u_long *, 8 * NFSX_UNSIGNED);
+       nd->nd_gss_context = NULL;
+       nd->nd_gss_seqnum = 0;
+       nd->nd_gss_mb = NULL;
+
+       user_id = group_id = -2;
+       val = auth_type = len = 0;
+
+       nmreq = &nd->nd_nmreq;
+       nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
+       nfsm_chain_get_32(error, nmreq, val);           // RPC Call
+       if (!error && (val != RPC_CALL))
+               error = EBADRPC;
+       nfsmout_if(error);
        nd->nd_repstat = 0;
-       nd->nd_flag = 0;
-       if (*tl++ != rpc_vers) {
+       nfsm_chain_get_32(error, nmreq, val);   // RPC Version
+       nfsmout_if(error);
+       if (val != RPC_VER2) {
                nd->nd_repstat = ERPCMISMATCH;
                nd->nd_procnum = NFSPROC_NOOP;
                return (0);
        }
-       if (*tl != nfs_prog) {
+       nfsm_chain_get_32(error, nmreq, val);   // RPC Program Number
+       nfsmout_if(error);
+       if (val != NFS_PROG) {
                nd->nd_repstat = EPROGUNAVAIL;
                nd->nd_procnum = NFSPROC_NOOP;
                return (0);
        }
-       tl++;
-       nfsvers = fxdr_unsigned(u_long, *tl++);
+       nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
+       nfsmout_if(error);
        if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
                nd->nd_repstat = EPROGMISMATCH;
                nd->nd_procnum = NFSPROC_NOOP;
                return (0);
        }
-       else if (nfsvers == NFS_VER3)
-               nd->nd_flag = ND_NFSV3;
-       nd->nd_procnum = fxdr_unsigned(u_long, *tl++);
-       if (nd->nd_procnum == NFSPROC_NULL)
-               return (0);
+       nd->nd_vers = nfsvers;
+       nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
+       nfsmout_if(error);
        if ((nd->nd_procnum >= NFS_NPROCS) ||
-               (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
+               ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
                nd->nd_repstat = EPROCUNAVAIL;
                nd->nd_procnum = NFSPROC_NOOP;
                return (0);
        }
-       if ((nd->nd_flag & ND_NFSV3) == 0)
+       if (nfsvers != NFS_VER3)
                nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
-       auth_type = *tl++;
-       len = fxdr_unsigned(int, *tl++);
-       if (len < 0 || len > RPCAUTH_MAXSIZ) {
-               mbuf_freem(mrep);
-               return (EBADRPC);
-       }
-
-       nd->nd_flag &= ~ND_KERBAUTH;
-       /*
-        * Handle auth_unix or auth_kerb.
-        */
-       if (auth_type == rpc_auth_unix) {
-               len = fxdr_unsigned(int, *++tl);
-               if (len < 0 || len > NFS_MAXNAMLEN) {
-                       mbuf_freem(mrep);
-                       return (EBADRPC);
-               }
+       nfsm_chain_get_32(error, nmreq, auth_type);     // Auth Flavor
+       nfsm_chain_get_32(error, nmreq, len);           // Auth Length
+       if (!error && (len < 0 || len > RPCAUTH_MAXSIZ))
+               error = EBADRPC;
+       nfsmout_if(error);
+
+       /* Handle authentication */
+       if (auth_type == RPCAUTH_UNIX) {
+               if (nd->nd_procnum == NFSPROC_NULL)
+                       return (0);
+               nd->nd_sec = RPCAUTH_UNIX;
+               nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);    // skip stamp
+               nfsm_chain_get_32(error, nmreq, len);           // hostname length
+               if (len < 0 || len > NFS_MAXNAMLEN)
+                       error = EBADRPC;
+               nfsm_chain_adv(error, nmreq, nfsm_rndup(len));  // skip hostname
+               nfsmout_if(error);
+
+               /* create a temporary credential using the bits from the wire */
                bzero(&temp_cred, sizeof(temp_cred));
-               nfsm_adv(nfsm_rndup(len));
-               nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
-               user_id = fxdr_unsigned(uid_t, *tl++);
-               group_id = fxdr_unsigned(gid_t, *tl++);
+               nfsm_chain_get_32(error, nmreq, user_id);
+               nfsm_chain_get_32(error, nmreq, group_id);
                temp_cred.cr_groups[0] = group_id;
-               len = fxdr_unsigned(int, *tl);
-               if (len < 0 || len > RPCAUTH_UNIXGIDS) {
-                       mbuf_freem(mrep);
-                       return (EBADRPC);
-               }
-               nfsm_dissect(tl, u_long *, (len + 2) * NFSX_UNSIGNED);
+               nfsm_chain_get_32(error, nmreq, len);           // extra GID count
+               if ((len < 0) || (len > RPCAUTH_UNIXGIDS))
+                       error = EBADRPC;
+               nfsmout_if(error);
                for (i = 1; i <= len; i++)
-                   if (i < NGROUPS)
-                       temp_cred.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
-                   else
-                       tl++;
+                       if (i < NGROUPS)
+                               nfsm_chain_get_32(error, nmreq, temp_cred.cr_groups[i]);
+                       else
+                               nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
+               nfsmout_if(error);
                ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
                if (ngroups > 1)
-                   nfsrvw_sort(&temp_cred.cr_groups[0], ngroups);
-               len = fxdr_unsigned(int, *++tl);
-               if (len < 0 || len > RPCAUTH_MAXSIZ) {
-                       mbuf_freem(mrep);
-                       return (EBADRPC);
-               }
+                       nfsrv_group_sort(&temp_cred.cr_groups[0], ngroups);
+               nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);    // verifier flavor (should be AUTH_NONE)
+               nfsm_chain_get_32(error, nmreq, len);           // verifier length
+               if (len < 0 || len > RPCAUTH_MAXSIZ)
+                       error = EBADRPC;
+               if (len > 0)
+                       nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
+
+               /* request creation of a real credential */
                temp_cred.cr_uid = user_id;
                temp_cred.cr_ngroups = ngroups;
-               nd->nd_cr = kauth_cred_create(&temp_cred); 
+               nd->nd_cr = kauth_cred_create(&temp_cred);
                if (nd->nd_cr == NULL) {
                        nd->nd_repstat = ENOMEM;
                        nd->nd_procnum = NFSPROC_NOOP;
                        return (0);
                }
-               if (len > 0)
-                       nfsm_adv(nfsm_rndup(len));
-       } else if (auth_type == rpc_auth_kerb) {
-               switch (fxdr_unsigned(int, *tl++)) {
-               case RPCAKN_FULLNAME:
-                       ticklen = fxdr_unsigned(int, *tl);
-                       *((u_long *)nfsd->nfsd_authstr) = *tl;
-                       uiop = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, 
-                                               &uio_buf[0], sizeof(uio_buf));
-                       if (!uiop) {
-                               nd->nd_repstat = ENOMEM;
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-
-                       // LP64todo - fix this
-                       nfsd->nfsd_authlen = (nfsm_rndup(ticklen) + (NFSX_UNSIGNED * 2));
-                       if ((nfsm_rndup(ticklen) + NFSX_UNSIGNED) > (len - 2 * NFSX_UNSIGNED)) {
-                               mbuf_freem(mrep);
-                               return (EBADRPC);
-                       }
-                       uio_addiov(uiop, CAST_USER_ADDR_T(&nfsd->nfsd_authstr[4]), RPCAUTH_MAXSIZ - 4);
-                       // LP64todo - fix this
-                       nfsm_mtouio(uiop, uio_resid(uiop));
-                       nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
-                       if (*tl++ != rpc_auth_kerb ||
-                               fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
-                               printf("Bad kerb verifier\n");
-                               nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
-                       tl = (u_long *)cp;
-                       if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
-                               printf("Not fullname kerb verifier\n");
-                               nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       cp += NFSX_UNSIGNED;
-                       bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
-                       nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
-                       nd->nd_flag |= ND_KERBFULL;
-                       nfsd->nfsd_flag |= NFSD_NEEDAUTH;
-                       break;
-               case RPCAKN_NICKNAME:
-                       if (len != 2 * NFSX_UNSIGNED) {
-                               printf("Kerb nickname short\n");
-                               nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       nickuid = fxdr_unsigned(uid_t, *tl);
-                       nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
-                       if (*tl++ != rpc_auth_kerb ||
-                               fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
-                               printf("Kerb nick verifier bad\n");
-                               nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
-                       tvin.tv_sec = *tl++;
-                       tvin.tv_usec = *tl;
-
-                       for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
-                           nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
-                               if (kauth_cred_getuid(nuidp->nu_cr) == nickuid &&
-                                   (!nd->nd_nam2 ||
-                                    netaddr_match(NU_NETFAM(nuidp),
-                                     &nuidp->nu_haddr, nd->nd_nam2)))
-                                       break;
-                       }
-                       if (!nuidp) {
-                               nd->nd_repstat =
-                                       (NFSERR_AUTHERR|AUTH_REJECTCRED);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-
-                       /*
-                        * Now, decrypt the timestamp using the session key
-                        * and validate it.
-                        */
-#if NFSKERB
-                       XXX
-#endif
-
-                       tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
-                       tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
-                       microtime(&now);
-                       if (nuidp->nu_expire < now.tv_sec ||
-                           nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
-                           (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
-                            nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
-                               nuidp->nu_expire = 0;
-                               nd->nd_repstat =
-                                   (NFSERR_AUTHERR|AUTH_REJECTVERF);
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       bzero(&temp_cred, sizeof(temp_cred));
-                       ngroups = nuidp->nu_cr->cr_ngroups;
-                       for (i = 0; i < ngroups; i++)
-                               temp_cred.cr_groups[i] = nuidp->nu_cr->cr_groups[i];
-                       if (ngroups > 1)
-                               nfsrvw_sort(&temp_cred.cr_groups[0], ngroups);
-
-                       temp_cred.cr_uid = kauth_cred_getuid(nuidp->nu_cr);
-                       temp_cred.cr_ngroups = ngroups;
-                       nd->nd_cr = kauth_cred_create(&temp_cred); 
-                       if (!nd->nd_cr) {
-                               nd->nd_repstat = ENOMEM;
-                               nd->nd_procnum = NFSPROC_NOOP;
-                               return (0);
-                       }
-                       nd->nd_flag |= ND_KERBNICK;
-               };
+       } else if (auth_type == RPCSEC_GSS) {
+               error = nfs_gss_svc_cred_get(nd, nmreq);
+               if (error) {
+                       if (error == EINVAL)
+                               goto nfsmout;   // drop the request
+                       nd->nd_repstat = error;
+                       nd->nd_procnum = NFSPROC_NOOP;
+                       return (0);
+               }
        } else {
+               if (nd->nd_procnum == NFSPROC_NULL)     // assume it's AUTH_NONE
+                       return (0);
                nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
                nd->nd_procnum = NFSPROC_NOOP;
                return (0);
        }
-
-       nd->nd_md = md;
-       nd->nd_dpos = dpos;
        return (0);
 nfsmout:
-       if (nd->nd_cr)
-               kauth_cred_rele(nd->nd_cr);
+       if (IS_VALID_CRED(nd->nd_cr))
+               kauth_cred_unref(&nd->nd_cr);
+       nfsm_chain_cleanup(nmreq);
        return (error);
 }
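
The front of nfsrv_getreq() is a straight walk over the fixed RPC call header defined by RFC 5531: XID, message type (0 for a call), RPC version 2, program number (100003 for NFS), NFS version, and procedure number, each a 4-byte big-endian XDR word, followed by the credential and verifier (flavor plus opaque length). A user-space sketch of just that fixed portion; rpc_call_hdr and rpc_decode_call_hdr are illustrative names, not part of the source.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct rpc_call_hdr {
        uint32_t xid;           /* transaction id, echoed in the reply */
        uint32_t msg_type;      /* 0 = CALL                            */
        uint32_t rpcvers;       /* must be 2                           */
        uint32_t prog;          /* 100003 for NFS                      */
        uint32_t vers;          /* NFS version: 2 or 3 for this server */
        uint32_t proc;          /* procedure number                    */
};

/*
 * Decode the six fixed words at the front of an RPC call.  Each field is an
 * XDR (big-endian) 32-bit word, which is why nfsrv_getreq() reads them with
 * nfsm_chain_get_32().  Returns 0 on success, -1 if the buffer is too short.
 */
static int
rpc_decode_call_hdr(const unsigned char *buf, size_t len, struct rpc_call_hdr *h)
{
        uint32_t w[6];
        size_t i;

        if (len < sizeof(w))
                return (-1);
        memcpy(w, buf, sizeof(w));
        for (i = 0; i < 6; i++)
                w[i] = ntohl(w[i]);
        h->xid = w[0];
        h->msg_type = w[1];
        h->rpcvers = w[2];
        h->prog = w[3];
        h->vers = w[4];
        h->proc = w[5];
        return (0);
}
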
 
 /*
  * Search for a sleeping nfsd and wake it up.
- * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
- * running nfsds will go look for the work in the nfssvc_sock list.
+ * SIDE EFFECT: If none found, make sure the socket is queued up so that one
+ * of the running nfsds will go look for the work in the nfsrv_sockwait list.
  * Note: Must be called with nfsd_mutex held.
  */
 void
-nfsrv_wakenfsd(struct nfssvc_sock *slp)
+nfsrv_wakenfsd(struct nfsrv_sock *slp)
 {
        struct nfsd *nd;
 
@@ -2768,89 +4414,22 @@ nfsrv_wakenfsd(struct nfssvc_sock *slp)
                return;
 
        lck_rw_lock_exclusive(&slp->ns_rwlock);
-
-       if (nfsd_waiting) {
-               TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
-                       if (nd->nfsd_flag & NFSD_WAITING) {
-                               nd->nfsd_flag &= ~NFSD_WAITING;
-                               if (nd->nfsd_slp)
-                                       panic("nfsd wakeup");
-                               slp->ns_sref++;
-                               nd->nfsd_slp = slp;
-                               lck_rw_done(&slp->ns_rwlock);
-                               wakeup((caddr_t)nd);
-                               return;
-                       }
-               }
+       /* if there's work to do on this socket, make sure it's queued up */
+       if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
+               TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
+               slp->ns_flag |= SLP_WAITQ;
        }
-
-       slp->ns_flag |= SLP_DOREC;
-
        lck_rw_done(&slp->ns_rwlock);
 
-       nfsd_head_flag |= NFSD_CHECKSLP;
-}
-#endif /* NFS_NOSERVER */
-
-static int
-nfs_msg(proc_t p,
-       const char *server,
-       const char *msg,
-       int error)
-{
-       tpr_t tpr;
-
-       if (p)
-               tpr = tprintf_open(p);
-       else
-               tpr = NULL;
-       if (error)
-               tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg,
-                   error);
-       else
-               tprintf(tpr, "nfs server %s: %s\n", server, msg);
-       tprintf_close(tpr);
-       return (0);
-}
-
-void
-nfs_down(nmp, proc, error, flags, msg)
-       struct nfsmount *nmp;
-       proc_t proc;
-       int error, flags;
-       const char *msg;
-{
-       if (nmp == NULL)
+       /* wake up a waiting nfsd, if possible */
+       nd = TAILQ_FIRST(&nfsd_queue);
+       if (!nd)
                return;
-       if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
-               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 0);
-               nmp->nm_state |= NFSSTA_TIMEO;
-       }
-       if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
-               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESPLOCK, 0);
-               nmp->nm_state |= NFSSTA_LOCKTIMEO;
-       }
-       nfs_msg(proc, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
-}
 
-void
-nfs_up(nmp, proc, flags, msg)
-       struct nfsmount *nmp;
-       proc_t proc;
-       int flags;
-       const char *msg;
-{
-       if (nmp == NULL)
-               return;
-       if (msg)
-               nfs_msg(proc, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
-       if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
-               nmp->nm_state &= ~NFSSTA_TIMEO;
-               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
-       }
-       if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
-               nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
-               vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESPLOCK, 1);
-       }
+       TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
+       nd->nfsd_flag &= ~NFSD_WAITING;
+       wakeup(nd);
 }
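
The rewritten nfsrv_wakenfsd() splits its job in two: make sure a socket with pending work is queued on nfsrv_sockwait, then wake at most one idle nfsd, which pulls sockets off that queue itself. A hedged user-space analogue of the same queue-then-wake pattern using <sys/queue.h> and pthreads; every name below (work_sock, worker, pool_mutex, wake_worker_for) is illustrative rather than taken from the source.

#include <sys/queue.h>
#include <pthread.h>
#include <stdbool.h>

struct work_sock {
        bool has_work;                          /* analogue of SLP_WORKTODO */
        bool queued;                            /* analogue of SLP_QUEUED   */
        TAILQ_ENTRY(work_sock) sock_link;
};

struct worker {
        pthread_cond_t idle_cv;                 /* worker sleeps here when idle */
        TAILQ_ENTRY(worker) worker_link;
};

/* pool_mutex plays the role of nfsd_mutex: the caller holds it around wake_worker_for(). */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static TAILQ_HEAD(, work_sock) sock_waitq = TAILQ_HEAD_INITIALIZER(sock_waitq);
static TAILQ_HEAD(, worker) idle_workers = TAILQ_HEAD_INITIALIZER(idle_workers);

static void
wake_worker_for(struct work_sock *ws)
{
        struct worker *w;

        /* if this socket has work, make sure it is on the wait queue */
        if (ws->has_work && !ws->queued) {
                TAILQ_INSERT_TAIL(&sock_waitq, ws, sock_link);
                ws->queued = true;
        }
        /* wake one idle worker, if any; it will drain sock_waitq itself */
        w = TAILQ_FIRST(&idle_workers);
        if (w != NULL) {
                TAILQ_REMOVE(&idle_workers, w, worker_link);
                pthread_cond_signal(&w->idle_cv);
        }
}
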
 
+#endif /* NFSSERVER */
+