+ * NFS socket reconnect routine:
+ * Called when a connection is broken.
+ * - disconnect the old socket
+ * - nfs_connect() again
+ * - set R_MUSTRESEND for all outstanding requests on mount point
+ * If this fails the mount point is DEAD!
+ */
+int
+nfs_reconnect(struct nfsmount *nmp)
+{
+ struct nfsreq *rq;
+ struct timeval now;
+ thread_t thd = current_thread();
+ int error, wentdown = 0, verbose = 1;
+ time_t lastmsg;
+ int timeo;
+
+ microuptime(&now);
+ lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
+
+ nfs_disconnect(nmp);
+
+ lck_mtx_lock(&nmp->nm_lock);
+ timeo = nfs_is_squishy(nmp) ? 8 : 30;
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ while ((error = nfs_connect(nmp, verbose, timeo))) {
+ verbose = 0;
+ nfs_disconnect(nmp);
+ if ((error == EINTR) || (error == ERESTART))
+ return (EINTR);
+ if (error == EIO)
+ return (EIO);
+ microuptime(&now);
+ if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
+ lastmsg = now.tv_sec;
+ nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect");
+ wentdown = 1;
+ }
+ lck_mtx_lock(&nmp->nm_lock);
+ if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
+ /* we're not yet completely mounted and */
+ /* we can't reconnect, so we fail */
+ lck_mtx_unlock(&nmp->nm_lock);
+ NFS_SOCK_DBG("Not mounted returning %d\n", error);
+ return (error);
+ }
+ nfs_mount_check_dead_timeout(nmp);
+ if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (error);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2*hz);
+ if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
+ return (error);
+ }
+
+ if (wentdown)
+ nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
+
+ /*
+ * Loop through outstanding request list and mark all requests
+ * as needing a resend. (Though nfs_need_reconnect() probably
+ * marked them all already.)
+ */
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+ if (rq->r_nmp == nmp) {
+ lck_mtx_lock(&rq->r_mtx);
+ if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
+ rq->r_flags |= R_MUSTRESEND;
+ rq->r_rtt = -1;
+ wakeup(rq);
+ if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ nfs_asyncio_resend(rq);
+ }
+ lck_mtx_unlock(&rq->r_mtx);
+ }
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+ return (0);
+}
+
+/*
+ * NFS disconnect. Clean up and unlink.
+ */
+void
+nfs_disconnect(struct nfsmount *nmp)
+{
+ struct nfs_socket *nso;
+
+ lck_mtx_lock(&nmp->nm_lock);
+tryagain:
+ if (nmp->nm_nso) {
+ struct timespec ts = { 1, 0 };
+ if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
+ nmp->nm_state |= NFSSTA_WANTSND;
+ msleep(&nmp->nm_state, &nmp->nm_lock, PZERO-1, "nfswaitsending", &ts);
+ goto tryagain;
+ }
+ if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
+ msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO-1, "nfswaitpoke", &ts);
+ goto tryagain;
+ }
+ nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
+ nmp->nm_sockflags &= ~NMSOCK_READY;
+ nso = nmp->nm_nso;
+ nmp->nm_nso = NULL;
+ if (nso->nso_saddr == nmp->nm_saddr)
+ nso->nso_saddr = NULL;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_socket_destroy(nso);
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
+ lck_mtx_unlock(&nmp->nm_lock);
+ } else {
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+}
+
+/*
+ * mark an NFS mount as needing a reconnect and resends.
+ */
+void
+nfs_need_reconnect(struct nfsmount *nmp)
+{
+ struct nfsreq *rq;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~(NMSOCK_READY|NMSOCK_SETUP);
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ /*
+ * Loop through outstanding request list and
+ * mark all requests as needing a resend.
+ */
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+ if (rq->r_nmp == nmp) {
+ lck_mtx_lock(&rq->r_mtx);
+ if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
+ rq->r_flags |= R_MUSTRESEND;
+ rq->r_rtt = -1;
+ wakeup(rq);
+ if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ nfs_asyncio_resend(rq);
+ }
+ lck_mtx_unlock(&rq->r_mtx);
+ }
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+}
+
+
+/*
+ * thread to handle miscellaneous async NFS socket work (reconnects/resends)
+ */
+void
+nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
+{
+ struct nfsmount *nmp = arg;
+ struct timespec ts = { 30, 0 };
+ thread_t thd = current_thread();
+ struct nfsreq *req;
+ struct timeval now;
+ int error, dofinish;
+ nfsnode_t np;
+ int do_reconnect_sleep = 0;
+
+ lck_mtx_lock(&nmp->nm_lock);
+
+ while (!(nmp->nm_sockflags & NMSOCK_READY) ||
+ !TAILQ_EMPTY(&nmp->nm_resendq) ||
+ !LIST_EMPTY(&nmp->nm_monlist) ||
+ nmp->nm_deadto_start ||
+ (nmp->nm_state & NFSSTA_RECOVER) ||
+ ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq)))
+ {
+ if (nmp->nm_sockflags & NMSOCK_UNMOUNT)
+ break;
+ /* do reconnect, if necessary */
+ if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_FORCE)) {
+ if (nmp->nm_reconnect_start <= 0) {
+ microuptime(&now);
+ nmp->nm_reconnect_start = now.tv_sec;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ /*
+ * XXX We don't want to call reconnect again right away if the previous
+ * attempt returned an error without blocking.  In the past this has
+ * caused machines to spam the server with NULL procedure calls.
+ */
+ if (do_reconnect_sleep)
+ tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
+ error = nfs_reconnect(nmp);
+ if (error) {
+ int lvl = 7;
+ if (error == EIO || error == EINTR) {
+ lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
+ }
+ nfs_printf(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
+ } else {
+ nmp->nm_reconnect_start = 0;
+ do_reconnect_sleep = 0;
+ }
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ if ((nmp->nm_sockflags & NMSOCK_READY) &&
+ (nmp->nm_state & NFSSTA_RECOVER) &&
+ !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
+ !(nmp->nm_state & NFSSTA_FORCE)) {
+ /* perform state recovery */
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_recover(nmp);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ /* handle NFSv4 delegation returns */
+ while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & NFSSTA_FORCE) &&
+ (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
+ ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ /* do resends, if necessary/possible */
+ while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) || (nmp->nm_state & NFSSTA_FORCE)) &&
+ ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
+ if (req->r_resendtime)
+ microuptime(&now);
+ while (req && !(nmp->nm_state & NFSSTA_FORCE) && req->r_resendtime && (now.tv_sec < req->r_resendtime))
+ req = TAILQ_NEXT(req, r_rchain);
+ if (!req)
+ break;
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_rchain.tqe_next = NFSREQNOLIST;
+ lck_mtx_unlock(&nmp->nm_lock);
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_error || req->r_nmrep.nmc_mhead) {
+ dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ continue;
+ }
+ if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
+ req->r_flags &= ~R_RESTART;
+ req->r_resendtime = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ /* async RPCs on GSS mounts need to be rebuilt and resent. */
+ nfs_reqdequeue(req);
+ if (nfs_request_using_gss(req)) {
+ nfs_gss_clnt_rpcdone(req);
+ error = nfs_gss_clnt_args_restore(req);
+ if (error == ENEEDAUTH)
+ req->r_xid = 0;
+ }
+ NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
+ nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
+ req->r_flags, req->r_rtt);
+ error = !req->r_nmp ? ENXIO : 0; /* unmounted? */
+ if (!error)
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ if (!error)
+ error = nfs_request_add_header(req);
+ if (!error)
+ error = nfs_request_send(req, 0);
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ if (error)
+ req->r_error = error;
+ wakeup(req);
+ dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ error = 0;
+ continue;
+ }
+ NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
+ req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
+ error = !req->r_nmp ? ENXIO : 0; /* unmounted? */
+ if (!error)
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ if (!error) {
+ req->r_flags |= R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ error = nfs_send(req, 0);
+ lck_mtx_lock(&req->r_mtx);
+ if (!error) {
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ lck_mtx_lock(&nmp->nm_lock);
+ continue;
+ }
+ }
+ req->r_error = error;
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ if (nmp->nm_deadto_start)
+ nfs_mount_check_dead_timeout(nmp);
+ if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))
+ break;
+ /* check monitored nodes, if necessary/possible */
+ if (!LIST_EMPTY(&nmp->nm_monlist)) {
+ nmp->nm_state |= NFSSTA_MONITOR_SCAN;
+ LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
+ if (!(nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE)))
+ break;
+ np->n_mflag |= NMMONSCANINPROG;
+ lck_mtx_unlock(&nmp->nm_lock);
+ error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED|NGA_MONITOR));
+ if (!error && ISSET(np->n_flag, NUPDATESIZE)) /* update quickly to avoid multiple events */
+ nfs_data_update_size(np, 0);
+ lck_mtx_lock(&nmp->nm_lock);
+ np->n_mflag &= ~NMMONSCANINPROG;
+ if (np->n_mflag & NMMONSCANWANT) {
+ np->n_mflag &= ~NMMONSCANWANT;
+ wakeup(&np->n_mflag);
+ }
+ if (error || !(nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE)))
+ break;
+ }
+ nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
+ if (nmp->nm_state & NFSSTA_UNMOUNTING)
+ wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
+ }
+ if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING))) {
+ if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
+ (nmp->nm_state & NFSSTA_RECOVER))
+ ts.tv_sec = 1;
+ else
+ ts.tv_sec = 5;
+ msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
+ }
+ }
+
+ /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
+ if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
+ (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
+ (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs3_umount_rpc(nmp, vfs_context_kernel(),
+ (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+
+ if (nmp->nm_sockthd == thd)
+ nmp->nm_sockthd = NULL;
+ lck_mtx_unlock(&nmp->nm_lock);
+ wakeup(&nmp->nm_sockthd);
+ thread_terminate(thd);
+}
+
+/* start or wake a mount's socket thread */
+void
+nfs_mount_sock_thread_wake(struct nfsmount *nmp)
+{
+ if (nmp->nm_sockthd)
+ wakeup(&nmp->nm_sockthd);
+ else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS)
+ thread_deallocate(nmp->nm_sockthd);
+}
+
+/*
+ * Check if we should mark the mount dead because the
+ * unresponsive mount has reached the dead timeout.
+ * (must be called with nmp locked)
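+ *
+ * Note: nfs_is_squishy() is expected to adjust nm_curdeadtimeout
+ * (typically shortening it for "squishy" mounts), which is why the
+ * message below distinguishes a "squished" death from an ordinary
+ * dead timeout.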
+ */
+void
+nfs_mount_check_dead_timeout(struct nfsmount *nmp)
+{
+ struct timeval now;
+
+ if (nmp->nm_deadto_start == 0)
+ return;
+ if (nmp->nm_state & NFSSTA_DEAD)
+ return;
+ nfs_is_squishy(nmp);
+ if (nmp->nm_curdeadtimeout <= 0)
+ return;
+ microuptime(&now);
+ if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout)
+ return;
+ printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+ (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
+ nmp->nm_state |= NFSSTA_DEAD;
+ vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_DEAD, 0);
+}
+
+/*
+ * NFS callback channel socket state
+ */
+struct nfs_callback_socket
+{
+ TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
+ socket_t ncbs_so; /* the socket */
+ struct sockaddr_storage ncbs_saddr; /* socket address */
+ struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
+ time_t ncbs_stamp; /* last accessed at */
+ uint32_t ncbs_flags; /* see below */
+};
+#define NCBSOCK_UPCALL 0x0001
+#define NCBSOCK_UPCALLWANT 0x0002
+#define NCBSOCK_DEAD 0x0004
+
+/*
+ * NFS callback channel state
+ *
+ * One listening socket for accepting socket connections from servers and
+ * a list of connected sockets to handle callback requests on.
+ * Mounts registered with the callback channel are assigned IDs and
+ * put on a list so that the callback request handling code can match
+ * the requests up with mounts.
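+ *
+ * Note: for NFSv4.0 the server learns where to connect back to from
+ * the callback address/program the client advertises (via SETCLIENTID),
+ * which is presumably where nfs4_cb_port/nfs4_cb_port6 and the mount's
+ * callback ID (nm_cbid) get used.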
+ */
+socket_t nfs4_cb_so = NULL;
+socket_t nfs4_cb_so6 = NULL;
+in_port_t nfs4_cb_port = 0;
+in_port_t nfs4_cb_port6 = 0;
+uint32_t nfs4_cb_id = 0;
+uint32_t nfs4_cb_so_usecount = 0;
+TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks;
+TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts;
+
+int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
+
+/*
+ * Set up the callback channel for the NFS mount.
+ *
+ * Initializes the callback channel socket state and
+ * assigns a callback ID to the mount.
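+ *
+ * The first caller also creates the shared listening sockets: an IPv4
+ * socket bound to nfs_callback_port (or an ephemeral port if it's zero),
+ * and an IPv6 socket bound to the specified port or, failing that, to
+ * the same port the IPv4 socket got, falling back to any port.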
+ */
+void
+nfs4_mount_callback_setup(struct nfsmount *nmp)
+{
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ socket_t so = NULL;
+ socket_t so6 = NULL;
+ struct timeval timeo;
+ int error, on = 1;
+ in_port_t port;
+
+ lck_mtx_lock(nfs_global_mutex);
+ if (nfs4_cb_id == 0) {
+ TAILQ_INIT(&nfs4_cb_mounts);
+ TAILQ_INIT(&nfs4_cb_socks);
+ nfs4_cb_id++;
+ }
+ nmp->nm_cbid = nfs4_cb_id++;
+ if (nmp->nm_cbid == 0)
+ nmp->nm_cbid = nfs4_cb_id++;
+ nfs4_cb_so_usecount++;
+ TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
+
+ if (nfs4_cb_so) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+
+ /* IPv4 */
+ error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
+ goto fail;
+ }
+ so = nfs4_cb_so;
+
+ sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
+ error = sock_bind(so, (struct sockaddr *)&sin);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
+ goto fail;
+ }
+ error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
+ goto fail;
+ }
+ nfs4_cb_port = ntohs(sin.sin_port);
+
+ error = sock_listen(so, 32);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
+ goto fail;
+ }
+
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
+ error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
+ sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+ error = 0;
+
+ /* IPv6 */
+ error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
+ goto fail;
+ }
+ so6 = nfs4_cb_so6;
+
+ sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
+ /* try to use specified port or same port as IPv4 */
+ port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
+ipv6_bind_again:
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = in6addr_any;
+ sin6.sin6_port = htons(port);
+ error = sock_bind(so6, (struct sockaddr *)&sin6);
+ if (error) {
+ if (port != nfs_callback_port) {
+ /* if we simply tried to match the IPv4 port, then try any port */
+ port = 0;
+ goto ipv6_bind_again;
+ }
+ log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
+ goto fail;
+ }
+ error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
+ goto fail;
+ }
+ nfs4_cb_port6 = ntohs(sin6.sin6_port);
+
+ error = sock_listen(so6, 32);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
+ goto fail;
+ }
+
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
+ error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
+ sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+ error = 0;
+
+fail:
+ if (error) {
+ nfs4_cb_so = nfs4_cb_so6 = NULL;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ if (so6) {
+ sock_shutdown(so6, SHUT_RDWR);
+ sock_close(so6);
+ }
+ } else {
+ lck_mtx_unlock(nfs_global_mutex);
+ }
+}
+
+/*
+ * Shut down the callback channel for the NFS mount.
+ *
+ * Clears the mount's callback ID and releases the mount's
+ * reference on the callback socket.  Dropping the last reference
+ * also shuts down the callback socket(s).
+ */
+void
+nfs4_mount_callback_shutdown(struct nfsmount *nmp)
+{
+ struct nfs_callback_socket *ncbsp;
+ socket_t so, so6;
+ struct nfs4_cb_sock_list cb_socks;
+ struct timespec ts = {1,0};
+
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
+ /* wait for any callbacks in progress to complete */
+ while (nmp->nm_cbrefs)
+ msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+ nmp->nm_cbid = 0;
+ if (--nfs4_cb_so_usecount) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+ so = nfs4_cb_so;
+ so6 = nfs4_cb_so6;
+ nfs4_cb_so = nfs4_cb_so6 = NULL;
+ TAILQ_INIT(&cb_socks);
+ TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
+ lck_mtx_unlock(nfs_global_mutex);
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ if (so6) {
+ sock_shutdown(so6, SHUT_RDWR);
+ sock_close(so6);
+ }
+ while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
+ TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
+ FREE(ncbsp, M_TEMP);
+ }
+}
+
+/*
+ * Check periodically for stale/unused nfs callback sockets
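+ * (The scan runs every NFS4_CB_TIMER_PERIOD seconds and reaps sockets
+ * that are marked dead or have been idle for NFS4_CB_IDLE_MAX seconds.)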
+ */
+#define NFS4_CB_TIMER_PERIOD 30
+#define NFS4_CB_IDLE_MAX 300
+void
+nfs4_callback_timer(__unused void *param0, __unused void *param1)
+{
+ struct nfs_callback_socket *ncbsp, *nextncbsp;
+ struct timeval now;
+
+loop:
+ lck_mtx_lock(nfs_global_mutex);
+ if (TAILQ_EMPTY(&nfs4_cb_socks)) {
+ nfs4_callback_timer_on = 0;
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+ microuptime(&now);
+ TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
+ if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
+ (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX)))
+ continue;
+ TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
+ lck_mtx_unlock(nfs_global_mutex);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
+ FREE(ncbsp, M_TEMP);
+ goto loop;
+ }
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ NFS4_CB_TIMER_PERIOD * 1000);
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Accept a new callback socket.
+ */
+void
+nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
+{
+ socket_t newso = NULL;
+ struct nfs_callback_socket *ncbsp;
+ struct nfsmount *nmp;
+ struct timeval timeo, now;
+ int error, on = 1, ip;
+
+ if (so == nfs4_cb_so)
+ ip = 4;
+ else if (so == nfs4_cb_so6)
+ ip = 6;
+ else
+ return;
+
+ /* allocate/initialize a new nfs_callback_socket */
+ MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
+ if (!ncbsp) {
+ log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
+ return;
+ }
+ bzero(ncbsp, sizeof(*ncbsp));
+ ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
+ nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);
+
+ /* accept a new socket */
+ error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
+ ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
+ nfs4_cb_rcv, ncbsp, &newso);
+ if (error) {
+ log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
+ FREE(ncbsp, M_TEMP);
+ return;
+ }
+
+ /* set up the new socket */
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
+ sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+
+ ncbsp->ncbs_so = newso;
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+
+ lck_mtx_lock(nfs_global_mutex);
+
+ /* add it to the list */
+ TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
+
+ /* verify it's from a host we have mounted */
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ /* check if socket's source address matches this mount's server address */
+ if (!nmp->nm_saddr)
+ continue;
+ if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0)
+ break;
+ }
+ if (!nmp) /* we don't want this socket, mark it dead */
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+
+ /* make sure the callback socket cleanup timer is running */
+ /* (shorten the timer if we've got a socket we don't want) */
+ if (!nfs4_callback_timer_on) {
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
+ } else if (!nmp && (nfs4_callback_timer_on < 2)) {
+ nfs4_callback_timer_on = 2;
+ thread_call_cancel(nfs4_callback_timer_call);
+ nfs_interval_timer_start(nfs4_callback_timer_call, 500);
+ }
+
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Receive mbufs from callback sockets into RPC records and process each record.
+ * Detect when the connection has been closed and mark the socket dead
+ * so it gets shut down.
+ */
+void
+nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfs_callback_socket *ncbsp = arg;
+ struct timespec ts = {1,0};
+ struct timeval now;
+ mbuf_t m;
+ int error = 0, recv = 1;
+
+ lck_mtx_lock(nfs_global_mutex);
+ while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
+ /* wait if upcall is already in progress */
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
+ msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+ }
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
+
+ /* loop while we make error-free progress */
+ while (!error && recv) {
+ error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
+ if (m) /* handle the request */
+ error = nfs4_cb_handler(ncbsp, m);
+ }
+
+ /* note: no error and no data indicates server closed its end */
+ if ((error != EWOULDBLOCK) && (error || !recv)) {
+ /*
+ * Socket is either being closed or should be.
+ * We can't close the socket in the context of the upcall.
+ * So we mark it as dead and leave it for the cleanup timer to reap.
+ */
+ ncbsp->ncbs_stamp = 0;
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+ } else {
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+ }
+
+ lck_mtx_lock(nfs_global_mutex);
+ ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
+ wakeup(ncbsp);
+}
+
+/*
+ * Handle an NFS callback channel request.
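+ *
+ * Only the NULL and COMPOUND procedures are handled.  Within a
+ * COMPOUND, only CB_GETATTR and CB_RECALL are implemented; any other
+ * op gets NFSERR_OP_ILLEGAL and ends processing of the compound.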
+ */
+int
+nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
+{
+ socket_t so = ncbsp->ncbs_so;
+ struct nfsm_chain nmreq, nmrep;
+ mbuf_t mhead = NULL, mrest = NULL, m;
+ struct msghdr msg;
+ struct nfsmount *nmp;
+ fhandle_t fh;
+ nfsnode_t np;
+ nfs_stateid stateid;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
+ uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
+ uint32_t auth_type, auth_len;
+ uint32_t numres, *pnumres;
+ int error = 0, replen, len;
+ size_t sentlen = 0;
+
+ xid = numops = op = status = procnum = taglen = cbid = 0;
+
+ nfsm_chain_dissect_init(error, &nmreq, mreq);
+ nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Call
+ nfsm_assert(error, (val == RPC_CALL), EBADRPC);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Version
+ nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
+ nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
+ nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
+
+ /* Handle authentication */
+ /* XXX just ignore auth for now - handling kerberos may be tricky */
+ nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
+ nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
+ nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ if (error) {
+ status = error;
+ error = 0;
+ goto nfsmout;
+ }
+
+ switch (procnum) {
+ case NFSPROC4_CB_NULL:
+ status = NFSERR_RETVOID;
+ break;
+ case NFSPROC4_CB_COMPOUND:
+ /* tag, minorversion, cb ident, numops, op array */
+ nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
+ nfsm_assert(error, (taglen <= NFS4_OPAQUE_LIMIT), EBADRPC);
+
+ /* start building the body of the response */
+ nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5*NFSX_UNSIGNED);
+ nfsm_chain_init(&nmrep, mrest);
+
+ /* copy tag from request to response */
+ nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
+ for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
+ nfsm_chain_get_32(error, &nmreq, val);
+ nfsm_chain_add_32(error, &nmrep, val);
+ }
+
+ /* insert number of results placeholder */
+ numres = 0;
+ nfsm_chain_add_32(error, &nmrep, numres);
+ pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
+
+ nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
+ nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
+ nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
+ nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
+ if (error) {
+ if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH))
+ status = error;
+ else if ((error == ENOBUFS) || (error == ENOMEM))
+ status = NFSERR_RESOURCE;
+ else
+ status = NFSERR_SERVERFAULT;
+ error = 0;
+ nfsm_chain_null(&nmrep);
+ goto nfsmout;
+ }
+ /* match the callback ID to a registered mount */
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ if (nmp->nm_cbid != cbid)
+ continue;
+ /* verify socket's source address matches this mount's server address */
+ if (!nmp->nm_saddr)
+ continue;
+ if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0)
+ break;
+ }
+ /* mark the NFS mount as busy */
+ if (nmp)
+ nmp->nm_cbrefs++;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (!nmp) {
+ /* if no mount match, just drop socket. */
+ error = EPERM;
+ nfsm_chain_null(&nmrep);
+ goto out;
+ }
+
+ /* process ops, adding results to mrest */
+ while (numops > 0) {
+ numops--;
+ nfsm_chain_get_32(error, &nmreq, op);
+ if (error)
+ break;
+ switch (op) {
+ case NFS_OP_CB_GETATTR:
+ // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
+ np = NULL;
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ bmlen = NFS_ATTR_BITMAP_LEN;
+ nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
+ if (error) {
+ status = error;
+ error = 0;
+ numops = 0; /* don't process any more ops */
+ } else {
+ /* find the node for the file handle */
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ if (error || !np) {
+ status = NFSERR_BADHANDLE;
+ error = 0;
+ np = NULL;
+ numops = 0; /* don't process any more ops */
+ }
+ }
+ nfsm_chain_add_32(error, &nmrep, op);
+ nfsm_chain_add_32(error, &nmrep, status);
+ if (!error && (status == EBADRPC))
+ error = status;
+ if (np) {
+ /* only allow returning size, change, and mtime attrs */
+ NFS_CLEAR_ATTRIBUTES(&rbitmap);
+ attrbytes = 0;
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
+ attrbytes += 3 * NFSX_UNSIGNED;
+ }
+ nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
+ nfsm_chain_add_32(error, &nmrep, attrbytes);
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE))
+ nfsm_chain_add_64(error, &nmrep,
+ np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE))
+ nfsm_chain_add_64(error, &nmrep, np->n_size);
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
+ nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
+ }
+ nfs_node_unlock(np);
+ vnode_put(NFSTOV(np));
+ np = NULL;
+ }
+ /*
+ * If we hit an error building the reply, we can't easily back up.
+ * So we'll just update the status and hope the server ignores the
+ * extra garbage.
+ */
+ break;
+ case NFS_OP_CB_RECALL:
+ // (STATEID, TRUNCATE, FH) -> (STATUS)
+ np = NULL;
+ nfsm_chain_get_stateid(error, &nmreq, &stateid);
+ nfsm_chain_get_32(error, &nmreq, truncate);
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ if (error) {
+ status = error;
+ error = 0;
+ numops = 0; /* don't process any more ops */
+ } else {
+ /* find the node for the file handle */
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ if (error || !np) {
+ status = NFSERR_BADHANDLE;
+ error = 0;
+ np = NULL;
+ numops = 0; /* don't process any more ops */
+ } else if (!(np->n_openflags & N_DELEG_MASK) ||
+ bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
+ /* delegation stateid state doesn't match */
+ status = NFSERR_BAD_STATEID;
+ numops = 0; /* don't process any more ops */
+ }
+ if (!status) /* add node to recall queue, and wake socket thread */
+ nfs4_delegation_return_enqueue(np);
+ if (np) {
+ nfs_node_unlock(np);
+ vnode_put(NFSTOV(np));
+ }
+ }
+ nfsm_chain_add_32(error, &nmrep, op);
+ nfsm_chain_add_32(error, &nmrep, status);
+ if (!error && (status == EBADRPC))
+ error = status;
+ break;
+ case NFS_OP_CB_ILLEGAL:
+ default:
+ nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
+ status = NFSERR_OP_ILLEGAL;
+ nfsm_chain_add_32(error, &nmrep, status);
+ numops = 0; /* don't process any more ops */
+ break;
+ }
+ numres++;
+ }
+
+ if (!status && error) {
+ if (error == EBADRPC)
+ status = error;
+ else if ((error == ENOBUFS) || (error == ENOMEM))
+ status = NFSERR_RESOURCE;
+ else
+ status = NFSERR_SERVERFAULT;
+ error = 0;
+ }
+
+ /* Now, set the numres field */
+ *pnumres = txdr_unsigned(numres);
+ nfsm_chain_build_done(error, &nmrep);
+ nfsm_chain_null(&nmrep);
+
+ /* drop the callback reference on the mount */
+ lck_mtx_lock(nfs_global_mutex);
+ nmp->nm_cbrefs--;
+ if (!nmp->nm_cbid)
+ wakeup(&nmp->nm_cbrefs);
+ lck_mtx_unlock(nfs_global_mutex);
+ break;
+ }
+
+nfsmout:
+ if (status == EBADRPC)
+ OSAddAtomic64(1, &nfsstats.rpcinvalid);
+
+ /* build reply header */
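+ /*
+ * The reply follows the usual RPC reply layout: a 4-byte TCP record
+ * mark (filled in below), the xid, RPC_REPLY, then either RPC_MSGDENIED
+ * (RPC mismatch or auth error) or RPC_MSGACCEPTED with a null verifier
+ * and an accept status, followed by the op results built above.
+ */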
+ error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
+ nfsm_chain_init(&nmrep, mhead);
+ nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
+ nfsm_chain_add_32(error, &nmrep, xid);
+ nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
+ if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
+ nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
+ if (status & NFSERR_AUTHERR) {
+ nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
+ nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
+ } else {
+ nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
+ nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+ nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+ }
+ } else {
+ /* reply status */
+ nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
+ /* XXX RPCAUTH_NULL verifier */
+ nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
+ nfsm_chain_add_32(error, &nmrep, 0);
+ /* accepted status */
+ switch (status) {
+ case EPROGUNAVAIL:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
+ break;
+ case EPROGMISMATCH:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
+ nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+ nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+ break;
+ case EPROCUNAVAIL:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
+ break;
+ case EBADRPC:
+ nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
+ break;
+ default:
+ nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
+ if (status != NFSERR_RETVOID)
+ nfsm_chain_add_32(error, &nmrep, status);
+ break;
+ }
+ }
+ nfsm_chain_build_done(error, &nmrep);
+ if (error) {
+ nfsm_chain_null(&nmrep);
+ goto out;
+ }
+ error = mbuf_setnext(nmrep.nmc_mcur, mrest);
+ if (error) {
+ printf("nfs cb: mbuf_setnext failed %d\n", error);
+ goto out;
+ }
+ mrest = NULL;
+ /* Calculate the size of the reply */
+ replen = 0;
+ for (m = nmrep.nmc_mhead; m; m = mbuf_next(m))
+ replen += mbuf_len(m);
+ mbuf_pkthdr_setlen(mhead, replen);
+ error = mbuf_pkthdr_setrcvif(mhead, NULL);
+ nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
+ nfsm_chain_null(&nmrep);
+
+ /* send the reply */
+ bzero(&msg, sizeof(msg));
+ error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
+ mhead = NULL;
+ if (!error && ((int)sentlen != replen))
+ error = EWOULDBLOCK;
+ if (error == EWOULDBLOCK) /* inability to send response is considered fatal */
+ error = ETIMEDOUT;
+out:
+ if (error)
+ nfsm_chain_cleanup(&nmrep);
+ if (mhead)
+ mbuf_freem(mhead);
+ if (mrest)
+ mbuf_freem(mrest);
+ if (mreq)
+ mbuf_freem(mreq);
+ return (error);
+}
+
+
+/*
+ * Initialize an nfs_rpc_record_state structure.
+ */
+void
+nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
+{
+ bzero(nrrsp, sizeof(*nrrsp));
+ nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
+}
+
+/*
+ * Clean up an nfs_rpc_record_state structure.
+ */
+void
+nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
+{
+ if (nrrsp->nrrs_m) {
+ mbuf_freem(nrrsp->nrrs_m);
+ nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
+ }
+}
+
+/*
+ * Read the next (marked) RPC record from the socket.
+ *
+ * *recvp returns whether any data was received.
+ * *mp returns the next complete RPC record, if any.
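+ *
+ * Records on a stream socket use standard RPC record marking (as in
+ * RFC 5531): each fragment is preceded by a 4-byte big-endian marker
+ * whose high bit flags the last fragment and whose low 31 bits give
+ * the fragment length.  The record is complete once the last fragment
+ * has been read in full.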
+ */
+int
+nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
+{
+ struct iovec aio;
+ struct msghdr msg;
+ size_t rcvlen;
+ int error = 0;
+ mbuf_t m;
+
+ *recvp = 0;
+ *mp = NULL;
+
+ /* read the TCP RPC record marker */
+ while (!error && nrrsp->nrrs_markerleft) {
+ aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
+ sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
+ aio.iov_len = nrrsp->nrrs_markerleft;
+ bzero(&msg, sizeof(msg));
+ msg.msg_iov = &aio;
+ msg.msg_iovlen = 1;
+ error = sock_receive(so, &msg, flags, &rcvlen);
+ if (error || !rcvlen)
+ break;
+ *recvp = 1;
+ nrrsp->nrrs_markerleft -= rcvlen;
+ if (nrrsp->nrrs_markerleft)
+ continue;
+ /* record marker complete */
+ nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
+ if (nrrsp->nrrs_fragleft & 0x80000000) {
+ nrrsp->nrrs_lastfrag = 1;
+ nrrsp->nrrs_fragleft &= ~0x80000000;
+ }
+ nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
+ if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
+ /* This is SERIOUS! We are out of sync with the sender. */
+ log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
+ error = EFBIG;
+ }
+ }
+
+ /* read the TCP RPC record fragment */
+ while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
+ m = NULL;
+ rcvlen = nrrsp->nrrs_fragleft;
+ error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
+ if (error || !rcvlen || !m)
+ break;
+ *recvp = 1;
+ /* append mbufs to list */
+ nrrsp->nrrs_fragleft -= rcvlen;
+ if (!nrrsp->nrrs_m) {
+ nrrsp->nrrs_m = m;
+ } else {
+ error = mbuf_setnext(nrrsp->nrrs_mlast, m);
+ if (error) {
+ printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
+ mbuf_freem(m);
+ break;
+ }
+ }
+ while (mbuf_next(m))
+ m = mbuf_next(m);
+ nrrsp->nrrs_mlast = m;
+ }
+
+ /* done reading fragment? */
+ if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
+ /* reset socket fragment parsing state */
+ nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
+ if (nrrsp->nrrs_lastfrag) {
+ /* RPC record complete */
+ *mp = nrrsp->nrrs_m;
+ /* reset socket record parsing state */
+ nrrsp->nrrs_reclen = 0;
+ nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
+ nrrsp->nrrs_lastfrag = 0;
+ }
+ }
+
+ return (error);
+}
+
+
+
+/*
+ * The NFS client send routine.
+ *
+ * Send the given NFS request out the mount's socket.
+ * Holds nfs_sndlock() for the duration of this call.
+ *
+ * - check for request termination (sigintr)
+ * - wait for reconnect, if necessary
+ * - UDP: check the congestion window
+ * - make a copy of the request to send
+ * - UDP: update the congestion window
+ * - send the request
+ *
+ * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
+ * rexmit count is also updated if this isn't the first send.
+ *
+ * If the send is not successful, make sure R_MUSTRESEND is set.
+ * If this wasn't the first transmit, set R_RESENDERR.
+ * Also, undo any UDP congestion window changes made.
+ *
+ * If the error appears to indicate that the socket should
+ * be reconnected, mark the socket for reconnection.
+ *
+ * Only return errors when the request should be aborted.
+ */
+int
+nfs_send(struct nfsreq *req, int wait)
+{
+ struct nfsmount *nmp;
+ struct nfs_socket *nso;
+ int error, error2, sotype, rexmit, slpflag = 0, needrecon;
+ struct msghdr msg;
+ struct sockaddr *sendnam;
+ mbuf_t mreqcopy;
+ size_t sentlen = 0;
+ struct timespec ts = { 2, 0 };
+
+again:
+ error = nfs_sndlock(req);
+ if (error) {
+ lck_mtx_lock(&req->r_mtx);
+ req->r_error = error;
+ req->r_flags &= ~R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ return (error);
+ }
+
+ error = nfs_sigintr(req->r_nmp, req, NULL, 0);
+ if (error) {
+ nfs_sndunlock(req);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_error = error;
+ req->r_flags &= ~R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ return (error);
+ }
+ nmp = req->r_nmp;
+ sotype = nmp->nm_sotype;
+
+ /*
+ * If it's a setup RPC but we're not in SETUP... must need reconnect.
+ * If it's a recovery RPC but the socket's not ready... must need reconnect.
+ */
+ if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
+ ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
+ error = ETIMEDOUT;
+ nfs_sndunlock(req);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_error = error;
+ req->r_flags &= ~R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ return (error);
+ }
+
+ /* If the socket needs reconnection, do that now. */
+ /* wait until socket is ready - unless this request is part of setup */
+ lck_mtx_lock(&nmp->nm_lock);
+ if (!(nmp->nm_sockflags & NMSOCK_READY) &&
+ !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
+ if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR))
+ slpflag |= PCATCH;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_sndunlock(req);
+ if (!wait) {
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags &= ~R_SENDING;
+ req->r_flags |= R_MUSTRESEND;
+ req->r_rtt = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ return (0);
+ }
+ NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags &= ~R_MUSTRESEND;
+ req->r_rtt = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ lck_mtx_lock(&nmp->nm_lock);
+ while (!(nmp->nm_sockflags & NMSOCK_READY)) {
+ /* don't bother waiting if the socket thread won't be reconnecting it */
+ if (nmp->nm_state & NFSSTA_FORCE) {
+ error = EIO;
+ break;
+ }
+ if (NMFLAG(nmp, SOFT) && (nmp->nm_reconnect_start > 0)) {
+ struct timeval now;
+ microuptime(&now);
+ if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
+ /* soft mount in reconnect for a while... terminate ASAP */
+ OSAddAtomic64(1, &nfsstats.rpctimeouts);
+ req->r_flags |= R_SOFTTERM;
+ req->r_error = error = ETIMEDOUT;
+ break;
+ }
+ }
+ /* make sure socket thread is running, then wait */
+ nfs_mount_sock_thread_wake(nmp);
+ if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
+ break;
+ msleep(req, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectwait", &ts);
+ slpflag = 0;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (error) {
+ lck_mtx_lock(&req->r_mtx);
+ req->r_error = error;
+ req->r_flags &= ~R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ return (error);
+ }
+ goto again;
+ }
+ nso = nmp->nm_nso;
+ /* note that we're using the mount's socket to do the send */
+ nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (!nso) {
+ nfs_sndunlock(req);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags &= ~R_SENDING;
+ req->r_flags |= R_MUSTRESEND;
+ req->r_rtt = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ return (0);
+ }
+
+ lck_mtx_lock(&req->r_mtx);
+ rexmit = (req->r_flags & R_SENT);
+
+ if (sotype == SOCK_DGRAM) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
+ /* if we can't send this out yet, wait on the cwnd queue */
+ slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_sndunlock(req);
+ req->r_flags &= ~R_SENDING;
+ req->r_flags |= R_MUSTRESEND;
+ lck_mtx_unlock(&req->r_mtx);
+ if (!wait) {
+ req->r_rtt = 0;
+ return (0);
+ }
+ lck_mtx_lock(&nmp->nm_lock);
+ while (nmp->nm_sent >= nmp->nm_cwnd) {
+ if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
+ break;
+ TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
+ msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
+ slpflag = 0;
+ if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
+ TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
+ req->r_cchain.tqe_next = NFSREQNOLIST;
+ }
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ goto again;
+ }
+ /*
+ * We update these *before* the send to avoid racing
+ * against others who may be looking to send requests.
+ */
+ if (!rexmit) {
+ /* first transmit */
+ req->r_flags |= R_CWND;
+ nmp->nm_sent += NFS_CWNDSCALE;
+ } else {
+ /*
+ * When retransmitting, turn timing off
+ * and divide congestion window by 2.
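+ * (multiplicative decrease, with NFS_CWNDSCALE as the floor so at
+ * least one request can still be outstanding)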
+ */
+ req->r_flags &= ~R_TIMING;
+ nmp->nm_cwnd >>= 1;
+ if (nmp->nm_cwnd < NFS_CWNDSCALE)
+ nmp->nm_cwnd = NFS_CWNDSCALE;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+
+ req->r_flags &= ~R_MUSTRESEND;
+ lck_mtx_unlock(&req->r_mtx);
+
+ error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
+ wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
+ if (error) {
+ if (wait)
+ log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
+ nfs_sndunlock(req);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags &= ~R_SENDING;
+ req->r_flags |= R_MUSTRESEND;
+ req->r_rtt = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ return (0);
+ }
+
+ bzero(&msg, sizeof(msg));
+ if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
+ msg.msg_name = (caddr_t)sendnam;
+ msg.msg_namelen = sendnam->sa_len;
+ }
+ error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
+#ifdef NFS_SOCKET_DEBUGGING
+ if (error || (sentlen != req->r_mreqlen))
+ NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
+ req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
+#endif
+ if (!error && (sentlen != req->r_mreqlen))
+ error = EWOULDBLOCK;
+ needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));
+
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags &= ~R_SENDING;
+ req->r_rtt = 0;
+ if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT))
+ req->r_rexmit = NFS_MAXREXMIT;
+
+ if (!error) {
+ /* SUCCESS */
+ req->r_flags &= ~R_RESENDERR;
+ if (rexmit)
+ OSAddAtomic64(1, &nfsstats.rpcretries);
+ req->r_flags |= R_SENT;
+ if (req->r_flags & R_WAITSENT) {
+ req->r_flags &= ~R_WAITSENT;
+ wakeup(req);
+ }
+ nfs_sndunlock(req);
+ lck_mtx_unlock(&req->r_mtx);
+ return (0);
+ }
+
+ /* send failed */
+ req->r_flags |= R_MUSTRESEND;
+ if (rexmit)
+ req->r_flags |= R_RESENDERR;
+ if ((error == EINTR) || (error == ERESTART))
+ req->r_error = error;
+ lck_mtx_unlock(&req->r_mtx);
+
+ if (sotype == SOCK_DGRAM) {
+ /*
+ * Note: even though a first send may fail, we consider
+ * the request sent for congestion window purposes.
+ * So we don't need to undo any of the changes made above.
+ */
+ /*
+ * Socket errors ignored for connectionless sockets??
+ * For now, ignore them all
+ */
+ if ((error != EINTR) && (error != ERESTART) &&
+ (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
+ int clearerror = 0, optlen = sizeof(clearerror);
+ sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
+#ifdef NFS_SOCKET_DEBUGGING
+ if (clearerror)
+ NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
+ error, clearerror);
+#endif
+ }
+ }
+
+ /* check if it appears we should reconnect the socket */
+ switch (error) {
+ case EWOULDBLOCK:
+ /* if send timed out, reconnect if on TCP */
+ if (sotype != SOCK_STREAM)
+ break;
+ case EPIPE:
+ case EADDRNOTAVAIL:
+ case ENETDOWN:
+ case ENETUNREACH:
+ case ENETRESET:
+ case ECONNABORTED:
+ case ECONNRESET:
+ case ENOTCONN:
+ case ESHUTDOWN:
+ case ECONNREFUSED:
+ case EHOSTDOWN:
+ case EHOSTUNREACH:
+ needrecon = 1;
+ break;
+ }
+ if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
+ NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
+ nfs_need_reconnect(nmp);
+ }
+
+ nfs_sndunlock(req);
+
+ /*
+ * Don't log some errors:
+ * EPIPE errors may be common with servers that drop idle connections.
+ * EADDRNOTAVAIL may occur on network transitions.
+ * ENOTCONN may occur under some network conditions.
+ */
+ if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN))
+ error = 0;
+ if (error && (error != EINTR) && (error != ERESTART))
+ log(LOG_INFO, "nfs send error %d for server %s\n", error,
+ !req->r_nmp ? "<unmounted>" :
+ vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
+
+ if (nfs_is_dead(error, nmp))
+ error = EIO;
+
+ /* prefer request termination error over other errors */
+ error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
+ if (error2)
+ error = error2;
+
+ /* only allow the following errors to be returned */
+ if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
+ (error != ENXIO) && (error != ETIMEDOUT))
+ error = 0;
+ return (error);
+}
+
+/*
+ * NFS client socket upcalls
+ *
+ * Pull RPC replies out of an NFS mount's socket and match them
+ * up with the pending request.
+ *
+ * The datagram code is simple because we always get whole
+ * messages out of the socket.
+ *
+ * The stream code is more involved because we have to parse
+ * the RPC records out of the stream.
+ */
+
+/* NFS client UDP socket upcall */
+void
+nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfsmount *nmp = arg;
+ struct nfs_socket *nso = nmp->nm_nso;
+ size_t rcvlen;
+ mbuf_t m;
+ int error = 0;
+
+ if (nmp->nm_sockflags & NMSOCK_CONNECTING)
+ return;
+
+ do {
+ /* make sure we're on the current socket */
+ if (!nso || (nso->nso_so != so))
+ return;
+
+ m = NULL;
+ rcvlen = 1000000;
+ error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
+ if (m)
+ nfs_request_match_reply(nmp, m);
+ } while (m && !error);
+
+ if (error && (error != EWOULDBLOCK)) {
+ /* problems with the socket... mark for reconnection */
+ NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
+ nfs_need_reconnect(nmp);
+ }
+}
+
+/* NFS client TCP socket upcall */
+void
+nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfsmount *nmp = arg;
+ struct nfs_socket *nso = nmp->nm_nso;
+ struct nfs_rpc_record_state nrrs;
+ mbuf_t m;
+ int error = 0;
+ int recv = 1;
+
+ if (nmp->nm_sockflags & NMSOCK_CONNECTING)
+ return;
+
+ /* make sure we're on the current socket */
+ lck_mtx_lock(&nmp->nm_lock);
+ nso = nmp->nm_nso;
+ if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ /* make sure this upcall should be trying to do work */
+ lck_mtx_lock(&nso->nso_lock);
+ if (nso->nso_flags & (NSO_UPCALL|NSO_DISCONNECTING|NSO_DEAD)) {
+ lck_mtx_unlock(&nso->nso_lock);
+ return;
+ }
+ nso->nso_flags |= NSO_UPCALL;
+ nrrs = nso->nso_rrs;
+ lck_mtx_unlock(&nso->nso_lock);
+
+ /* loop while we make error-free progress */
+ while (!error && recv) {
+ error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
+ if (m) /* match completed response with request */
+ nfs_request_match_reply(nmp, m);
+ }
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if (nmp->nm_nso == nso) {
+ /* still the same socket, so update socket's RPC parsing state */
+ lck_mtx_unlock(&nmp->nm_lock);
+ lck_mtx_lock(&nso->nso_lock);
+ nso->nso_rrs = nrrs;
+ nso->nso_flags &= ~NSO_UPCALL;
+ lck_mtx_unlock(&nso->nso_lock);
+ if (nmp->nm_sockflags & NMSOCK_DISCONNECTING)
+ wakeup(&nmp->nm_sockflags);
+ } else {
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+#ifdef NFS_SOCKET_DEBUGGING
+ if (!recv && (error != EWOULDBLOCK))
+ NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
+#endif
+ /* note: no error and no data indicates server closed its end */
+ if ((error != EWOULDBLOCK) && (error || !recv)) {
+ /* problems with the socket... mark for reconnection */
+ NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
+ nfs_need_reconnect(nmp);
+ }
+}
+
+/*
+ * "poke" a socket to try to provoke any pending errors
+ */
+void
+nfs_sock_poke(struct nfsmount *nmp)
+{
+ struct iovec aio;
+ struct msghdr msg;
+ size_t len;
+ int error = 0;
+ int dummy;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
+ !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ aio.iov_base = &dummy;
+ aio.iov_len = 0;
+ len = 0;
+ bzero(&msg, sizeof(msg));
+ msg.msg_iov = &aio;
+ msg.msg_iovlen = 1;
+ error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
+ NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
+ nfs_is_dead(error, nmp);
+}
+
+/*
+ * Match an RPC reply with the corresponding request
+ */
+void
+nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
+{
+ struct nfsreq *req;
+ struct nfsm_chain nmrep;
+ u_int32_t reply = 0, rxid = 0;
+ int error = 0, asyncioq, t1;
+
+ /* Get the xid and check that it is an rpc reply */
+ nfsm_chain_dissect_init(error, &nmrep, mrep);
+ nfsm_chain_get_32(error, &nmrep, rxid);
+ nfsm_chain_get_32(error, &nmrep, reply);
+ if (error || (reply != RPC_REPLY)) {
+ OSAddAtomic64(1, &nfsstats.rpcinvalid);
+ mbuf_freem(mrep);
+ return;
+ }
+
+ /*
+ * Loop through the request list to match up the reply.
+ * If there's no match, just drop it.
+ */
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
+ if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid)))
+ continue;
+ /* looks like we have it, grab lock and double check */
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
+ lck_mtx_unlock(&req->r_mtx);
+ continue;
+ }
+ /* Found it.. */
+ req->r_nmrep = nmrep;
+ lck_mtx_lock(&nmp->nm_lock);
+ if (nmp->nm_sotype == SOCK_DGRAM) {
+ /*
+ * Update congestion window.
+ * Do the additive increase of one rpc/rtt.
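+ * Each reply adds (NFS_CWNDSCALE*NFS_CWNDSCALE + cwnd/2) / cwnd, so
+ * over the roughly cwnd/NFS_CWNDSCALE replies received in one round
+ * trip the window grows by about NFS_CWNDSCALE (one request's worth).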
+ */
+ FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
+ if (nmp->nm_cwnd <= nmp->nm_sent) {
+ nmp->nm_cwnd +=
+ ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
+ (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
+ if (nmp->nm_cwnd > NFS_MAXCWND)
+ nmp->nm_cwnd = NFS_MAXCWND;
+ }
+ if (req->r_flags & R_CWND) {
+ nmp->nm_sent -= NFS_CWNDSCALE;
+ req->r_flags &= ~R_CWND;
+ }
+ if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
+ /* congestion window is open, poke the cwnd queue */
+ struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
+ TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
+ req2->r_cchain.tqe_next = NFSREQNOLIST;
+ wakeup(req2);
+ }
+ }
+ /*
+ * Update rtt using a gain of 0.125 on the mean
+ * and a gain of 0.25 on the deviation.
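+ * In the fixed-point form used below (SRTT scaled by 8, SDRTT by 4),
+ * with err = rtt - SRTT/8 computed before updating:
+ *   SRTT  += err                 (srtt  += (rtt - srtt) / 8)
+ *   SDRTT += |err| - SDRTT/4     (sdrtt += (|err| - sdrtt) / 4)
+ * where rtt is taken as r_rtt + 1 ticks.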
+ */
+ if (req->r_flags & R_TIMING) {
+ /*
+ * Since the timer resolution of
+ * NFS_HZ is so coarse, it can often
+ * result in r_rtt == 0. Since
+ * r_rtt == N means that the actual
+ * rtt is between N+dt and N+2-dt ticks,
+ * add 1.
+ */
+ if (proct[req->r_procnum] == 0)
+ panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
+ t1 = req->r_rtt + 1;
+ t1 -= (NFS_SRTT(req) >> 3);
+ NFS_SRTT(req) += t1;
+ if (t1 < 0)
+ t1 = -t1;
+ t1 -= (NFS_SDRTT(req) >> 2);
+ NFS_SDRTT(req) += t1;
+ }
+ nmp->nm_timeouts = 0;
+ lck_mtx_unlock(&nmp->nm_lock);
+ /* signal anyone waiting on this request */
+ wakeup(req);
+ asyncioq = (req->r_callback.rcb_func != NULL);
+ if (nfs_request_using_gss(req))
+ nfs_gss_clnt_rpcdone(req);
+ lck_mtx_unlock(&req->r_mtx);
+ lck_mtx_unlock(nfs_request_mutex);
+ /* if it's an async RPC with a callback, queue it up */
+ if (asyncioq)
+ nfs_asyncio_finish(req);
+ break;
+ }
+
+ if (!req) {
+ /* not matched to a request, so drop it. */
+ lck_mtx_unlock(nfs_request_mutex);
+ OSAddAtomic64(1, &nfsstats.rpcunexpected);
+ mbuf_freem(mrep);
+ }
+}
+
+/*
+ * Wait for the reply for a given request...
+ * ...potentially resending the request if necessary.