+ if (!vers) {
+ if (nso->nso_protocol == PMAPPROG)
+ vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
+ else if (nso->nso_protocol == NFS_PROG)
+ vers = PVER2MAJOR(nmp->nm_max_vers);
+ }
+ lck_mtx_unlock(&nso->nso_lock);
+ error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
+ vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
+ lck_mtx_lock(&nso->nso_lock);
+ if (!error) {
+ nso->nso_flags |= NSO_PINGING;
+ nso->nso_pingxid = R_XID32(xid);
+ nso->nso_reqtimestamp = now->tv_sec;
+ bzero(&msg, sizeof(msg));
+ if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
+ msg.msg_name = nso->nso_saddr;
+ msg.msg_namelen = nso->nso_saddr->sa_len;
+ }
+ for (reqlen=0, m=mreq; m; m = mbuf_next(m))
+ reqlen += mbuf_len(m);
+ lck_mtx_unlock(&nso->nso_lock);
+ error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
+ NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
+ lck_mtx_lock(&nso->nso_lock);
+ if (!error && (sentlen != reqlen))
+ error = ETIMEDOUT;
+ }
+ if (error) {
+ nso->nso_error = error;
+ nso->nso_flags |= NSO_DEAD;
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
+ * Set the nfs socket protocol and version if needed.
+ */
+void
+nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
+{
+ NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
+ if (!nso->nso_version) {
+ /* If the version isn't set, the default must have worked. */
+ if (nso->nso_protocol == PMAPPROG)
+ nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
+ if (nso->nso_protocol == NFS_PROG)
+ nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
+ }
+ TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
+ nss->nss_sockcnt--;
+ nss->nss_sock = nso;
+}
+
+/*
+ * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
+ * the list. Dead socket are then destroyed.
+ */
void
nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_socket *nso, *nsonext;

	/* Use the _SAFE variant since we may unlink the current entry. */
	TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
		lck_mtx_lock(&nso->nso_lock);
		if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
			/* took too long */
			NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
				vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_error = ETIMEDOUT;
			nso->nso_flags |= NSO_DEAD;
		}
		if (!(nso->nso_flags & NSO_DEAD)) {
			/* Socket is still in play; leave it on the list. */
			lck_mtx_unlock(&nso->nso_lock);
			continue;
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s reaping socket %p %d\n",
			vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error);
		/* Record the dead socket's error for the search, then unlink and destroy it. */
		nfs_socket_search_update_error(nss, nso->nso_error);
		TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt--;
		nfs_socket_destroy(nso);
		/* If there are more sockets to try, force the starting of another socket */
		if (nss->nss_addrcnt > 0)
			nss->nss_last = -2;
	}
}
+
+/*
+ * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
+ */
int
nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	int error;

	/* log a warning if connect is taking a while */
	if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE|NSS_WARNED)) == NSS_VERBOSE)) {
		printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		nss->nss_flags |= NSS_WARNED;
	}
	/* The mount is being unmounted out from under us; abandon the search. */
	if (nmp->nm_sockflags & NMSOCK_UNMOUNT)
		return (EINTR);
	/* Bail if the operation has been interrupted (signal / forced unmount). */
	if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0)))
		return (error);

	/* If we were successful at sending a ping, wait up to a second for a reply */
	if (nss->nss_last >= 0)
		tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);

	return (0);
}
+
+
+/*
+ * Continue the socket search until we have something to report.
+ */
int
nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	int verbose = (nss->nss_flags & NSS_VERBOSE);

loop:
	microuptime(&now);
	NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);

	/* add a new socket to the socket list if needed and available */
	error = nfs_connect_search_new_socket(nmp, nss, &now);
	if (error) {
		NFS_SOCK_DBG("nfs connect returned %d\n", error);
		return (error);
	}

	/* check each active socket on the list and try to push it along */
	TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
		/* Each socket's state is protected by its own nso_lock. */
		lck_mtx_lock(&nso->nso_lock);

		/* If not connected connect it */
		if (!(nso->nso_flags & NSO_CONNECTED)) {
			if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
				/* Connect still in progress (or socket marked dead); try the next one. */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
		if (!(nso->nso_flags & (NSO_PINGING|NSO_VERIFIED)) ||
		    ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp+2))) {
			if (!nfs_connect_search_ping(nmp, nso, &now)) {
				/* Ping could not be sent (socket marked dead); try the next one. */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* Has the socket been verified by the up call routine? */
		if (nso->nso_flags & NSO_VERIFIED) {
			/* WOOHOO!! This socket looks good! */
			nfs_connect_search_socket_found(nmp, nss, nso);
			lck_mtx_unlock(&nso->nso_lock);
			break;
		}
		lck_mtx_unlock(&nso->nso_lock);
	}

	/* Check for timed out sockets and mark as dead and then remove all dead sockets. */
	nfs_connect_search_socket_reap(nmp, nss, &now);

	/*
	 * Keep looping if we haven't found a socket yet and we have more
	 * sockets to (continue to) try.
	 */
	error = 0;
	if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
		/* nfs_connect_search_check() handles interruption and paces the loop. */
		error = nfs_connect_search_check(nmp, nss, &now);
		if (!error)
			goto loop;
	}

	NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
	return (error);
}
+
+/*
+ * Initialize a new NFS connection.
+ *
+ * Search for a location to connect a socket to and initialize the connection.
+ *
+ * An NFS mount may have multiple locations/servers/addresses available.
+ * We attempt to connect to each one asynchronously and will start
+ * several sockets in parallel if other locations are slow to answer.
+ * We'll use the first NFS socket we can successfully set up.
+ *
+ * The search may involve contacting the portmapper service first.
+ *
+ * A mount's initial connection may require negotiating some parameters such
+ * as socket type and NFS version.
+ */
+
+int
+nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
+{
+ struct nfs_socket_search nss;
+ struct nfs_socket *nso, *nsonfs;
+ struct sockaddr_storage ss;
+ struct sockaddr *saddr, *oldsaddr;
+ sock_upcall upcall;
+ struct timeval now, start;
+ int error, savederror, nfsvers;
+ int tryv4 = 1;
+ uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
+ fhandle_t *fh = NULL;
+ char *path = NULL;
+ in_port_t port;
+ int addrtotal = 0;
+
+ /* paranoia... check that we have at least one address in the locations */
+ uint32_t loc, serv;
+ for (loc=0; loc < nmp->nm_locations.nl_numlocs; loc++) {
+ for (serv=0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
+ addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
+ if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0)
+ NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+ nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
+ }
+ }
+
+ if (addrtotal == 0) {
+ NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ return (EINVAL);
+ } else
+ NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
+
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags |= NMSOCK_CONNECTING;
+ nmp->nm_nss = &nss;
+ lck_mtx_unlock(&nmp->nm_lock);
+ microuptime(&start);
+ savederror = error = 0;
+
+tryagain:
+ /* initialize socket search state */
+ bzero(&nss, sizeof(nss));
+ nss.nss_addrcnt = addrtotal;
+ nss.nss_error = savederror;
+ TAILQ_INIT(&nss.nss_socklist);
+ nss.nss_sotype = sotype;
+ nss.nss_startloc = nmp->nm_locations.nl_current;
+ nss.nss_timestamp = start.tv_sec;
+ nss.nss_timeo = timeo;
+ if (verbose)
+ nss.nss_flags |= NSS_VERBOSE;
+
+ /* First time connecting, we may need to negotiate some things */
+ if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
+ if (!nmp->nm_vers) {
+ /* No NFS version specified... */
+ if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
+ if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
+ nss.nss_port = NFS_PORT;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = 4;
+ nss.nss_flags |= NSS_FALLBACK2PMAP;
+ } else {
+ /* ...connect to portmapper first if we (may) need any ports. */
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ }
+ } else {
+ /* ...connect to NFS port first. */
+ nss.nss_port = nmp->nm_nfsport;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = 0;
+ }
+ } else if (nmp->nm_vers >= NFS_VER4) {
+ if (tryv4) {
+ /* For NFSv4, we use the given (or default) port. */
+ nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = 4;
+ /*
+ * set NSS_FALLBACK2PMAP here to pick up any non standard port
+ * if no port is specified on the mount;
+ * Note nm_vers is set so we will only try NFS_VER4.
+ */
+ if (!nmp->nm_nfsport)
+ nss.nss_flags |= NSS_FALLBACK2PMAP;
+ } else {
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ }
+ } else {
+ /* For NFSv3/v2... */
+ if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
+ /* ...connect to portmapper first if we need any ports. */
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ } else {
+ /* ...connect to NFS port first. */
+ nss.nss_port = nmp->nm_nfsport;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = nmp->nm_vers;
+ }
+ }
+ NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
+ nss.nss_protocol, nss.nss_version);
+ } else {
+ /* we've connected before, just connect to NFS port */
+ if (!nmp->nm_nfsport) {
+ /* need to ask portmapper which port that would be */
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ } else {
+ nss.nss_port = nmp->nm_nfsport;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = nmp->nm_vers;
+ }
+ NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
+ nss.nss_protocol, nss.nss_version);
+ }
+
+ /* Set next location to first valid location. */
+ /* If start location is invalid, find next location. */
+ nss.nss_nextloc = nss.nss_startloc;
+ if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
+ (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
+ nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
+ if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
+ NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ return (ENOENT);
+ }
+ }
+ nss.nss_last = -1;
+
+keepsearching:
+
+ error = nfs_connect_search_loop(nmp, &nss);
+ if (error || !nss.nss_sock) {
+ /* search failed */
+ nfs_socket_search_cleanup(&nss);
+ if (nss.nss_flags & NSS_FALLBACK2PMAP) {
+ tryv4 = 0;
+ NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
+ goto tryagain;
+ }
+
+ if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
+ /* Try using UDP */
+ sotype = SOCK_DGRAM;
+ savederror = nss.nss_error;
+ NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
+ goto tryagain;
+ }
+ if (!error)
+ error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
+ nmp->nm_nss = NULL;
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (nss.nss_flags & NSS_WARNED)
+ log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ if (fh)
+ FREE(fh, M_TEMP);
+ if (path)
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
+ return (error);
+ }
+
+ /* try to use nss_sock */
+ nso = nss.nss_sock;
+ nss.nss_sock = NULL;
+
+ /* We may be speaking to portmap first... to determine port(s). */
+ if (nso->nso_saddr->sa_family == AF_INET)
+ port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
+ else
+ port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
+ if (port == PMAPPORT) {
+ /* Use this portmapper port to get the port #s we need. */
+ NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
+
+ /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
+ sock_setupcall(nso->nso_so, NULL, NULL);
+
+ /* Set up socket address and port for NFS socket. */
+ bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
+
+ /* If NFS version not set, try nm_max_vers down to nm_min_vers */
+ nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
+ if (!(port = nmp->nm_nfsport)) {
+ if (ss.ss_family == AF_INET)
+ ((struct sockaddr_in*)&ss)->sin_port = htons(0);
+ else if (ss.ss_family == AF_INET6)
+ ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
+ for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
+ if (nmp->nm_vers && nmp->nm_vers != nfsvers)
+ continue; /* Wrong version */
+ if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM)
+ continue; /* NFSv4 does not do UDP */
+ error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
+ nso->nso_so, NFS_PROG, nfsvers,
+ (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo);
+ if (!error) {
+ if (ss.ss_family == AF_INET)
+ port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
+ else if (ss.ss_family == AF_INET6)
+ port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
+ if (!port)
+ error = EPROGUNAVAIL;
+ if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0)
+ continue; /* We already tried this */
+ }
+ if (!error)
+ break;
+ }
+ if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0)
+ error = EPROGUNAVAIL;
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+ }
+ /* Create NFS protocol socket and add it to the list of sockets. */
+ /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
+ error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
+ NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+ nsonfs->nso_location = nso->nso_location;
+ nsonfs->nso_wake = &nss;
+ error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nsonfs);
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+ TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
+ nss.nss_sockcnt++;
+ if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
+ /* Set up socket address and port for MOUNT socket. */
+ error = 0;
+ bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
+ port = nmp->nm_mountport;
+ if (ss.ss_family == AF_INET)
+ ((struct sockaddr_in*)&ss)->sin_port = htons(port);
+ else if (ss.ss_family == AF_INET6)
+ ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
+ if (!port) {
+ /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
+ /* If NFS version is unknown, optimistically choose for NFSv3. */
+ int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
+ int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
+ error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
+ nso->nso_so, RPCPROG_MNT, mntvers, mntproto, timeo);
+ }
+ if (!error) {
+ if (ss.ss_family == AF_INET)
+ port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
+ else if (ss.ss_family == AF_INET6)
+ port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
+ if (!port)
+ error = EPROGUNAVAIL;
+ }
+ /* create sockaddr for MOUNT */
+ if (!error)
+ MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK|M_ZERO);
+ if (!error && !nsonfs->nso_saddr2)
+ error = ENOMEM;
+ if (!error)
+ bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
+ if (error) {
+ lck_mtx_lock(&nsonfs->nso_lock);
+ nsonfs->nso_error = error;
+ nsonfs->nso_flags |= NSO_DEAD;
+ lck_mtx_unlock(&nsonfs->nso_lock);
+ }
+ }
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+
+ /* nso is an NFS socket */
+ NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
+
+ /* If NFS version wasn't specified, it was determined during the connect. */
+ nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
+
+ /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
+ if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
+ error = 0;
+ saddr = nso->nso_saddr2;
+ if (!saddr) {
+ /* Need sockaddr for MOUNT port */
+ bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
+ port = nmp->nm_mountport;
+ if (ss.ss_family == AF_INET)
+ ((struct sockaddr_in*)&ss)->sin_port = htons(port);
+ else if (ss.ss_family == AF_INET6)
+ ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
+ if (!port) {
+ /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
+ int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
+ int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
+ error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
+ NULL, RPCPROG_MNT, mntvers, mntproto, timeo);
+ if (ss.ss_family == AF_INET)
+ port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
+ else if (ss.ss_family == AF_INET6)
+ port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
+ }
+ if (!error) {
+ if (port)
+ saddr = (struct sockaddr*)&ss;
+ else
+ error = EPROGUNAVAIL;
+ }
+ }
+ if (saddr)
+ MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK|M_ZERO);
+ if (saddr && fh)
+ MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ if (!saddr || !fh || !path) {
+ if (!error)
+ error = ENOMEM;
+ if (fh)
+ FREE(fh, M_TEMP);
+ if (path)
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ fh = NULL;
+ path = NULL;
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+ nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
+ error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
+ path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
+ NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
+ if (!error) {
+ /* Make sure we can agree on a security flavor. */
+ int o, s; /* indices into mount option and server security flavor lists */
+ int found = 0;
+
+ if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
+ /* Some servers return an empty list to indicate RPCAUTH_SYS? */
+ nmp->nm_servsec.count = 1;
+ nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
+ }
+ if (nmp->nm_sec.count) {
+ /* Choose the first flavor in our list that the server supports. */
+ if (!nmp->nm_servsec.count) {
+ /* we don't know what the server supports, just use our first choice */
+ nmp->nm_auth = nmp->nm_sec.flavors[0];
+ found = 1;
+ }
+ for (o=0; !found && (o < nmp->nm_sec.count); o++)
+ for (s=0; !found && (s < nmp->nm_servsec.count); s++)
+ if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
+ nmp->nm_auth = nmp->nm_sec.flavors[o];
+ found = 1;
+ }
+ } else {
+ /* Choose the first one we support from the server's list. */
+ if (!nmp->nm_servsec.count) {
+ nmp->nm_auth = RPCAUTH_SYS;
+ found = 1;
+ }
+ for (s=0; s < nmp->nm_servsec.count; s++)
+ switch (nmp->nm_servsec.flavors[s]) {
+ case RPCAUTH_SYS:
+ /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
+ if (found && (nmp->nm_auth == RPCAUTH_NONE))
+ found = 0;
+ case RPCAUTH_NONE:
+ case RPCAUTH_KRB5:
+ case RPCAUTH_KRB5I:
+ case RPCAUTH_KRB5P:
+ if (!found) {
+ nmp->nm_auth = nmp->nm_servsec.flavors[s];
+ found = 1;
+ }
+ break;
+ }
+ }
+ error = !found ? EAUTH : 0;
+ }
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ path = NULL;
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ FREE(fh, M_TEMP);
+ fh = NULL;
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+ if (nmp->nm_fh)
+ FREE(nmp->nm_fh, M_TEMP);
+ nmp->nm_fh = fh;
+ fh = NULL;
+ NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
+ }
+
+ /* put the real upcall in place */
+ upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
+ error = sock_setupcall(nso->nso_so, upcall, nmp);
+ if (error) {
+ nfs_socket_search_update_error(&nss, error);
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+
+ if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
+ /* set mntfromname to this location */
+ if (!NM_OMATTR_GIVEN(nmp, MNTFROM))
+ nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+ sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
+ /* some negotiated values need to remain unchanged for the life of the mount */
+ if (!nmp->nm_sotype)
+ nmp->nm_sotype = nso->nso_sotype;
+ if (!nmp->nm_vers) {
+ nmp->nm_vers = nfsvers;
+ /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
+ if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
+ if (nso->nso_saddr->sa_family == AF_INET)
+ port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
+ else if (nso->nso_saddr->sa_family == AF_INET6)
+ port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
+ else
+ port = 0;
+ if (port == NFS_PORT)
+ nmp->nm_nfsport = NFS_PORT;
+ }
+ }
+ /* do some version-specific pre-mount set up */
+ if (nmp->nm_vers >= NFS_VER4) {
+ microtime(&now);
+ nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
+ if (!NMFLAG(nmp, NOCALLBACK))
+ nfs4_mount_callback_setup(nmp);
+ }
+ }
+
+ /* Initialize NFS socket state variables */
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
+ nmp->nm_srtt[3] = (NFS_TIMEO << 3);
+ nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
+ nmp->nm_sdrtt[3] = 0;
+ if (nso->nso_sotype == SOCK_DGRAM) {
+ nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
+ nmp->nm_sent = 0;
+ } else if (nso->nso_sotype == SOCK_STREAM) {
+ nmp->nm_timeouts = 0;
+ }
+ nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
+ nmp->nm_sockflags |= NMSOCK_SETUP;
+ /* move the socket to the mount structure */
+ nmp->nm_nso = nso;
+ oldsaddr = nmp->nm_saddr;
+ nmp->nm_saddr = nso->nso_saddr;
+ lck_mtx_unlock(&nmp->nm_lock);
+ error = nfs_connect_setup(nmp);
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~NMSOCK_SETUP;
+ if (!error) {
+ nmp->nm_sockflags |= NMSOCK_READY;
+ wakeup(&nmp->nm_sockflags);
+ }
+ if (error) {
+ NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
+ nfs_socket_search_update_error(&nss, error);
+ nmp->nm_saddr = oldsaddr;
+ if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
+ /* undo settings made prior to setup */
+ if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE))
+ nmp->nm_sotype = 0;
+ if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_VERSION)) {
+ if (nmp->nm_vers >= NFS_VER4) {
+ if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT))
+ nmp->nm_nfsport = 0;
+ if (nmp->nm_cbid)
+ nfs4_mount_callback_shutdown(nmp);
+ if (IS_VALID_CRED(nmp->nm_mcred))
+ kauth_cred_unref(&nmp->nm_mcred);
+ bzero(&nmp->nm_un, sizeof(nmp->nm_un));
+ }
+ nmp->nm_vers = 0;
+ }
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ nmp->nm_nso = NULL;
+ nfs_socket_destroy(nso);
+ goto keepsearching;
+ }
+
+ /* update current location */
+ if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
+ (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
+ /* server has changed, we should initiate failover/recovery */
+ // XXX
+ }
+ nmp->nm_locations.nl_current = nso->nso_location;
+ nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
+
+ if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
+ /* We have now successfully connected... make a note of it. */
+ nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
+ }
+
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (oldsaddr)
+ FREE(oldsaddr, M_SONAME);
+
+ if (nss.nss_flags & NSS_WARNED)
+ log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+
+ nmp->nm_nss = NULL;
+ nfs_socket_search_cleanup(&nss);
+ if (fh)
+ FREE(fh, M_TEMP);
+ if (path)
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ return (0);
+}
+
+
+/* setup & confirm socket connection is functional */
+int
+nfs_connect_setup(struct nfsmount *nmp)
+{
+ int error = 0;
+
+ if (nmp->nm_vers >= NFS_VER4) {
+ if (nmp->nm_state & NFSSTA_CLIENTID) {
+ /* first, try to renew our current state */
+ error = nfs4_renew(nmp, R_SETUP);
+ if ((error == NFSERR_ADMIN_REVOKED) ||
+ (error == NFSERR_CB_PATH_DOWN) ||
+ (error == NFSERR_EXPIRED) ||
+ (error == NFSERR_LEASE_MOVED) ||
+ (error == NFSERR_STALE_CLIENTID)) {
+ lck_mtx_lock(&nmp->nm_lock);
+ nfs_need_recover(nmp, error);
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ }
+ error = nfs4_setclientid(nmp);
+ }
+ return (error);
+}
+
+/*
+ * NFS socket reconnect routine:
+ * Called when a connection is broken.
+ * - disconnect the old socket
+ * - nfs_connect() again
+ * - set R_MUSTRESEND for all outstanding requests on mount point
+ * If this fails the mount point is DEAD!
+ */
int
nfs_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rq;
	struct timeval now;
	thread_t thd = current_thread();
	int error, wentdown = 0, verbose = 1;
	time_t lastmsg;
	int timeo;

	microuptime(&now);
	/* Start the "can not connect" message timer relative to now. */
	lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);

	nfs_disconnect(nmp);


	lck_mtx_lock(&nmp->nm_lock);
	/* Squishy mounts get a shorter connect timeout. */
	timeo = nfs_is_squishy(nmp) ? 8 : 30;
	lck_mtx_unlock(&nmp->nm_lock);

	/* Retry the connect until it succeeds or we have to give up. */
	while ((error = nfs_connect(nmp, verbose, timeo))) {
		/* only be verbose on the first attempt */
		verbose = 0;
		nfs_disconnect(nmp);
		if ((error == EINTR) || (error == ERESTART))
			return (EINTR);
		if (error == EIO)
			return (EIO);
		microuptime(&now);
		if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
			lastmsg = now.tv_sec;
			nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
			wentdown = 1;
		}
		lck_mtx_lock(&nmp->nm_lock);
		if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
			/* we're not yet completely mounted and */
			/* we can't reconnect, so we fail */
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("Not mounted returning %d\n", error);
			return (error);
		}

		if (nfs_mount_check_dead_timeout(nmp)) {
			/* The dead timeout has expired; the mount is unusable. */
			nfs_mount_make_zombie(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
			return (ENXIO);
		}

		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			lck_mtx_unlock(&nmp->nm_lock);
			return (error);
		}
		lck_mtx_unlock(&nmp->nm_lock);
		/* Pause a couple of seconds before retrying the connect. */
		tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2*hz);
		if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
			return (error);
	}

	if (wentdown)
		nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");

	/*
	 * Loop through outstanding request list and mark all requests
	 * as needing a resend. (Though nfs_need_reconnect() probably
	 * marked them all already.)
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
		if (rq->r_nmp == nmp) {
			lck_mtx_lock(&rq->r_mtx);
			if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
				rq->r_flags |= R_MUSTRESEND;
				rq->r_rtt = -1;
				wakeup(rq);
				/* requeue async requests that aren't currently being serviced */
				if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
					nfs_asyncio_resend(rq);
			}
			lck_mtx_unlock(&rq->r_mtx);
		}
	}
	lck_mtx_unlock(nfs_request_mutex);
	return (0);
}
+
+/*
+ * NFS disconnect. Clean up and unlink.
+ */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct nfs_socket *nso;

	lck_mtx_lock(&nmp->nm_lock);
tryagain:
	if (nmp->nm_nso) {
		struct timespec ts = { 1, 0 };
		if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
			nmp->nm_state |= NFSSTA_WANTSND;
			msleep(&nmp->nm_state, &nmp->nm_lock, PZERO-1, "nfswaitsending", &ts);
			goto tryagain;
		}
		if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
			msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO-1, "nfswaitpoke", &ts);
			goto tryagain;
		}
		/* Mark the socket as going away and detach it from the mount. */
		nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
		nmp->nm_sockflags &= ~NMSOCK_READY;
		nso = nmp->nm_nso;
		nmp->nm_nso = NULL;
		/* nm_saddr keeps ownership of the sockaddr; don't let the socket free it */
		if (nso->nso_saddr == nmp->nm_saddr)
			nso->nso_saddr = NULL;
		lck_mtx_unlock(&nmp->nm_lock);
		/* Tear down the socket outside the mount lock (may block). */
		nfs_socket_destroy(nso);
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
		lck_mtx_unlock(&nmp->nm_lock);
	} else {
		lck_mtx_unlock(&nmp->nm_lock);
	}
}
+
+/*
+ * mark an NFS mount as needing a reconnect/resends.
+ */
+void
+nfs_need_reconnect(struct nfsmount *nmp)
+{
+ struct nfsreq *rq;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~(NMSOCK_READY|NMSOCK_SETUP);
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ /*
+ * Loop through outstanding request list and
+ * mark all requests as needing a resend.
+ */
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+ if (rq->r_nmp == nmp) {
+ lck_mtx_lock(&rq->r_mtx);
+ if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
+ rq->r_flags |= R_MUSTRESEND;
+ rq->r_rtt = -1;
+ wakeup(rq);
+ if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ nfs_asyncio_resend(rq);
+ }
+ lck_mtx_unlock(&rq->r_mtx);
+ }
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+}
+
+
+/*
+ * thread to handle miscellaneous async NFS socket work (reconnects/resends)
+ */
+void
+nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
+{
+ struct nfsmount *nmp = arg;
+ struct timespec ts = { 30, 0 };
+ thread_t thd = current_thread();
+ struct nfsreq *req;
+ struct timeval now;
+ int error, dofinish;
+ nfsnode_t np;
+ int do_reconnect_sleep = 0;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ while (!(nmp->nm_sockflags & NMSOCK_READY) ||
+ !TAILQ_EMPTY(&nmp->nm_resendq) ||
+ !LIST_EMPTY(&nmp->nm_monlist) ||
+ nmp->nm_deadto_start ||
+ (nmp->nm_state & NFSSTA_RECOVER) ||
+ ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq)))
+ {
+ if (nmp->nm_sockflags & NMSOCK_UNMOUNT)
+ break;
+ /* do reconnect, if necessary */
+ if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
+ if (nmp->nm_reconnect_start <= 0) {
+ microuptime(&now);
+ nmp->nm_reconnect_start = now.tv_sec;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			/*
			 * XXX We don't want to call reconnect again right away if the previous
			 * attempt returned an error without having blocked. This has caused
			 * machines to spam null procs in the past.
			 */
+ if (do_reconnect_sleep)
+ tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
+ error = nfs_reconnect(nmp);
+ if (error) {
+ int lvl = 7;
+ if (error == EIO || error == EINTR) {
+ lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
+ }
+ nfs_printf(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
+ } else {
+ nmp->nm_reconnect_start = 0;
+ do_reconnect_sleep = 0;
+ }
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ if ((nmp->nm_sockflags & NMSOCK_READY) &&
+ (nmp->nm_state & NFSSTA_RECOVER) &&
+ !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
+ !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
+ /* perform state recovery */
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_recover(nmp);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ /* handle NFSv4 delegation returns */
+ while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) &&
+ (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
+ ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ /* do resends, if necessary/possible */
+ while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
+ (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) &&
+ ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
+ if (req->r_resendtime)
+ microuptime(&now);
+ while (req && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime))
+ req = TAILQ_NEXT(req, r_rchain);
+ if (!req)
+ break;
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_rchain.tqe_next = NFSREQNOLIST;
+ lck_mtx_unlock(&nmp->nm_lock);
+ lck_mtx_lock(&req->r_mtx);
+ /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
+ if (req->r_error || req->r_nmrep.nmc_mhead) {
+ dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ nfs_request_rele(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ continue;
+ }
+ if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
+ req->r_flags &= ~R_RESTART;
+ req->r_resendtime = 0;
+ lck_mtx_unlock(&req->r_mtx);
+ /* async RPCs on GSS mounts need to be rebuilt and resent. */
+ nfs_reqdequeue(req);
+ if (nfs_request_using_gss(req)) {
+ nfs_gss_clnt_rpcdone(req);
+ error = nfs_gss_clnt_args_restore(req);
+ if (error == ENEEDAUTH)
+ req->r_xid = 0;
+ }
+ NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
+ nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
+ req->r_flags, req->r_rtt);
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ if (!error)
+ error = nfs_request_add_header(req);
+ if (!error)
+ error = nfs_request_send(req, 0);
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ if (error)
+ req->r_error = error;
+ wakeup(req);
+ dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ nfs_request_rele(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ error = 0;
+ continue;
+ }
+ NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
+ req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ if (!error) {
+ req->r_flags |= R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+ error = nfs_send(req, 0);
+ lck_mtx_lock(&req->r_mtx);
+ if (!error) {
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ nfs_request_rele(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ continue;
+ }
+ }
+ req->r_error = error;
+ if (req->r_flags & R_RESENDQ)
+ req->r_flags &= ~R_RESENDQ;
+ wakeup(req);
+ dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
+ lck_mtx_unlock(&req->r_mtx);
+ if (dofinish)
+ nfs_asyncio_finish(req);
+ nfs_request_rele(req);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+ if (nfs_mount_check_dead_timeout(nmp)) {
+ nfs_mount_make_zombie(nmp);
+ break;
+ }
+
+ if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))
+ break;
+ /* check monitored nodes, if necessary/possible */
+ if (!LIST_EMPTY(&nmp->nm_monlist)) {
+ nmp->nm_state |= NFSSTA_MONITOR_SCAN;
+ LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
+ if (!(nmp->nm_sockflags & NMSOCK_READY) ||
+ (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD)))
+ break;
+ np->n_mflag |= NMMONSCANINPROG;
+ lck_mtx_unlock(&nmp->nm_lock);
+ error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED|NGA_MONITOR));
+ if (!error && ISSET(np->n_flag, NUPDATESIZE)) /* update quickly to avoid multiple events */
+ nfs_data_update_size(np, 0);
+ lck_mtx_lock(&nmp->nm_lock);
+ np->n_mflag &= ~NMMONSCANINPROG;
+ if (np->n_mflag & NMMONSCANWANT) {
+ np->n_mflag &= ~NMMONSCANWANT;
+ wakeup(&np->n_mflag);
+ }
+ if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
+ (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD)))
+ break;
+ }
+ nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
+ if (nmp->nm_state & NFSSTA_UNMOUNTING)
+ wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
+ }
+ if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING))) {
+ if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
+ (nmp->nm_state & NFSSTA_RECOVER))
+ ts.tv_sec = 1;
+ else
+ ts.tv_sec = 5;
+ msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
+ }
+ }
+
+ /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
+ if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
+ (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
+ (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs3_umount_rpc(nmp, vfs_context_kernel(),
+ (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
+ lck_mtx_lock(&nmp->nm_lock);
+ }
+
+ if (nmp->nm_sockthd == thd)
+ nmp->nm_sockthd = NULL;
+ lck_mtx_unlock(&nmp->nm_lock);
+ wakeup(&nmp->nm_sockthd);
+ thread_terminate(thd);
+}
+
+/* Start the mount's socket worker thread, or wake it if it already exists. */
+void
+nfs_mount_sock_thread_wake(struct nfsmount *nmp)
+{
+ /* An existing thread just needs a poke. */
+ if (nmp->nm_sockthd) {
+ wakeup(&nmp->nm_sockthd);
+ return;
+ }
+ /* No thread yet: create one, dropping the reference taken by thread creation. */
+ if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS)
+ thread_deallocate(nmp->nm_sockthd);
+}
+
+/*
+ * Check if we should mark the mount dead because the
+ * unresponsive mount has reached the dead timeout.
+ * (must be called with nmp locked)
+ */
+int
+nfs_mount_check_dead_timeout(struct nfsmount *nmp)
+{
+ struct timeval now;
+
+ /* Mount has already been declared dead. */
+ if (nmp->nm_state & NFSSTA_DEAD)
+ return 1;
+ /* Server isn't currently unresponsive; nothing to time out. */
+ if (!nmp->nm_deadto_start)
+ return 0;
+ /* Recompute the current dead timeout (mount may be "squishy"). */
+ nfs_is_squishy(nmp);
+ /* A non-positive timeout means dead-timeout handling is disabled. */
+ if (nmp->nm_curdeadtimeout <= 0)
+ return 0;
+ /* Dead once the unresponsive period reaches the timeout. */
+ microuptime(&now);
+ return ((now.tv_sec - nmp->nm_deadto_start) >= nmp->nm_curdeadtimeout);
+}
+
+/*
+ * Call nfs_mount_zombie to remove most of the
+ * nfs state for the mount, and then ask to be forcibly unmounted.
+ *
+ * Assumes the nfs mount structure lock nm_lock is held.
+ */
+
+void
+nfs_mount_make_zombie(struct nfsmount *nmp)
+{
+ fsid_t fsid;
+
+ if (!nmp)
+ return;
+
+ /* Already marked dead; nothing to do. */
+ if (nmp->nm_state & NFSSTA_DEAD)
+ return;
+
+ printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+ (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
+ /* Snapshot the fsid before dropping nm_lock; needed for the VQ_DEAD event below. */
+ fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_mount_zombie(nmp, NFSSTA_DEAD);
+ /* Ask the VFS layer to force-unmount the dead filesystem. */
+ vfs_event_signal(&fsid, VQ_DEAD, 0);
+ lck_mtx_lock(&nmp->nm_lock);
+}
+
+
+/*
+ * NFS callback channel socket state
+ */
+struct nfs_callback_socket
+{
+ TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
+ socket_t ncbs_so; /* the socket */
+ struct sockaddr_storage ncbs_saddr; /* socket address */
+ struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
+ time_t ncbs_stamp; /* last accessed at */
+ uint32_t ncbs_flags; /* see below */
+};
+#define NCBSOCK_UPCALL 0x0001 /* socket upcall (receive) in progress */
+#define NCBSOCK_UPCALLWANT 0x0002 /* a thread is waiting for the upcall to finish */
+#define NCBSOCK_DEAD 0x0004 /* socket should be reaped by the cleanup timer */
+
+/*
+ * NFS callback channel state
+ *
+ * One listening socket for accepting socket connections from servers and
+ * a list of connected sockets to handle callback requests on.
+ * Mounts registered with the callback channel are assigned IDs and
+ * put on a list so that the callback request handling code can match
+ * the requests up with mounts.
+ */
+socket_t nfs4_cb_so = NULL; /* listening IPv4 callback socket */
+socket_t nfs4_cb_so6 = NULL; /* listening IPv6 callback socket */
+in_port_t nfs4_cb_port = 0; /* port the IPv4 listener is bound to */
+in_port_t nfs4_cb_port6 = 0; /* port the IPv6 listener is bound to */
+uint32_t nfs4_cb_id = 0; /* next callback ID to hand out */
+uint32_t nfs4_cb_so_usecount = 0; /* number of mounts using the callback channel */
+TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks; /* accepted callback sockets */
+TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts; /* mounts registered for callbacks */
+
+int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
+
+/*
+ * Set up the callback channel for the NFS mount.
+ *
+ * Initializes the callback channel socket state and
+ * assigns a callback ID to the mount.
+ */
+void
+nfs4_mount_callback_setup(struct nfsmount *nmp)
+{
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ socket_t so = NULL;
+ socket_t so6 = NULL;
+ struct timeval timeo;
+ int error, on = 1;
+ in_port_t port;
+
+ lck_mtx_lock(nfs_global_mutex);
+ /* First use ever: initialize the global lists and skip ID 0. */
+ if (nfs4_cb_id == 0) {
+ TAILQ_INIT(&nfs4_cb_mounts);
+ TAILQ_INIT(&nfs4_cb_socks);
+ nfs4_cb_id++;
+ }
+ /* Assign the mount a non-zero callback ID (0 means "no callback ID"). */
+ nmp->nm_cbid = nfs4_cb_id++;
+ if (nmp->nm_cbid == 0)
+ nmp->nm_cbid = nfs4_cb_id++;
+ nfs4_cb_so_usecount++;
+ TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
+
+ /* Listening sockets already set up by an earlier mount. */
+ if (nfs4_cb_so) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+
+ /* IPv4 */
+ error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
+ goto fail;
+ }
+ so = nfs4_cb_so;
+
+ sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
+ error = sock_bind(so, (struct sockaddr *)&sin);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
+ goto fail;
+ }
+ /* Read back the actual port we got (in case nfs_callback_port was 0). */
+ error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
+ goto fail;
+ }
+ nfs4_cb_port = ntohs(sin.sin_port);
+
+ error = sock_listen(so, 32);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
+ goto fail;
+ }
+
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
+ error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
+ sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+ /* Socket-option failures above are non-fatal; clear error before IPv6 setup. */
+ error = 0;
+
+ /* IPv6 */
+ /* NOTE(review): an IPv6 setup failure tears down the working IPv4 listener too. */
+ error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
+ goto fail;
+ }
+ so6 = nfs4_cb_so6;
+
+ sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
+ /* try to use specified port or same port as IPv4 */
+ port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
+ipv6_bind_again:
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = in6addr_any;
+ sin6.sin6_port = htons(port);
+ error = sock_bind(so6, (struct sockaddr *)&sin6);
+ if (error) {
+ if (port != nfs_callback_port) {
+ /* if we simply tried to match the IPv4 port, then try any port */
+ port = 0;
+ goto ipv6_bind_again;
+ }
+ log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
+ goto fail;
+ }
+ error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
+ goto fail;
+ }
+ nfs4_cb_port6 = ntohs(sin6.sin6_port);
+
+ error = sock_listen(so6, 32);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
+ goto fail;
+ }
+
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
+ error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
+ sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+ error = 0;
+
+fail:
+ if (error) {
+ /* Clear globals under the mutex, then close sockets outside it. */
+ nfs4_cb_so = nfs4_cb_so6 = NULL;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ if (so6) {
+ sock_shutdown(so6, SHUT_RDWR);
+ sock_close(so6);
+ }
+ } else {
+ lck_mtx_unlock(nfs_global_mutex);
+ }
+}
+
+/*
+ * Shut down the callback channel for the NFS mount.
+ *
+ * Clears the mount's callback ID and releases the mounts
+ * reference on the callback socket. Last reference dropped
+ * will also shut down the callback socket(s).
+ */
+void
+nfs4_mount_callback_shutdown(struct nfsmount *nmp)
+{
+ struct nfs_callback_socket *ncbsp;
+ socket_t so, so6;
+ struct nfs4_cb_sock_list cb_socks;
+ struct timespec ts = {1,0};
+
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
+ /* wait for any callbacks in progress to complete */
+ while (nmp->nm_cbrefs)
+ msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+ nmp->nm_cbid = 0;
+ /* Other mounts still use the callback channel; leave the sockets up. */
+ if (--nfs4_cb_so_usecount) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+ /* Last user: take ownership of the listeners and all accepted sockets. */
+ so = nfs4_cb_so;
+ so6 = nfs4_cb_so6;
+ nfs4_cb_so = nfs4_cb_so6 = NULL;
+ TAILQ_INIT(&cb_socks);
+ TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
+ lck_mtx_unlock(nfs_global_mutex);
+ /* Close everything outside the global mutex. */
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ if (so6) {
+ sock_shutdown(so6, SHUT_RDWR);
+ sock_close(so6);
+ }
+ while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
+ TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
+ FREE(ncbsp, M_TEMP);
+ }
+}
+
+/*
+ * Check periodically for stale/unused nfs callback sockets
+ */
+#define NFS4_CB_TIMER_PERIOD 30
+#define NFS4_CB_IDLE_MAX 300
+void
+nfs4_callback_timer(__unused void *param0, __unused void *param1)
+{
+ struct nfs_callback_socket *ncbsp, *nextncbsp;
+ struct timeval now;
+
+loop:
+ lck_mtx_lock(nfs_global_mutex);
+ /* No sockets left: stop rescheduling the timer. */
+ if (TAILQ_EMPTY(&nfs4_cb_socks)) {
+ nfs4_callback_timer_on = 0;
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+ microuptime(&now);
+ TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
+ /* Keep sockets that are alive and were used recently. */
+ if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
+ (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX)))
+ continue;
+ TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
+ /* Drop the mutex to close the socket, then rescan from the top. */
+ lck_mtx_unlock(nfs_global_mutex);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
+ FREE(ncbsp, M_TEMP);
+ goto loop;
+ }
+ /* Re-arm the cleanup timer. */
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ NFS4_CB_TIMER_PERIOD * 1000);
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Accept a new callback socket.
+ */
+void
+nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
+{
+ socket_t newso = NULL;
+ struct nfs_callback_socket *ncbsp;
+ struct nfsmount *nmp;
+ struct timeval timeo, now;
+ int error, on = 1, ip;
+
+ /* Determine which listener fired; ignore upcalls from unknown sockets. */
+ if (so == nfs4_cb_so)
+ ip = 4;
+ else if (so == nfs4_cb_so6)
+ ip = 6;
+ else
+ return;
+
+ /* allocate/initialize a new nfs_callback_socket */
+ MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
+ if (!ncbsp) {
+ log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
+ return;
+ }
+ bzero(ncbsp, sizeof(*ncbsp));
+ ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
+ nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);
+
+ /* accept a new socket */
+ error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
+ ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
+ nfs4_cb_rcv, ncbsp, &newso);
+ if (error) {
+ log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
+ FREE(ncbsp, M_TEMP);
+ return;
+ }
+
+ /* set up the new socket */
+ /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
+ sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+
+ ncbsp->ncbs_so = newso;
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+
+ lck_mtx_lock(nfs_global_mutex);
+
+ /* add it to the list */
+ TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
+
+ /* verify it's from a host we have mounted */
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ /* check if socket's source address matches this mount's server address */
+ if (!nmp->nm_saddr)
+ continue;
+ if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0)
+ break;
+ }
+ if (!nmp) /* we don't want this socket, mark it dead */
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+
+ /* make sure the callback socket cleanup timer is running */
+ /* (shorten the timer if we've got a socket we don't want) */
+ if (!nfs4_callback_timer_on) {
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
+ } else if (!nmp && (nfs4_callback_timer_on < 2)) {
+ /* Unwanted socket: re-arm the timer to fire soon so it gets reaped quickly. */
+ nfs4_callback_timer_on = 2;
+ thread_call_cancel(nfs4_callback_timer_call);
+ nfs_interval_timer_start(nfs4_callback_timer_call, 500);
+ }
+
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Receive mbufs from callback sockets into RPC records and process each record.
+ * Detect connection has been closed and shut down.
+ */
+void
+nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfs_callback_socket *ncbsp = arg;
+ struct timespec ts = {1,0};
+ struct timeval now;
+ mbuf_t m;
+ int error = 0, recv = 1;
+
+ /* Serialize upcalls on this socket: only one reader at a time. */
+ lck_mtx_lock(nfs_global_mutex);
+ while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
+ /* wait if upcall is already in progress */
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
+ msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+ }
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
+
+ /* loop while we make error-free progress */
+ while (!error && recv) {
+ error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
+ if (m) /* handle the request */
+ error = nfs4_cb_handler(ncbsp, m);
+ }
+
+ /* note: no error and no data indicates server closed its end */
+ if ((error != EWOULDBLOCK) && (error || !recv)) {
+ /*
+ * Socket is either being closed or should be.
+ * We can't close the socket in the context of the upcall.
+ * So we mark it as dead and leave it for the cleanup timer to reap.
+ */
+ ncbsp->ncbs_stamp = 0;
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+ } else {
+ /* Still healthy: refresh the last-used timestamp. */
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+ }
+
+ /* Release the upcall "lock" and wake any waiters. */
+ lck_mtx_lock(nfs_global_mutex);
+ ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
+ wakeup(ncbsp);
+}
+
+/*
+ * Handle an NFS callback channel request.
+ */
+/*
+ * Parse one RPC request received on a callback socket, execute the
+ * CB_COMPOUND/CB_NULL operations, and send the reply.  Consumes 'mreq'.
+ * Returns 0 on success or an errno; a non-zero return causes the caller
+ * (nfs4_cb_rcv) to mark the socket dead.
+ */
+int
+nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
+{
+ socket_t so = ncbsp->ncbs_so;
+ struct nfsm_chain nmreq, nmrep;
+ mbuf_t mhead = NULL, mrest = NULL, m;
+ struct msghdr msg;
+ struct nfsmount *nmp;
+ fhandle_t fh;
+ nfsnode_t np;
+ nfs_stateid stateid;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
+ uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
+ uint32_t auth_type, auth_len;
+ uint32_t numres, *pnumres;
+ int error = 0, replen, len;
+ size_t sentlen = 0;
+
+ xid = numops = op = status = procnum = taglen = cbid = 0;
+
+ /* Dissect the RPC call header. */
+ nfsm_chain_dissect_init(error, &nmreq, mreq);
+ nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Call
+ nfsm_assert(error, (val == RPC_CALL), EBADRPC);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Version
+ nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
+ nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
+ nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
+
+ /* Handle authentication */
+ /* XXX just ignore auth for now - handling kerberos may be tricky */
+ nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
+ nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
+ nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ if (error) {
+ status = error;
+ error = 0;
+ goto nfsmout;
+ }
+
+ switch (procnum) {
+ case NFSPROC4_CB_NULL:
+ status = NFSERR_RETVOID;
+ break;
+ case NFSPROC4_CB_COMPOUND:
+ /* tag, minorversion, cb ident, numops, op array */
+ nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
+ /*
+ * Bound the tag length.  (Fixed: this previously checked the stale
+ * 'val' instead of 'taglen', so an oversized tag length was never
+ * rejected before being used for allocation and the copy loop below.)
+ */
+ nfsm_assert(error, (taglen <= NFS4_OPAQUE_LIMIT), EBADRPC);
+
+ /* start building the body of the response */
+ nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5*NFSX_UNSIGNED);
+ nfsm_chain_init(&nmrep, mrest);
+
+ /* copy tag from request to response */
+ nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
+ for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
+ nfsm_chain_get_32(error, &nmreq, val);
+ nfsm_chain_add_32(error, &nmrep, val);
+ }
+
+ /* insert number of results placeholder */
+ numres = 0;
+ nfsm_chain_add_32(error, &nmrep, numres);
+ /* Remember where the count lives so it can be patched in after the op loop. */
+ pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
+
+ nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
+ nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
+ nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
+ nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
+ if (error) {
+ if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH))
+ status = error;
+ else if ((error == ENOBUFS) || (error == ENOMEM))
+ status = NFSERR_RESOURCE;
+ else
+ status = NFSERR_SERVERFAULT;
+ error = 0;
+ nfsm_chain_null(&nmrep);
+ goto nfsmout;
+ }
+ /* match the callback ID to a registered mount */
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ if (nmp->nm_cbid != cbid)
+ continue;
+ /* verify socket's source address matches this mount's server address */
+ if (!nmp->nm_saddr)
+ continue;
+ if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0)
+ break;
+ }
+ /* mark the NFS mount as busy */
+ if (nmp)
+ nmp->nm_cbrefs++;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (!nmp) {
+ /* if no mount match, just drop socket. */
+ error = EPERM;
+ nfsm_chain_null(&nmrep);
+ goto out;
+ }
+
+ /* process ops, adding results to mrest */
+ while (numops > 0) {
+ numops--;
+ nfsm_chain_get_32(error, &nmreq, op);
+ if (error)
+ break;
+ switch (op) {
+ case NFS_OP_CB_GETATTR:
+ // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
+ np = NULL;
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ bmlen = NFS_ATTR_BITMAP_LEN;
+ nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
+ if (error) {
+ status = error;
+ error = 0;
+ numops = 0; /* don't process any more ops */
+ } else {
+ /* find the node for the file handle */
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ if (error || !np) {
+ status = NFSERR_BADHANDLE;
+ error = 0;
+ np = NULL;
+ numops = 0; /* don't process any more ops */
+ }
+ }
+ nfsm_chain_add_32(error, &nmrep, op);
+ nfsm_chain_add_32(error, &nmrep, status);
+ if (!error && (status == EBADRPC))
+ error = status;
+ if (np) {
+ /* only allow returning size, change, and mtime attrs */
+ NFS_CLEAR_ATTRIBUTES(&rbitmap);
+ attrbytes = 0;
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
+ attrbytes += 3 * NFSX_UNSIGNED;
+ }
+ nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
+ nfsm_chain_add_32(error, &nmrep, attrbytes);
+ /* Report 'change' bumped by one if we have uncommitted modifications. */
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE))
+ nfsm_chain_add_64(error, &nmrep,
+ np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE))
+ nfsm_chain_add_64(error, &nmrep, np->n_size);
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
+ nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
+ }
+ nfs_node_unlock(np);
+ vnode_put(NFSTOV(np));
+ np = NULL;
+ }
+ /*
+ * If we hit an error building the reply, we can't easily back up.
+ * So we'll just update the status and hope the server ignores the
+ * extra garbage.
+ */
+ break;
+ case NFS_OP_CB_RECALL:
+ // (STATEID, TRUNCATE, FH) -> (STATUS)
+ np = NULL;
+ nfsm_chain_get_stateid(error, &nmreq, &stateid);
+ nfsm_chain_get_32(error, &nmreq, truncate);
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ if (error) {
+ status = error;
+ error = 0;
+ numops = 0; /* don't process any more ops */
+ } else {
+ /* find the node for the file handle */
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
+ if (error || !np) {
+ status = NFSERR_BADHANDLE;
+ error = 0;
+ np = NULL;
+ numops = 0; /* don't process any more ops */
+ } else if (!(np->n_openflags & N_DELEG_MASK) ||
+ bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
+ /* delegation stateid state doesn't match */
+ status = NFSERR_BAD_STATEID;
+ numops = 0; /* don't process any more ops */
+ }
+ if (!status) /* add node to recall queue, and wake socket thread */
+ nfs4_delegation_return_enqueue(np);
+ if (np) {
+ nfs_node_unlock(np);
+ vnode_put(NFSTOV(np));
+ }
+ }
+ nfsm_chain_add_32(error, &nmrep, op);
+ nfsm_chain_add_32(error, &nmrep, status);
+ if (!error && (status == EBADRPC))
+ error = status;
+ break;
+ case NFS_OP_CB_ILLEGAL:
+ default:
+ nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
+ status = NFSERR_OP_ILLEGAL;
+ nfsm_chain_add_32(error, &nmrep, status);
+ numops = 0; /* don't process any more ops */
+ break;
+ }
+ numres++;
+ }
+
+ /* Map any internal error we haven't reported yet to an NFS status. */
+ if (!status && error) {
+ if (error == EBADRPC)
+ status = error;
+ else if ((error == ENOBUFS) || (error == ENOMEM))
+ status = NFSERR_RESOURCE;
+ else
+ status = NFSERR_SERVERFAULT;
+ error = 0;
+ }
+
+ /* Now, set the numres field */
+ *pnumres = txdr_unsigned(numres);
+ nfsm_chain_build_done(error, &nmrep);
+ nfsm_chain_null(&nmrep);
+
+ /* drop the callback reference on the mount */
+ lck_mtx_lock(nfs_global_mutex);
+ nmp->nm_cbrefs--;
+ /* If a shutdown cleared nm_cbid, it's waiting on nm_cbrefs to drain. */
+ if (!nmp->nm_cbid)
+ wakeup(&nmp->nm_cbrefs);
+ lck_mtx_unlock(nfs_global_mutex);
+ break;
+ }
+
+nfsmout:
+ if (status == EBADRPC)
+ OSAddAtomic64(1, &nfsstats.rpcinvalid);
+
+ /* build reply header */
+ error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
+ nfsm_chain_init(&nmrep, mhead);
+ nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
+ nfsm_chain_add_32(error, &nmrep, xid);
+ nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
+ if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
+ nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
+ if (status & NFSERR_AUTHERR) {
+ nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
+ nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
+ } else {
+ nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
+ nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+ nfsm_chain_add_32(error, &nmrep, RPC_VER2);
+ }
+ } else {
+ /* reply status */
+ nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
+ /* XXX RPCAUTH_NULL verifier */
+ nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
+ nfsm_chain_add_32(error, &nmrep, 0);
+ /* accepted status */
+ switch (status) {
+ case EPROGUNAVAIL:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
+ break;
+ case EPROGMISMATCH:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
+ nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+ nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
+ break;
+ case EPROCUNAVAIL:
+ nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
+ break;
+ case EBADRPC:
+ nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
+ break;
+ default:
+ nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
+ if (status != NFSERR_RETVOID)
+ nfsm_chain_add_32(error, &nmrep, status);
+ break;
+ }
+ }
+ nfsm_chain_build_done(error, &nmrep);
+ if (error) {
+ nfsm_chain_null(&nmrep);
+ goto out;
+ }
+ /* Chain the reply body (mrest) onto the header; nmrep owns it from here. */
+ error = mbuf_setnext(nmrep.nmc_mcur, mrest);
+ if (error) {
+ printf("nfs cb: mbuf_setnext failed %d\n", error);
+ goto out;
+ }
+ mrest = NULL;
+ /* Calculate the size of the reply */
+ replen = 0;
+ for (m = nmrep.nmc_mhead; m; m = mbuf_next(m))
+ replen += mbuf_len(m);
+ mbuf_pkthdr_setlen(mhead, replen);
+ error = mbuf_pkthdr_setrcvif(mhead, NULL);
+ /* Patch the TCP record mark: length (minus the mark itself) with last-fragment bit. */
+ nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
+ nfsm_chain_null(&nmrep);
+
+ /* send the reply */
+ bzero(&msg, sizeof(msg));
+ error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
+ mhead = NULL;
+ if (!error && ((int)sentlen != replen))
+ error = EWOULDBLOCK;
+ if (error == EWOULDBLOCK) /* inability to send response is considered fatal */
+ error = ETIMEDOUT;
+out:
+ if (error)
+ nfsm_chain_cleanup(&nmrep);
+ if (mhead)
+ mbuf_freem(mhead);
+ if (mrest)
+ mbuf_freem(mrest);
+ if (mreq)
+ mbuf_freem(mreq);
+ return (error);
+}
+
+
+/*
+ * Initialize an nfs_rpc_record_state structure.
+ */
+void
+nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
+{
+ /* Start clean: no buffered mbufs, counters zeroed. */
+ bzero(nrrsp, sizeof(*nrrsp));
+ /* The first thing to read from the stream is the 4-byte TCP record marker. */
+ nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
+}
+
+/*
+ * Clean up an nfs_rpc_record_state structure.
+ */
+void
+nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
+{
+ /* Nothing buffered; nothing to release. */
+ if (!nrrsp->nrrs_m)
+ return;
+ /* Free the partially-assembled record and clear both chain pointers. */
+ mbuf_freem(nrrsp->nrrs_m);
+ nrrsp->nrrs_m = NULL;
+ nrrsp->nrrs_mlast = NULL;
+}
+
/*
 * Read the next (marked) RPC record from the socket.
 *
 * The TCP stream carries records as a 4-byte marker (high bit set on the
 * last fragment, low 31 bits = fragment length) followed by that many
 * bytes of fragment data.  Partial-read progress is kept in *nrrsp so
 * this can be called repeatedly from a non-blocking upcall.
 *
 * *recvp returns if any data was received.
 * *mp returns the next complete RPC record
 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* read directly into the marker field, resuming wherever a prior partial read left off */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen)
			break;
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft)
			continue;
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			/* high bit marks the final fragment of the record */
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m)
			break;
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* advance nrrs_mlast to the new tail of the chain */
		while (mbuf_next(m))
			m = mbuf_next(m);
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return (error);
}
+
+
+
+/*
+ * The NFS client send routine.
+ *
+ * Send the given NFS request out the mount's socket.
+ * Holds nfs_sndlock() for the duration of this call.
+ *
+ * - check for request termination (sigintr)
+ * - wait for reconnect, if necessary
+ * - UDP: check the congestion window
+ * - make a copy of the request to send
+ * - UDP: update the congestion window
+ * - send the request
+ *
+ * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
+ * rexmit count is also updated if this isn't the first send.
+ *
+ * If the send is not successful, make sure R_MUSTRESEND is set.
+ * If this wasn't the first transmit, set R_RESENDERR.
+ * Also, undo any UDP congestion window changes made.
+ *
+ * If the error appears to indicate that the socket should
+ * be reconnected, mark the socket for reconnection.
+ *
+ * Only return errors when the request should be aborted.
+ */
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { 2, 0 };

again:
	/* take the send lock; on failure record the error on the request */
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return (error);
	}

	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return (error);
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return (error);
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR))
			slpflag |= PCATCH;
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* async caller: just mark for resend and return */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return (0);
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
				break;
			msleep(req, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return (error);
		}
		/* socket should be ready now; retake the send lock and retry */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING;  /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return (0);
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return (0);
			}
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1)))
					break;
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				/* remove ourselves from the queue if we weren't woken by a dequeue */
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE)
				nmp->nm_cwnd = NFS_CWNDSCALE;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original chain is preserved for any retransmit */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait)
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return (0);
	}

	bzero(&msg, sizeof(msg));
	/* unconnected datagram sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	if (!error && (sentlen != req->r_mreqlen))
		error = EWOULDBLOCK;
	/* a partial send on a stream socket leaves the stream out of sync */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT))
		req->r_rexmit = NFS_MAXREXMIT;

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit)
			OSAddAtomic64(1, &nfsstats.rpcretries);
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return (0);
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit)
		req->r_flags |= R_RESENDERR;
	if ((error == EINTR) || (error == ERESTART))
		req->r_error = error;
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			/* read SO_ERROR just to clear any pending error on the socket */
			int clearerror = 0, optlen = sizeof(clearerror);
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror)
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM)
			break;
		/* FALLTHROUGH */
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp))
		error = EIO;

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN))
		error = 0;
	if (error && (error != EINTR) && (error != ERESTART))
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2)
		error = error2;

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT))
		error = 0;
	return (error);
}
+
+/*
+ * NFS client socket upcalls
+ *
+ * Pull RPC replies out of an NFS mount's socket and match them
+ * up with the pending request.
+ *
+ * The datagram code is simple because we always get whole
+ * messages out of the socket.
+ *
+ * The stream code is more involved because we have to parse
+ * the RPC records out of the stream.
+ */
+
+/* NFS client UDP socket upcall */
+void
+nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfsmount *nmp = arg;
+ struct nfs_socket *nso = nmp->nm_nso;
+ size_t rcvlen;
+ mbuf_t m;
+ int error = 0;
+
+ if (nmp->nm_sockflags & NMSOCK_CONNECTING)
+ return;
+
+ do {
+ /* make sure we're on the current socket */
+ if (!nso || (nso->nso_so != so))
+ return;
+
+ m = NULL;
+ rcvlen = 1000000;
+ error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
+ if (m)
+ nfs_request_match_reply(nmp, m);
+ } while (m && !error);
+
+ if (error && (error != EWOULDBLOCK)) {
+ /* problems with the socket... mark for reconnection */
+ NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
+ nfs_need_reconnect(nmp);
+ }
+}
+
/* NFS client TCP socket upcall */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;

	/* don't interfere while a (re)connect is in progress */
	if (nmp->nm_sockflags & NMSOCK_CONNECTING)
		return;

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL|NSO_DISCONNECTING|NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	nso->nso_flags |= NSO_UPCALL;
	/*
	 * Work on a private copy of the record-parsing state so the socket
	 * lock need not be held while receiving; NSO_UPCALL keeps other
	 * upcalls out until we write the state back below.
	 */
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) /* match completed response with request */
			nfs_request_match_reply(nmp, m);
	}

	/* Update the socket's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING)
		wup = 1;
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) /* a disconnect is waiting for this upcall to finish */
		wakeup(&nso->nso_flags);

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK))
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
+
+/*
+ * "poke" a socket to try to provoke any pending errors
+ */
+void
+nfs_sock_poke(struct nfsmount *nmp)
+{
+ struct iovec aio;
+ struct msghdr msg;
+ size_t len;
+ int error = 0;
+ int dummy;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
+ !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
+ /* Nothing to poke */
+ nmp->nm_sockflags &= ~NMSOCK_POKE;
+ wakeup(&nmp->nm_sockflags);
+ lck_mtx_unlock(&nmp->nm_lock);
+ return;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ aio.iov_base = &dummy;
+ aio.iov_len = 0;
+ len = 0;
+ bzero(&msg, sizeof(msg));
+ msg.msg_iov = &aio;
+ msg.msg_iovlen = 1;
+ error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
+ NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~NMSOCK_POKE;
+ wakeup(&nmp->nm_sockflags);
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_is_dead(error, nmp);
+}
+
/*
 * Match an RPC reply with the corresponding request
 */
void
nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
{
	struct nfsreq *req;
	struct nfsm_chain nmrep;
	u_int32_t reply = 0, rxid = 0;
	int error = 0, asyncioq, t1;

	/* Get the xid and check that it is an rpc reply */
	nfsm_chain_dissect_init(error, &nmrep, mrep);
	nfsm_chain_get_32(error, &nmrep, rxid);
	nfsm_chain_get_32(error, &nmrep, reply);
	if (error || (reply != RPC_REPLY)) {
		OSAddAtomic64(1, &nfsstats.rpcinvalid);
		mbuf_freem(mrep);
		return;
	}

	/*
	 * Loop through the request list to match up the reply
	 * If no match, just drop it.
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		/* skip requests that already have a reply or whose xid doesn't match */
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid)))
			continue;
		/* looks like we have it, grab lock and double check */
		lck_mtx_lock(&req->r_mtx);
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		/* Found it.. */
		req->r_nmrep = nmrep;
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_sotype == SOCK_DGRAM) {
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			if (req->r_flags & R_CWND) {
				/* request is done with its congestion window slot */
				nmp->nm_sent -= NFS_CWNDSCALE;
				req->r_flags &= ~R_CWND;
			}
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 */
		if (req->r_flags & R_TIMING) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so coarse, it can often
			 * result in r_rtt == 0. Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 * add 1.
			 */
			if (proct[req->r_procnum] == 0)
				panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
			t1 = req->r_rtt + 1;
			t1 -= (NFS_SRTT(req) >> 3);
			NFS_SRTT(req) += t1;
			if (t1 < 0)
				t1 = -t1;
			t1 -= (NFS_SDRTT(req) >> 2);
			NFS_SDRTT(req) += t1;
		}
		nmp->nm_timeouts = 0;
		lck_mtx_unlock(&nmp->nm_lock);
		/* signal anyone waiting on this request */
		wakeup(req);
		asyncioq = (req->r_callback.rcb_func != NULL);
		if (nfs_request_using_gss(req))
			nfs_gss_clnt_rpcdone(req);
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_unlock(nfs_request_mutex);
		/* if it's an async RPC with a callback, queue it up */
		if (asyncioq)
			nfs_asyncio_finish(req);
		break;
	}

	if (!req) {
		/* not matched to a request, so drop it. */
		lck_mtx_unlock(nfs_request_mutex);
		OSAddAtomic64(1, &nfsstats.rpcunexpected);
		mbuf_freem(mrep);
	}
}
+
/*
 * Wait for the reply for a given request...
 * ...potentially resending the request if necessary.
 *
 * Returns 0 once a reply has arrived (r_nmrep is set), EAGAIN if a
 * GSS request must be rebuilt before resending, or a termination error.
 */
int
nfs_wait_reply(struct nfsreq *req)
{
	struct timespec ts = { 2, 0 };
	int error = 0, slpflag, first = 1;

	/* allow signal interruption on interruptible mounts */
	if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR))
		slpflag = PCATCH;
	else
		slpflag = 0;

	lck_mtx_lock(&req->r_mtx);
	while (!req->r_nmrep.nmc_mhead) {
		/* on the first pass, don't check the calling thread for termination */
		if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0)))
			break;
		if (((error = req->r_error)) || req->r_nmrep.nmc_mhead)
			break;
		/* check if we need to resend */
		if (req->r_flags & R_MUSTRESEND) {
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			req->r_flags |= R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			if (nfs_request_using_gss(req)) {
				/*
				 * It's an RPCSEC_GSS request.
				 * Can't just resend the original request
				 * without bumping the cred sequence number.
				 * Go back and re-build the request.
				 */
				lck_mtx_lock(&req->r_mtx);
				req->r_flags &= ~R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				return (EAGAIN);
			}
			error = nfs_send(req, 1);
			lck_mtx_lock(&req->r_mtx);
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
			if (error)
				break;
			/* the reply may have arrived while we were sending */
			if (((error = req->r_error)) || req->r_nmrep.nmc_mhead)
				break;
		}
		/* need to poll if we're P_NOREMOTEHANG */
		if (nfs_noremotehang(req->r_thread))
			ts.tv_sec = 1;
		msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
		first = slpflag = 0;
	}
	lck_mtx_unlock(&req->r_mtx);

	return (error);
}
+
+/*
+ * An NFS request goes something like this:
+ * (nb: always frees up mreq mbuf list)
+ * nfs_request_create()
+ * - allocates a request struct if one is not provided
+ * - initial fill-in of the request struct
+ * nfs_request_add_header()
+ * - add the RPC header
+ * nfs_request_send()
+ * - link it into list
+ * - call nfs_send() for first transmit
+ * nfs_request_wait()
+ * - call nfs_wait_reply() to wait for the reply
+ * nfs_request_finish()
+ * - break down rpc header and return with error or nfs reply
+ * pointed to by nmrep.
+ * nfs_request_rele()
+ * nfs_request_destroy()
+ * - clean up the request struct
+ * - free the request struct if it was allocated by nfs_request_create()
+ */
+
+/*
+ * Set up an NFS request struct (allocating if no request passed in).
+ */
+int
+nfs_request_create(
+ nfsnode_t np,
+ mount_t mp, /* used only if !np */
+ struct nfsm_chain *nmrest,
+ int procnum,
+ thread_t thd,
+ kauth_cred_t cred,
+ struct nfsreq **reqp)
+{
+ struct nfsreq *req, *newreq = NULL;
+ struct nfsmount *nmp;
+
+ req = *reqp;
+ if (!req) {
+ /* allocate a new NFS request structure */
+ MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
+ if (!newreq) {
+ mbuf_freem(nmrest->nmc_mhead);
+ nmrest->nmc_mhead = NULL;
+ return (ENOMEM);
+ }
+ req = newreq;
+ }
+
+ bzero(req, sizeof(*req));
+ if (req == newreq)
+ req->r_flags = R_ALLOCATED;
+
+ nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
+ if (nfs_mount_gone(nmp)) {
+ if (newreq)
+ FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+ return (ENXIO);
+ }
+ lck_mtx_lock(&nmp->nm_lock);
+ if ((nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) &&
+ (nmp->nm_state & NFSSTA_TIMEO)) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ mbuf_freem(nmrest->nmc_mhead);
+ nmrest->nmc_mhead = NULL;
+ if (newreq)
+ FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
+ return (ENXIO);
+ }
+
+ if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS))
+ OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
+ if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL))
+ panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
+
+ lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
+ req->r_nmp = nmp;
+ nmp->nm_ref++;
+ req->r_np = np;
+ req->r_thread = thd;
+ if (!thd)
+ req->r_flags |= R_NOINTR;
+ if (IS_VALID_CRED(cred)) {
+ kauth_cred_ref(cred);
+ req->r_cred = cred;
+ }
+ req->r_procnum = procnum;
+ if (proct[procnum] > 0)
+ req->r_flags |= R_TIMING;
+ req->r_nmrep.nmc_mhead = NULL;
+ SLIST_INIT(&req->r_gss_seqlist);
+ req->r_achain.tqe_next = NFSREQNOLIST;
+ req->r_rchain.tqe_next = NFSREQNOLIST;
+ req->r_cchain.tqe_next = NFSREQNOLIST;
+
+ /* set auth flavor to use for request */
+ if (!req->r_cred)
+ req->r_auth = RPCAUTH_NONE;
+ else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID))
+ req->r_auth = req->r_np->n_auth;
+ else
+ req->r_auth = nmp->nm_auth;
+
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ /* move the request mbuf chain to the nfsreq */
+ req->r_mrest = nmrest->nmc_mhead;
+ nmrest->nmc_mhead = NULL;
+
+ req->r_flags |= R_INITTED;
+ req->r_refs = 1;
+ if (newreq)
+ *reqp = req;
+ return (0);
+}
+
/*
 * Clean up and free an NFS request structure.
 *
 * Tears down every association the request may still have: the request
 * queue, async I/O queue, resend queue, congestion window accounting,
 * jukebox-timeout state, GSS state, creds, mbuf chains, and the mount
 * reference taken in nfs_request_create().
 */
void
nfs_request_destroy(struct nfsreq *req)
{
	struct nfsmount *nmp;
	struct gss_seq *gsp, *ngsp;
	int clearjbtimeo = 0;

	if (!req || !(req->r_flags & R_INITTED))
		return;
	nmp = req->r_nmp;
	req->r_flags &= ~R_INITTED;
	if (req->r_lflags & RL_QUEUED)
		nfs_reqdequeue(req);

	if (req->r_achain.tqe_next != NFSREQNOLIST) {
		/*
		 * Still on an async I/O queue?
		 * %%% But which one, we may be on a local iod.
		 */
		lck_mtx_lock(nfsiod_mutex);
		/* re-check under the lock before removing */
		if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
			req->r_achain.tqe_next = NFSREQNOLIST;
		}
		lck_mtx_unlock(nfsiod_mutex);
	}

	lck_mtx_lock(&req->r_mtx);
	if (nmp) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_flags & R_CWND) {
			/* Decrement the outstanding request count. */
			req->r_flags &= ~R_CWND;
			nmp->nm_sent -= NFS_CWNDSCALE;
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		assert((req->r_flags & R_RESENDQ) == 0);
		/* XXX should we just remove this conditional, we should have a reference if we're resending */
		if (req->r_rchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			if (req->r_flags & R_RESENDQ)
				req->r_flags &= ~R_RESENDQ;
		}
		if (req->r_cchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
			req->r_cchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_flags & R_JBTPRINTFMSG) {
			req->r_flags &= ~R_JBTPRINTFMSG;
			nmp->nm_jbreqs--;
			/* the last jukebox-timeout request clears the mount's jukebox state */
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (clearjbtimeo)
		nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
	/* r_mhead (when present) includes r_mrest, so free only one of them */
	if (req->r_mhead)
		mbuf_freem(req->r_mhead);
	else if (req->r_mrest)
		mbuf_freem(req->r_mrest);
	if (req->r_nmrep.nmc_mhead)
		mbuf_freem(req->r_nmrep.nmc_mhead);
	if (IS_VALID_CRED(req->r_cred))
		kauth_cred_unref(&req->r_cred);
	if (nfs_request_using_gss(req))
		nfs_gss_clnt_rpcdone(req);
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
		FREE(gsp, M_TEMP);
	if (req->r_gss_ctx)
		nfs_gss_clnt_ctx_unref(req);
	if (req->r_wrongsec)
		FREE(req->r_wrongsec, M_TEMP);
	if (nmp)
		nfs_mount_rele(nmp);
	lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
	if (req->r_flags & R_ALLOCATED)
		FREE_ZONE(req, sizeof(*req), M_NFSREQ);
}
+
+void
+nfs_request_ref(struct nfsreq *req, int locked)
+{
+ if (!locked)
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_refs <= 0)
+ panic("nfsreq reference error");
+ req->r_refs++;
+ if (!locked)
+ lck_mtx_unlock(&req->r_mtx);
+}
+
+void
+nfs_request_rele(struct nfsreq *req)
+{
+ int destroy;
+
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_refs <= 0)
+ panic("nfsreq reference underflow");
+ req->r_refs--;
+ destroy = (req->r_refs == 0);
+ lck_mtx_unlock(&req->r_mtx);
+ if (destroy)
+ nfs_request_destroy(req);
+}
+
+
+/*
+ * Add an (updated) RPC header with authorization to an NFS request.
+ */
+int
+nfs_request_add_header(struct nfsreq *req)
+{
+ struct nfsmount *nmp;
+ int error = 0;
+ mbuf_t m;
+
+ /* free up any previous header */
+ if ((m = req->r_mhead)) {
+ while (m && (m != req->r_mrest))
+ m = mbuf_free(m);
+ req->r_mhead = NULL;
+ }
+
+ nmp = req->r_nmp;
+ if (nfs_mount_gone(nmp))
+ return (ENXIO);
+
+ error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
+ if (error)
+ return (error);
+
+ req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
+ nmp = req->r_nmp;
+ if (nfs_mount_gone(nmp))
+ return (ENXIO);
+ lck_mtx_lock(&nmp->nm_lock);
+ if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT))
+ req->r_retry = nmp->nm_retry;
+ else
+ req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ return (error);
+}
+
+
+/*
+ * Queue an NFS request up and send it out.
+ */
+int
+nfs_request_send(struct nfsreq *req, int wait)
+{
+ struct nfsmount *nmp;
+ struct timeval now;
+
+ lck_mtx_lock(&req->r_mtx);
+ req->r_flags |= R_SENDING;
+ lck_mtx_unlock(&req->r_mtx);
+
+ lck_mtx_lock(nfs_request_mutex);
+
+ nmp = req->r_nmp;
+ if (nfs_mount_gone(nmp)) {
+ lck_mtx_unlock(nfs_request_mutex);
+ return (ENXIO);
+ }
+
+ microuptime(&now);
+ if (!req->r_start) {
+ req->r_start = now.tv_sec;
+ req->r_lastmsg = now.tv_sec -
+ ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
+ }
+
+ OSAddAtomic64(1, &nfsstats.rpcrequests);
+
+ /*
+ * Chain request into list of outstanding requests. Be sure
+ * to put it LAST so timer finds oldest requests first.
+ * Make sure that the request queue timer is running
+ * to check for possible request timeout.
+ */
+ TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
+ req->r_lflags |= RL_QUEUED;
+ if (!nfs_request_timer_on) {
+ nfs_request_timer_on = 1;
+ nfs_interval_timer_start(nfs_request_timer_call,
+ NFS_REQUESTDELAY);
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+
+ /* Send the request... */
+ return (nfs_send(req, wait));
+}