/*
- * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
nso->nso_version = RPCBVERS3;
}
} else if (nso->nso_protocol == NFS_PROG) {
- if ((minvers > NFS_VER4) || (maxvers < NFS_VER2))
- error = EPROGMISMATCH;
- else if ((NFS_VER3 >= minvers) && (NFS_VER3 <= maxvers))
- nso->nso_version = NFS_VER3;
- else if ((NFS_VER2 >= minvers) && (NFS_VER2 <= maxvers))
- nso->nso_version = NFS_VER2;
- else if ((NFS_VER4 >= minvers) && (NFS_VER4 <= maxvers))
- nso->nso_version = NFS_VER4;
+ int vers;
+
+ /*
+ * N.B. Both the portmapper and rpcbind V3 are happy to return
+ * addresses for versions other than the one you ask for (via
+ * getport or getaddr), and thus we may have fallen into this code
+ * path. So if we get a version that we support, use the highest
+ * supported version. This assumes that the server supports all
+ * versions between minvers and maxvers. Note that for IPv6 we will
+ * try to use rpcbind V4, which has getversaddr, and we should not
+ * get here if that was successful.
+ */
+ for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
+ if (vers >= (int)minvers && vers <= (int)maxvers)
+ break;
+ }
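+ /* No overlap with the server's advertised range: leave nso_version 0 so the reply is not treated as RPC_SUCCESS below. */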
+ nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
}
if (!error && nso->nso_version)
accepted_status = RPC_SUCCESS;
*/
int
nfs_socket_create(
- __unused struct nfsmount *nmp,
+ struct nfsmount *nmp,
struct sockaddr *sa,
int sotype,
in_port_t port,
((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
nso->nso_protocol = protocol;
nso->nso_version = vers;
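+ /* Record the mount's NFS major version range on the socket for later version negotiation. */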
+ nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
+ nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);
error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);
if (nso->nso_protocol == PMAPPROG)
vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
else if (nso->nso_protocol == NFS_PROG)
- vers = NFS_VER3;
+ vers = PVER2MAJOR(nmp->nm_max_vers);
}
lck_mtx_unlock(&nso->nso_lock);
error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
* Set the nfs socket protocol and version if needed.
*/
void
-nfs_connect_search_socket_found(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct nfs_socket *nso)
+nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
{
NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
if (nso->nso_protocol == PMAPPROG)
nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
if (nso->nso_protocol == NFS_PROG)
- nso->nso_version = NFS_VER3;
+ nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
}
TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
nss->nss_sockcnt--;
* A mount's initial connection may require negotiating some parameters such
* as socket type and NFS version.
*/
+
int
nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
{
sock_upcall upcall;
struct timeval now, start;
int error, savederror, nfsvers;
+ int tryv4 = 1;
uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
fhandle_t *fh = NULL;
char *path = NULL;
if (!nmp->nm_vers) {
/* No NFS version specified... */
if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
- /* ...connect to portmapper first if we (may) need any ports. */
- nss.nss_port = PMAPPORT;
- nss.nss_protocol = PMAPPROG;
- nss.nss_version = 0;
+ if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
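+ /* Try NFSv4 on the standard NFS port first; NSS_FALLBACK2PMAP lets us fall back to the portmapper if that fails. */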
+ nss.nss_port = NFS_PORT;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = 4;
+ nss.nss_flags |= NSS_FALLBACK2PMAP;
+ } else {
+ /* ...connect to portmapper first if we (may) need any ports. */
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ }
} else {
/* ...connect to NFS port first. */
nss.nss_port = nmp->nm_nfsport;
nss.nss_version = 0;
}
} else if (nmp->nm_vers >= NFS_VER4) {
- /* For NFSv4, we use the given (or default) port. */
- nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
- nss.nss_protocol = NFS_PROG;
- nss.nss_version = 4;
+ if (tryv4) {
+ /* For NFSv4, we use the given (or default) port. */
+ nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
+ nss.nss_protocol = NFS_PROG;
+ nss.nss_version = 4;
+ /*
+ * Set NSS_FALLBACK2PMAP here to pick up any non-standard port
+ * via the portmapper if no port is specified on the mount.
+ * Note that nm_vers is set, so we will only try NFS_VER4.
+ */
+ if (!nmp->nm_nfsport)
+ nss.nss_flags |= NSS_FALLBACK2PMAP;
+ } else {
+ nss.nss_port = PMAPPORT;
+ nss.nss_protocol = PMAPPROG;
+ nss.nss_version = 0;
+ }
} else {
/* For NFSv3/v2... */
if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
if (error || !nss.nss_sock) {
/* search failed */
nfs_socket_search_cleanup(&nss);
+ if (nss.nss_flags & NSS_FALLBACK2PMAP) {
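+ /* The NFSv4 probe failed; disable it and retry the search via the portmapper. */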
+ tryv4 = 0;
+ NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
+ goto tryagain;
+ }
+
if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
/* Try using UDP */
sotype = SOCK_DGRAM;
/* Set up socket address and port for NFS socket. */
bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
- /* If NFS version not set, try NFSv3 then NFSv2. */
- nfsvers = nmp->nm_vers ? nmp->nm_vers : NFS_VER3;
-
+ /* If NFS version not set, try nm_max_vers down to nm_min_vers */
+ nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
if (!(port = nmp->nm_nfsport)) {
if (ss.ss_family == AF_INET)
((struct sockaddr_in*)&ss)->sin_port = htons(0);
else if (ss.ss_family == AF_INET6)
((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
- error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
- nso->nso_so, NFS_PROG, nfsvers,
- (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo);
- if (!error) {
- if (ss.ss_family == AF_INET)
- port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
- else if (ss.ss_family == AF_INET6)
- port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
- if (!port)
- error = EPROGUNAVAIL;
- }
- if (error && !nmp->nm_vers) {
- nfsvers = NFS_VER2;
+ for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
+ if (nmp->nm_vers && nmp->nm_vers != nfsvers)
+ continue; /* Wrong version */
+ if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM)
+ continue; /* NFSv4 does not do UDP */
error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
- nso->nso_so, NFS_PROG, nfsvers,
- (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo);
+ nso->nso_so, NFS_PROG, nfsvers,
+ (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo);
if (!error) {
if (ss.ss_family == AF_INET)
port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
if (!port)
error = EPROGUNAVAIL;
+ if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0)
+ continue; /* We already tried this */
}
+ if (!error)
+ break;
}
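+ /* Ran out of versions to try; make sure we report failure if nothing else set an error. */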
+ if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0)
+ error = EPROGUNAVAIL;
if (error) {
nfs_socket_search_update_error(&nss, error);
nfs_socket_destroy(nso);
}
}
/* Create NFS protocol socket and add it to the list of sockets. */
+ /* N.B. If nfsvers is NFS_VER4 at this point, then we're on a non-standard port */
error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
if (error) {
microuptime(&now);
if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
lastmsg = now.tv_sec;
- nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect");
+ nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
wentdown = 1;
}
lck_mtx_lock(&nmp->nm_lock);
NFS_SOCK_DBG("Not mounted returning %d\n", error);
return (error);
}
- nfs_mount_check_dead_timeout(nmp);
+
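+ /* The dead timeout has expired: make the mount a zombie and fail the connect with ENXIO. */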
+ if (nfs_mount_check_dead_timeout(nmp)) {
+ nfs_mount_make_zombie(nmp);
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (ENXIO);
+ }
+
if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
lck_mtx_unlock(&nmp->nm_lock);
return (error);
rq->r_flags |= R_MUSTRESEND;
rq->r_rtt = -1;
wakeup(rq);
- if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
nfs_asyncio_resend(rq);
}
lck_mtx_unlock(&rq->r_mtx);
rq->r_flags |= R_MUSTRESEND;
rq->r_rtt = -1;
wakeup(rq);
- if ((rq->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
nfs_asyncio_resend(rq);
}
lck_mtx_unlock(&rq->r_mtx);
int do_reconnect_sleep = 0;
lck_mtx_lock(&nmp->nm_lock);
-
while (!(nmp->nm_sockflags & NMSOCK_READY) ||
!TAILQ_EMPTY(&nmp->nm_resendq) ||
!LIST_EMPTY(&nmp->nm_monlist) ||
if (nmp->nm_sockflags & NMSOCK_UNMOUNT)
break;
/* do reconnect, if necessary */
- if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_FORCE)) {
+ if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
if (nmp->nm_reconnect_start <= 0) {
microuptime(&now);
nmp->nm_reconnect_start = now.tv_sec;
if ((nmp->nm_sockflags & NMSOCK_READY) &&
(nmp->nm_state & NFSSTA_RECOVER) &&
!(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
- !(nmp->nm_state & NFSSTA_FORCE)) {
+ !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) {
/* perform state recovery */
lck_mtx_unlock(&nmp->nm_lock);
nfs_recover(nmp);
lck_mtx_lock(&nmp->nm_lock);
}
/* handle NFSv4 delegation returns */
- while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & NFSSTA_FORCE) &&
+ while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) &&
(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
lck_mtx_unlock(&nmp->nm_lock);
lck_mtx_lock(&nmp->nm_lock);
}
/* do resends, if necessary/possible */
- while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) || (nmp->nm_state & NFSSTA_FORCE)) &&
+ while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
+ (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) &&
((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
if (req->r_resendtime)
microuptime(&now);
- while (req && !(nmp->nm_state & NFSSTA_FORCE) && req->r_resendtime && (now.tv_sec < req->r_resendtime))
+ while (req && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime))
req = TAILQ_NEXT(req, r_rchain);
if (!req)
break;
req->r_rchain.tqe_next = NFSREQNOLIST;
lck_mtx_unlock(&nmp->nm_lock);
lck_mtx_lock(&req->r_mtx);
+ /* Note that we have a reference on the request that was taken in nfs_asyncio_resend */
if (req->r_error || req->r_nmrep.nmc_mhead) {
dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
req->r_flags &= ~R_RESENDQ;
lck_mtx_unlock(&req->r_mtx);
if (dofinish)
nfs_asyncio_finish(req);
+ nfs_request_rele(req);
lck_mtx_lock(&nmp->nm_lock);
continue;
}
NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
req->r_flags, req->r_rtt);
- error = !req->r_nmp ? ENXIO : 0; /* unmounted? */
- if (!error)
- error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
if (!error)
error = nfs_request_add_header(req);
if (!error)
lck_mtx_unlock(&req->r_mtx);
if (dofinish)
nfs_asyncio_finish(req);
+ nfs_request_rele(req);
lck_mtx_lock(&nmp->nm_lock);
error = 0;
continue;
}
NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
- error = !req->r_nmp ? ENXIO : 0; /* unmounted? */
- if (!error)
- error = nfs_sigintr(nmp, req, req->r_thread, 0);
+ error = nfs_sigintr(nmp, req, req->r_thread, 0);
if (!error) {
req->r_flags |= R_SENDING;
lck_mtx_unlock(&req->r_mtx);
req->r_flags &= ~R_RESENDQ;
wakeup(req);
lck_mtx_unlock(&req->r_mtx);
+ nfs_request_rele(req);
lck_mtx_lock(&nmp->nm_lock);
continue;
}
lck_mtx_unlock(&req->r_mtx);
if (dofinish)
nfs_asyncio_finish(req);
+ nfs_request_rele(req);
lck_mtx_lock(&nmp->nm_lock);
}
- if (nmp->nm_deadto_start)
- nfs_mount_check_dead_timeout(nmp);
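+ /* The mount has reached its dead timeout: make it a zombie and exit the service loop. */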
+ if (nfs_mount_check_dead_timeout(nmp)) {
+ nfs_mount_make_zombie(nmp);
+ break;
+ }
+
if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))
break;
/* check monitored nodes, if necessary/possible */
if (!LIST_EMPTY(&nmp->nm_monlist)) {
nmp->nm_state |= NFSSTA_MONITOR_SCAN;
LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
- if (!(nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE)))
+ if (!(nmp->nm_sockflags & NMSOCK_READY) ||
+ (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD)))
break;
np->n_mflag |= NMMONSCANINPROG;
lck_mtx_unlock(&nmp->nm_lock);
np->n_mflag &= ~NMMONSCANWANT;
wakeup(&np->n_mflag);
}
- if (error || !(nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE)))
+ if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
+ (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD)))
break;
}
nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
* unresponsive mount has reached the dead timeout.
* (must be called with nmp locked)
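+ * Returns 1 if the mount is dead or its dead timeout has expired, 0 otherwise.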
*/
-void
+int
nfs_mount_check_dead_timeout(struct nfsmount *nmp)
{
struct timeval now;
- if (nmp->nm_deadto_start == 0)
- return;
if (nmp->nm_state & NFSSTA_DEAD)
- return;
+ return 1;
+ if (nmp->nm_deadto_start == 0)
+ return 0;
nfs_is_squishy(nmp);
if (nmp->nm_curdeadtimeout <= 0)
- return;
+ return 0;
microuptime(&now);
if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout)
+ return 0;
+ return 1;
+}
+
+/*
+ * Call nfs_mount_zombie to remove most of the
+ * nfs state for the mount, and then ask to be forcibly unmounted.
+ *
+ * Assumes the nfs mount structure lock nm_lock is held.
+ */
+
+void
+nfs_mount_make_zombie(struct nfsmount *nmp)
+{
+ fsid_t fsid;
+
+ if (!nmp)
return;
+
+ if (nmp->nm_state & NFSSTA_DEAD)
+ return;
+
printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
(nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
- nmp->nm_state |= NFSSTA_DEAD;
- vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_DEAD, 0);
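+ /* Snapshot the fsid so we can still signal VQ_DEAD after nfs_mount_zombie tears down mount state. */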
+ fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_mount_zombie(nmp, NFSSTA_DEAD);
+ vfs_event_signal(&fsid, VQ_DEAD, 0);
+ lck_mtx_lock(&nmp->nm_lock);
}
+
/*
* NFS callback channel socket state
*/
lck_mtx_lock(&nmp->nm_lock);
while (!(nmp->nm_sockflags & NMSOCK_READY)) {
/* don't bother waiting if the socket thread won't be reconnecting it */
- if (nmp->nm_state & NFSSTA_FORCE) {
+ if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
error = EIO;
break;
}
- if (NMFLAG(nmp, SOFT) && (nmp->nm_reconnect_start > 0)) {
+ if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
struct timeval now;
microuptime(&now);
if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
msg.msg_namelen = sendnam->sa_len;
}
error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
-#ifdef NFS_SOCKET_DEBUGGING
- if (error || (sentlen != req->r_mreqlen))
+ if (error || (sentlen != req->r_mreqlen)) {
NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
- req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
-#endif
+ req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
+ }
+
if (!error && (sentlen != req->r_mreqlen))
error = EWOULDBLOCK;
needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));
nfs_sndunlock(req);
+ if (nfs_is_dead(error, nmp))
+ error = EIO;
+
/*
* Don't log some errors:
* EPIPE errors may be common with servers that drop idle connections.
!req->r_nmp ? "<unmounted>" :
vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
- if (nfs_is_dead(error, nmp))
- error = EIO;
-
/* prefer request termination error over other errors */
error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
if (error2)
mbuf_t m;
int error = 0;
int recv = 1;
+ int wup = 0;
if (nmp->nm_sockflags & NMSOCK_CONNECTING)
return;
nfs_request_match_reply(nmp, m);
}
- lck_mtx_lock(&nmp->nm_lock);
- if (nmp->nm_nso == nso) {
- /* still the same socket, so update socket's RPC parsing state */
- lck_mtx_unlock(&nmp->nm_lock);
- lck_mtx_lock(&nso->nso_lock);
- nso->nso_rrs = nrrs;
- nso->nso_flags &= ~NSO_UPCALL;
- lck_mtx_unlock(&nso->nso_lock);
- if (nmp->nm_sockflags & NMSOCK_DISCONNECTING)
- wakeup(&nmp->nm_sockflags);
- } else {
- lck_mtx_unlock(&nmp->nm_lock);
- }
+ /* Update the socket's RPC parsing state */
+ lck_mtx_lock(&nso->nso_lock);
+ nso->nso_rrs = nrrs;
+ if (nso->nso_flags & NSO_DISCONNECTING)
+ wup = 1;
+ nso->nso_flags &= ~NSO_UPCALL;
+ lck_mtx_unlock(&nso->nso_lock);
+ if (wup)
+ wakeup(&nso->nso_flags);
+
#ifdef NFS_SOCKET_DEBUGGING
if (!recv && (error != EWOULDBLOCK))
NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
lck_mtx_lock(&nmp->nm_lock);
if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
!(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
+ /* Nothing to poke */
+ nmp->nm_sockflags &= ~NMSOCK_POKE;
+ wakeup(&nmp->nm_sockflags);
lck_mtx_unlock(&nmp->nm_lock);
return;
}
msg.msg_iovlen = 1;
error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_sockflags &= ~NMSOCK_POKE;
+ wakeup(&nmp->nm_sockflags);
+ lck_mtx_unlock(&nmp->nm_lock);
nfs_is_dead(error, nmp);
}
req->r_flags = R_ALLOCATED;
nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
if (newreq)
FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
return (ENXIO);
}
lck_mtx_lock(&nmp->nm_lock);
- if ((nmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) ==
- (NFSSTA_FORCE|NFSSTA_TIMEO)) {
+ if ((nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) &&
+ (nmp->nm_state & NFSSTA_TIMEO)) {
lck_mtx_unlock(&nmp->nm_lock);
mbuf_freem(nmrest->nmc_mhead);
nmrest->nmc_mhead = NULL;
FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
return (ENXIO);
}
-
+
if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS))
OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL))
lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
req->r_nmp = nmp;
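+ /* Each request holds a reference on its mount; it is released in nfs_request_destroy. */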
+ nmp->nm_ref++;
req->r_np = np;
req->r_thread = thd;
if (!thd)
void
nfs_request_destroy(struct nfsreq *req)
{
- struct nfsmount *nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+ struct nfsmount *nmp;
struct gss_seq *gsp, *ngsp;
- struct timespec ts = { 1, 0 };
int clearjbtimeo = 0;
if (!req || !(req->r_flags & R_INITTED))
return;
+ nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
req->r_flags &= ~R_INITTED;
if (req->r_lflags & RL_QUEUED)
nfs_reqdequeue(req);
+
if (req->r_achain.tqe_next != NFSREQNOLIST) {
- /* still on an async I/O queue? */
+ /*
+ * Still on an async I/O queue?
+ * %%% But which one? We may be on a local iod.
+ */
lck_mtx_lock(nfsiod_mutex);
- if (nmp && (req->r_achain.tqe_next != NFSREQNOLIST)) {
+ if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
req->r_achain.tqe_next = NFSREQNOLIST;
}
lck_mtx_unlock(nfsiod_mutex);
}
+
lck_mtx_lock(&req->r_mtx);
if (nmp) {
lck_mtx_lock(&nmp->nm_lock);
wakeup(req2);
}
}
+ assert((req->r_flags & R_RESENDQ) == 0);
+ /* XXX Should we just remove this conditional? We should have a reference if we're resending. */
if (req->r_rchain.tqe_next != NFSREQNOLIST) {
TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
req->r_rchain.tqe_next = NFSREQNOLIST;
}
lck_mtx_unlock(&nmp->nm_lock);
}
- while (req->r_flags & R_RESENDQ)
- msleep(req, &req->r_mtx, (PZERO - 1), "nfsresendqwait", &ts);
lck_mtx_unlock(&req->r_mtx);
+
if (clearjbtimeo)
nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
if (req->r_mhead)
nfs_gss_clnt_ctx_unref(req);
if (req->r_wrongsec)
FREE(req->r_wrongsec, M_TEMP);
-
+ if (nmp)
+ nfs_mount_rele(nmp);
lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
if (req->r_flags & R_ALLOCATED)
FREE_ZONE(req, sizeof(*req), M_NFSREQ);
}
nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(&nmp->nm_lock);
- if (NMFLAG(nmp, SOFT))
+ if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT))
req->r_retry = nmp->nm_retry;
else
req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
lck_mtx_lock(nfs_request_mutex);
nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
lck_mtx_unlock(nfs_request_mutex);
return (ENXIO);
}
*/
if (!error) {
if ((req->r_flags & R_TPRINTFMSG) ||
- (nmp && NMFLAG(nmp, SOFT) &&
- ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_FORCE)) == NFSSTA_TIMEO)))
+ (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
+ ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_FORCE|NFSSTA_DEAD)) == NFSSTA_TIMEO)))
nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
else
nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
lck_mtx_unlock(&nmp->nm_lock);
}
nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
- "resource temporarily unavailable (jukebox)");
+ "resource temporarily unavailable (jukebox)", 0);
}
- if (NMFLAG(nmp, SOFT) && (req->r_delay == 30) && !(req->r_flags & R_NOINTR)) {
+ if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
+ !(req->r_flags & R_NOINTR)) {
/* for soft mounts, just give up after a short while */
OSAddAtomic64(1, &nfsstats.rpctimeouts);
nfs_softterm(req);
/* find the next flavor to try */
for(i=0; i < NX_MAX_SEC_FLAVORS; i++)
if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
- if (((req->r_wrongsec[i] == RPCAUTH_KRB5P) ||
- (req->r_wrongsec[i] == RPCAUTH_KRB5I) ||
- (req->r_wrongsec[i] == RPCAUTH_KRB5)) && (req->r_gss_ctx &&
- (req->r_gss_ctx->gss_clnt_service == RPCSEC_GSS_SVC_SYS))) {
- /* don't bother trying Kerberos if we've already got a fallback context */
- req->r_wrongsec[i] = RPCAUTH_INVALID;
- continue;
- }
if (!srvcount) /* no server list, just try it */
break;
/* check that it's in the server's list */
if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req)))
return (error);
- req->r_flags |= (flags & R_OPTMASK);
+ req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
if (si)
req->r_secinfo = *si;
int *status)
{
struct nfsreq rq, *req = &rq;
- int error;
+ int error, wait = 1;
if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req)))
return (error);
}
nfs_gss_clnt_ctx_ref(req, cp);
+ /*
+ * Don't wait for a reply to a context destroy advisory
+ * to avoid hanging on a dead server.
+ */
+ if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY)
+ wait = 0;
+
FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
do {
req->r_error = 0;
if ((error = nfs_request_add_header(req)))
break;
- if ((error = nfs_request_send(req, 1)))
+ if ((error = nfs_request_send(req, wait)))
break;
+ if (!wait)
+ break;
+
nfs_request_wait(req);
if ((error = nfs_request_finish(req, nmrepp, status)))
break;
} while (req->r_flags & R_RESTART);
FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
+
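+ /* Drop the GSS context reference taken above. */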
+ nfs_gss_clnt_ctx_unref(req);
nfs_request_rele(req);
+
return (error);
}
int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
struct timespec ts = { 2, 0 };
while (!(req->r_flags & R_SENT)) {
- if ((req->r_flags & R_RESENDQ) && ((nmp = req->r_nmp))) {
+ nmp = req->r_nmp;
+ if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
lck_mtx_lock(&nmp->nm_lock);
if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
/*
req->r_flags |= R_SENDING;
lck_mtx_unlock(&req->r_mtx);
error = nfs_send(req, 1);
+ /* Remove the R_RESENDQ reference */
+ nfs_request_rele(req);
lck_mtx_lock(&req->r_mtx);
if (error)
break;
}
sent = req->r_flags & R_SENT;
lck_mtx_unlock(&req->r_mtx);
- if (error && req->r_callback.rcb_func && !sent)
+ if (error && req->r_callback.rcb_func && !sent) {
nfs_request_rele(req);
+ }
}
FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
if (error || req->r_callback.rcb_func)
nfs_request_rele(req);
+
return (error);
}
req->r_flags |= R_ASYNCWAIT;
while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */
struct timespec ts = { 2, 0 };
+
if ((nmp = req->r_nmp)) {
lck_mtx_lock(&nmp->nm_lock);
if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
req->r_rchain.tqe_next = NFSREQNOLIST;
if (req->r_flags & R_RESENDQ)
req->r_flags &= ~R_RESENDQ;
+ /* Remove the R_RESENDQ reference */
+ assert(req->r_refs > 0);
+ req->r_refs--;
lck_mtx_unlock(&nmp->nm_lock);
break;
}
}
while (!error && (req->r_flags & R_RESTART)) {
- if (asyncio && req->r_resendtime) { /* send later */
+ if (asyncio) {
+ assert(req->r_achain.tqe_next == NFSREQNOLIST);
lck_mtx_lock(&req->r_mtx);
- nfs_asyncio_resend(req);
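+ /* Clear R_IOD: we're done on the iod thread, so the request may be queued for an async resend. */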
+ req->r_flags &= ~R_IOD;
+ if (req->r_resendtime) { /* send later */
+ nfs_asyncio_resend(req);
+ lck_mtx_unlock(&req->r_mtx);
+ return (EINPROGRESS);
+ }
lck_mtx_unlock(&req->r_mtx);
- return (EINPROGRESS);
}
req->r_error = 0;
req->r_flags &= ~R_RESTART;
void
nfs_request_async_cancel(struct nfsreq *req)
{
- nfs_reqdequeue(req);
FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
nfs_request_rele(req);
}
struct nfsmount *nmp = req->r_nmp;
req->r_flags |= R_SOFTTERM;
req->r_error = ETIMEDOUT;
- if (!(req->r_flags & R_CWND) || !nmp)
+ if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp))
return;
/* update congestion window */
req->r_flags &= ~R_CWND;
struct timeval now;
TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
+restart:
lck_mtx_lock(nfs_request_mutex);
req = TAILQ_FIRST(&nfs_reqq);
if (req == NULL) { /* no requests - turn timer off */
microuptime(&now);
for ( ; req != NULL ; req = nfs_reqnext(req)) {
nmp = req->r_nmp;
- if (!nmp) /* unmounted */
+ if (nmp == NULL) {
+ NFS_SOCK_DBG("Found a request with out a mount!\n");
continue;
+ }
if (req->r_error || req->r_nmrep.nmc_mhead)
continue;
if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
req->r_lastmsg = now.tv_sec;
nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
- "not responding");
+ "not responding", 1);
req->r_flags |= R_TPRINTFMSG;
lck_mtx_lock(&nmp->nm_lock);
if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
* Put a reasonable limit on the maximum timeout,
* and reduce that limit when soft mounts get timeouts or are in reconnect.
*/
- if (!NMFLAG(nmp, SOFT) && !nfs_can_squish(nmp))
+ if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp))
maxtime = NFS_MAXTIMEO;
else if ((req->r_flags & (R_SETUP|R_RECOVER)) ||
((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8)))
if (timeo > maxtime)
timeo = maxtime;
if (req->r_rtt <= timeo) {
+ NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
lck_mtx_unlock(&nmp->nm_lock);
lck_mtx_unlock(&req->r_mtx);
continue;
(now.tv_sec - req->r_start)*NFS_HZ, maxtime);
if (nmp->nm_timeouts < 8)
nmp->nm_timeouts++;
- nfs_mount_check_dead_timeout(nmp);
+ if (nfs_mount_check_dead_timeout(nmp)) {
+ /* Unbusy this request */
+ req->r_lflags &= ~RL_BUSY;
+ if (req->r_lflags & RL_WAITING) {
+ req->r_lflags &= ~RL_WAITING;
+ wakeup(&req->r_lflags);
+ }
+ lck_mtx_unlock(&req->r_mtx);
+
+ /* No need to poke this mount */
+ if (nmp->nm_sockflags & NMSOCK_POKE) {
+ nmp->nm_sockflags &= ~NMSOCK_POKE;
+ TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
+ }
+ /* Release our lock state, so we can become a zombie */
+ lck_mtx_unlock(nfs_request_mutex);
+
+ /*
+ * Note that nfs_mount_make_zombie(nmp) must be
+ * called with nm_lock held. After doing some
+ * work we release nm_lock in
+ * nfs_mount_make_zombie without acquiring any
+ * other locks. (Later, in nfs_mount_zombie we
+ * will acquire nfs_request_mutex, r_mtx, and
+ * nm_lock, in that order.) So we should not be
+ * introducing a deadlock here. We take a reference
+ * on the mount so that it's still there when we
+ * release the lock.
+ */
+ nmp->nm_ref++;
+ nfs_mount_make_zombie(nmp);
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_mount_rele(nmp);
+
+ /*
+ * All the requests for this mount have now been
+ * removed from the request queue. Restart to
+ * process the remaining mounts.
+ */
+ goto restart;
+ }
+
/* if it's been a few seconds, try poking the socket */
if ((nmp->nm_sotype == SOCK_STREAM) &&
((now.tv_sec - req->r_start) >= 3) &&
}
/* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
- if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP|R_RECOVER))) &&
+ if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP|R_RECOVER|R_SOFT))) &&
((req->r_rexmit >= req->r_retry) || /* too many */
((now.tv_sec - req->r_start)*NFS_HZ > maxtime))) { /* too long */
OSAddAtomic64(1, &nfsstats.rpctimeouts);
/* make sure we note the unresponsive server */
/* (maxtime may be less than tprintf delay) */
nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
- "not responding");
+ "not responding", 1);
req->r_lastmsg = now.tv_sec;
req->r_flags |= R_TPRINTFMSG;
} else {
req->r_flags |= R_MUSTRESEND;
req->r_rtt = -1;
wakeup(req);
- if ((req->r_flags & (R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
+ if ((req->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC)
nfs_asyncio_resend(req);
lck_mtx_unlock(&req->r_mtx);
}
while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
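+ /* nfs_sock_poke clears NMSOCK_POKE and wakes any waiters itself. */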
nfs_sock_poke(nmp);
- lck_mtx_lock(&nmp->nm_lock);
- nmp->nm_sockflags &= ~NMSOCK_POKE;
- wakeup(&nmp->nm_sockflags);
- lck_mtx_unlock(&nmp->nm_lock);
}
nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
proc_t p;
int error = 0;
- if (nmp == NULL)
+ if (!nmp)
return (ENXIO);
if (req && (req->r_flags & R_SOFTTERM))
if (nmp->nm_state & NFSSTA_FORCE) {
/* If a force unmount is in progress then fail. */
error = EIO;
- } else if (nmp->nm_mountp->mnt_kern_flag & MNTK_FRCUNMOUNT) {
+ } else if (vfs_isforce(nmp->nm_mountp)) {
/* Someone is unmounting us, go soft and mark it. */
NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
nmp->nm_state |= NFSSTA_FORCE;
int error = 0, slpflag = 0;
struct timespec ts = { 0, 0 };
- if (nmp == NULL)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(&nmp->nm_lock);
struct nfsmount *nmp = req->r_nmp;
int *statep, wake = 0;
- if (nmp == NULL)
+ if (!nmp)
return;
lck_mtx_lock(&nmp->nm_lock);
statep = &nmp->nm_state;
}
if (sotype == SOCK_STREAM) {
- on = 4; /* don't wait too long for the socket to connect */
- sock_setsockopt(newso, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, &on, sizeof(on));
- error = sock_connect(newso, saddr, 0);
+# define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
+ int count = 0;
+
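+ /* Connect without blocking, then poll with sock_connectwait so we can bound the wait ourselves. */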
+ error = sock_connect(newso, saddr, MSG_DONTWAIT);
+ if (error == EINPROGRESS)
+ error = 0;
+ nfsmout_if(error);
+
+ while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
+ /* Bail out after NFS_AUX_CONNECTION_TIMEOUT seconds */
+ if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
+ error = ETIMEDOUT;
+ break;
+ }
+ }
nfsmout_if(error);
}
if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
int squishy = 0;
int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;
- NFS_SOCK_DBG("%s: nm_curdeadtiemout = %d, nfs_is_mobile = %d\n",
+ NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);
if (!nfs_can_squish(nmp))
* and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
* and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
*/
-static int
-nfs_is_dead_lock(int error, struct nfsmount *nmp)
+int
+nfs_is_dead(int error, struct nfsmount *nmp)
{
- if (nmp->nm_state & NFSSTA_DEAD)
+ fsid_t fsid;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ if (nmp->nm_state & NFSSTA_DEAD) {
+ lck_mtx_unlock(&nmp->nm_lock);
return (1);
+ }
if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
- !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1))
+ !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
+ lck_mtx_unlock(&nmp->nm_lock);
return (0);
+ }
if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
- nmp->nm_state |= NFSSTA_DEAD;
- vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_DEAD, 0);
+ fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
+ lck_mtx_unlock(&nmp->nm_lock);
+ nfs_mount_zombie(nmp, NFSSTA_DEAD);
+ vfs_event_signal(&fsid, VQ_DEAD, 0);
return (1);
}
+ lck_mtx_unlock(&nmp->nm_lock);
return (0);
}
+/*
+ * If we've experienced timeouts and we're not really a
+ * classic hard mount, then just return cached data to
+ * the caller instead of likely hanging on an RPC.
+ */
int
-nfs_is_dead(int error, struct nfsmount *nmp)
+nfs_use_cache(struct nfsmount *nmp)
{
- int is_dead;
+ /*
+ * %%% We always let mobile users go to the cache;
+ * perhaps we should not even require them to have
+ * a timeout?
+ */
+ int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
+ nfs_can_squish(nmp) || nmp->nm_deadtimeout);
- lck_mtx_lock(&nmp->nm_lock);
- is_dead = nfs_is_dead_lock(error, nmp);
- lck_mtx_unlock(&nmp->nm_lock);
+ int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
- return (is_dead);
+ /*
+ * So if we have a timeout and we're not really a classic
+ * hard mount, return 1 so the caller serves cached data.
+ */
+
+ return ((nmp->nm_state & timeoutmask) && cache_ok);
}
+/*
+ * Log a message that the nfs or lockd server is unresponsive. Check
+ * whether we can be squished; if we can, or if our dead timeout has
+ * expired and we're not holding state, mark the mount as dead, remove
+ * our mount state, and ask to be unmounted. If we are holding state,
+ * we're being called from the nfs_request_timer and will soon detect
+ * that we need to unmount.
+ */
void
-nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg)
+nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
- uint32_t do_vfs_signal;
+ uint32_t do_vfs_signal = 0;
struct timeval now;
- if (nmp == NULL)
+ if (nfs_mount_gone(nmp))
return;
lck_mtx_lock(&nmp->nm_lock);
if (!wasunresponsive) {
nmp->nm_deadto_start = now.tv_sec;
nfs_mount_sock_thread_wake(nmp);
- } else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout) {
+ } else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
if (!(nmp->nm_state & NFSSTA_DEAD))
printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
(nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
- nmp->nm_state |= NFSSTA_DEAD;
+ do_vfs_signal = VQ_DEAD;
}
}
lck_mtx_unlock(&nmp->nm_lock);
- if (nmp->nm_state & NFSSTA_DEAD)
- do_vfs_signal = VQ_DEAD;
+ if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD))
+ nfs_mount_zombie(nmp, NFSSTA_DEAD);
else if (softnobrowse || wasunresponsive || !unresponsive)
do_vfs_signal = 0;
else
int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
int do_vfs_signal;
- if (nmp == NULL)
+ if (nfs_mount_gone(nmp))
return;
if (msg)