case ECONNREFUSED:
case EHOSTDOWN:
case EHOSTUNREACH:
+ /* XXX should ECANCELED also trigger a reconnect? */
needrecon = 1;
break;
}
/* only allow the following errors to be returned */
if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
(error != ENXIO) && (error != ETIMEDOUT))
- error = 0;
+ /*
+ * We got some error we don't know what to do with. If we're not
+ * reconnecting, map it to EIO: presumably our send failed, and we
+ * had better tell the caller so they don't wait for a reply that
+ * is never going to come. If we are reconnecting, return 0 and
+ * the request will be resent.
+ */
+ error = needrecon ? 0 : EIO;
return (error);
}
if (!req || !(req->r_flags & R_INITTED))
return;
- nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
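+ /*
+ * Presumably r_nmp now stays valid for the life of the request
+ * (mounts are reference counted; see nm_ref below), so there is
+ * no need to re-derive the mount from the node via NFSTONMP().
+ */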
+ nmp = req->r_nmp;
req->r_flags &= ~R_INITTED;
if (req->r_lflags & RL_QUEUED)
nfs_reqdequeue(req);
req->r_mhead = NULL;
}
- nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+ nmp = req->r_nmp;
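+ /*
+ * nfs_mount_gone() presumably reports a mount that has been
+ * force-unmounted or marked dead; bail out with ENXIO rather
+ * than touch a stale mount.
+ */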
if (nfs_mount_gone(nmp))
return (ENXIO);
return (error);
req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
- nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+ nmp = req->r_nmp;
if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(&nmp->nm_lock);
lck_mtx_lock(nfs_request_mutex);
- nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+ nmp = req->r_nmp;
if (nfs_mount_gone(nmp)) {
lck_mtx_unlock(nfs_request_mutex);
return (ENXIO);
mrep = req->r_nmrep.nmc_mhead;
- nmp = req->r_np ? NFSTONMP(req->r_np) : req->r_nmp;
+ nmp = req->r_nmp;
if ((req->r_flags & R_CWND) && nmp) {
/*
int timeo, maxtime, finish_asyncio, error;
struct timeval now;
TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
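+ /*
+ * Initialize the poke queue before the restart label so that a
+ * rescan (goto restart) does not wipe out mounts, and the
+ * references we hold on them, that are already queued for poking.
+ */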
+ TAILQ_INIT(&nfs_mount_poke_queue);
restart:
lck_mtx_lock(nfs_request_mutex);
}
nfs_reqbusy(req);
- TAILQ_INIT(&nfs_mount_poke_queue);
microuptime(&now);
for ( ; req != NULL ; req = nfs_reqnext(req)) {
!(nmp->nm_sockflags & (NMSOCK_POKE|NMSOCK_UNMOUNT)) &&
(nmp->nm_sockflags & NMSOCK_READY)) {
nmp->nm_sockflags |= NMSOCK_POKE;
+ /*
+ * We take a ref on the mount so that we know the mount will still be there
+ * when we process the nfs_mount_poke_queue. An unmount request will block
+ * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
+ * the reference after calling nfs_sock_poke below.
+ */
+ nmp->nm_ref++;
TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
}
lck_mtx_unlock(&nmp->nm_lock);
while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
nfs_sock_poke(nmp);
+ nfs_mount_rele(nmp);
}
nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
struct sockaddr *saddr = (struct sockaddr*)&ss;
struct nfsm_chain nmreq, nmrep;
mbuf_t mreq;
- int error = 0, ip, pmprog, pmvers, pmproc, ualen = 0;
+ int error = 0, ip, pmprog, pmvers, pmproc;
+ uint32_t ualen = 0;
uint32_t port;
uint64_t xid = 0;
char uaddr[MAX_IPv6_STR_LEN+16];
/* get uaddr string and convert to sockaddr */
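+ /*
+ * rpcbind returns a "universal address" string (RFC 1833), e.g.
+ * "10.0.0.1.0.111" for IPv4: the address followed by the port as
+ * two dot-separated octets. That is why uaddr is sized for an
+ * IPv6 address string plus room for the port digits.
+ */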
nfsm_chain_get_32(error, &nmrep, ualen);
if (!error) {
- if (ualen > ((int)sizeof(uaddr)-1))
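+ /*
+ * ualen came off the wire as an unsigned 32-bit count; comparing
+ * it as a signed int would let a huge value sneak past this
+ * bounds check as a negative number.
+ */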
+ if (ualen > (sizeof(uaddr)-1))
error = EIO;
if (ualen < 1) {
/* program is not available, just return a zero port */