/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/buf_internal.h>
#include <libkern/OSAtomic.h>
+#define NFS_BIO_DBG(...) NFS_DBG(NFS_FAC_BIO, 7, ## __VA_ARGS__)
+
kern_return_t thread_terminate(thread_t); /* XXX */
#define NFSBUFHASH(np, lbn) \
struct nfsbuf *bp;
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(nfs_buf_mutex);
*/
upl_flags |= UPL_WILL_MODIFY;
}
- kret = ubc_create_upl(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
- &upl, NULL, upl_flags);
+ kret = ubc_create_upl_kernel(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
+ &upl, NULL, upl_flags, VM_KERN_MEMORY_FILE);
if (kret == KERN_INVALID_ARGUMENT) {
/* vm object probably doesn't exist any more */
bp->nb_pagelist = NULL;
if (bufsize > NFS_MAXBSIZE)
panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
FSDBG_BOT(541, np, blkno, 0, ENXIO);
return (ENXIO);
}
return;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
return;
struct nfsreq_cbinfo cb;
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
int error = 0;
uint32_t nra;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_readahead <= 0)
return (0);
thd = async ? NULL : current_thread();
/* We need to make sure the pages are locked before doing I/O. */
- if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) {
- if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
- error = nfs_buf_upl_setup(bp);
- if (error) {
- printf("nfs_buf_write: upl create failed %d\n", error);
- SET(bp->nb_flags, NB_ERROR);
- bp->nb_error = error = EIO;
- nfs_buf_iodone(bp);
- goto out;
+ if (!ISSET(bp->nb_flags, NB_META)) {
+ if (UBCINFOEXISTS(NFSTOV(np))) {
+ if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
+ error = nfs_buf_upl_setup(bp);
+ if (error) {
+ printf("nfs_buf_write: upl create failed %d\n", error);
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
+ }
+ nfs_buf_upl_check(bp);
}
- nfs_buf_upl_check(bp);
+ } else {
+ /* We should never be in nfs_buf_write() with no UBCINFO. */
+ printf("nfs_buf_write: ubcinfo already gone\n");
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
}
}
nfs_buf_check_write_verifier(np, bp);
if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
struct nfsmount *nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = EIO;
nfs_buf_iodone(bp);
char uio_buf [ UIO_SIZEOF(1) ];
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
LIST_INIT(&commitlist);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto done;
}
FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto out;
}
nfsnode_t np = VTONFS(vp);
struct nfsmount *nmp = VTONMP(vp);
int error, slpflag, slptimeo, nflags, retry = 0;
+ int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
struct timespec ts = { 2, 0 };
off_t size;
FSDBG_TOP(554, np, flags, intrflg, 0);
+ /*
+ * If the mount is gone, there is no sense in trying to write
+ * anything and hanging while trying to do IO.
+ */
+ if (nfs_mount_gone(nmp)) {
+ flags &= ~V_SAVE;
+ ubcflags &= ~UBC_PUSHALL;
+ }
+
if (nmp && !NMFLAG(nmp, INTR))
intrflg = 0;
if (intrflg) {
/* get the pages out of vm also */
if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
- if ((error = ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE))) {
+ if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
if (error == EINVAL)
panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
- if (retry++ < 10) /* retry invalidating a few times */
+ if (retry++ < 10) { /* retry invalidating a few times */
+ if (retry > 1 || error == ENXIO)
+ ubcflags &= ~UBC_PUSHALL;
goto again;
+ }
/* give up */
- printf("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
-
+ printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
}
done:
lck_mtx_lock(nfs_buf_mutex);
FSDBG_TOP(552, nmp, 0, 0, 0);
again:
- if (((nmp = req->r_nmp)) == NULL)
+ nmp = req->r_nmp;
+
+ if (nmp == NULL)
return;
+
lck_mtx_lock(nfsiod_mutex);
niod = nmp->nm_niod;
}
}
+ /*
+ * If we got here while being on the resendq we need to get off. This
+ * happens when the timer fires and errors out requests from nfs_sigintr
+ * or we receive a reply (UDP case) while being on the resend queue so
+ * we're just finishing up and are not going to be resent.
+ */
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_flags & R_RESENDQ) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+ NFS_BIO_DBG("Proccessing async request on resendq. Removing");
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_rchain.tqe_next = NFSREQNOLIST;
+ assert(req->r_refs > 1);
+ /* Remove resendq reference */
+ req->r_refs--;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ req->r_flags &= ~R_RESENDQ;
+ }
+ lck_mtx_unlock(&req->r_mtx);
+
if (req->r_achain.tqe_next == NFSREQNOLIST)
TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);
lck_mtx_unlock(nfsiod_mutex);
wakeup(niod);
} else if (nfsiod_thread_count > 0) {
- /* just queue it up on nfsiod mounts queue */
- TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
+ /* just queue it up on nfsiod mounts queue if needed */
+ if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
+ TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
lck_mtx_unlock(nfsiod_mutex);
} else {
printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
{
struct nfsmount *nmp = req->r_nmp;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
+
nfs_gss_clnt_rpcdone(req);
lck_mtx_lock(&nmp->nm_lock);
if (!(req->r_flags & R_RESENDQ)) {
TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
req->r_flags |= R_RESENDQ;
+ /*
+ * We take a reference on this request so that it can't be
+ * destroyed while a resend is queued or in progress.
+ */
+ nfs_request_ref(req, 1);
}
nfs_mount_sock_thread_wake(nmp);
lck_mtx_unlock(&nmp->nm_lock);
struct nfsmount *nmp = NFSTONMP(np);
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_vers < NFS_VER4)