/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
+#include <sys/kpi_mbuf.h>
#include <sys/vm.h>
#include <sys/vmparam.h>
#include <sys/buf_internal.h>
#include <libkern/OSAtomic.h>
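+/* debug-level (7) logging helper for the NFS buffer I/O facility (NFS_FAC_BIO) */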
+#define NFS_BIO_DBG(...) NFS_DBG(NFS_FAC_BIO, 7, ## __VA_ARGS__)
+
kern_return_t thread_terminate(thread_t); /* XXX */
#define NFSBUFHASH(np, lbn) \
struct nfsbuf *bp;
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(nfs_buf_mutex);
if (bufsize > NFS_MAXBSIZE)
panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
FSDBG_BOT(541, np, blkno, 0, ENXIO);
return (ENXIO);
}
loop:
lck_mtx_lock(nfs_buf_mutex);
+ /* wait for any buffer invalidation/flushing to complete */
+ while (np->n_bflag & NBINVALINPROG) {
+ np->n_bflag |= NBINVALWANT;
+ ts.tv_sec = 2;
+ ts.tv_nsec = 0;
+ msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_buf_get_invalwait", &ts);
+ if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, np, blkno, 0, error);
+ return (error);
+ }
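+ /* invalidation still in progress: drop PCATCH so further (timed) waits aren't signal-interruptible */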
+ if (np->n_bflag & NBINVALINPROG)
+ slpflag = 0;
+ }
+
/* check for existence of nfsbuf in cache */
if ((bp = nfs_buf_incore(np, blkno))) {
/* if busy, set wanted and wait */
if (start < NBOFF(bp))
start = NBOFF(bp);
if (end > start) {
- if (!(rv = ubc_sync_range(vp, start, end, UBC_INVALIDATE)))
- printf("nfs_buf_release(): ubc_sync_range failed!\n");
+ if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE)))
+ printf("nfs_buf_release(): ubc_msync failed!, error %d\n", rv);
}
}
CLR(bp->nb_flags, NB_PAGELIST);
return;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
return;
NFS_BUF_MAP(bp);
- OSAddAtomic(1, &nfsstats.read_bios);
+ OSAddAtomic64(1, &nfsstats.read_bios);
error = nfs_buf_read_rpc(bp, thd, cred);
/*
bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1;
if (bp->nb_validend & PAGE_MASK) {
/* zero-fill remainder of last page */
- bzero(bp->nb_data + bp->nb_validend, bp->nb_bufsize - bp->nb_validend);
+ bzero(bp->nb_data + bp->nb_validend, PAGE_SIZE - (bp->nb_validend & PAGE_MASK));
}
}
nfs_buf_iodone(bp);
struct nfsreq_cbinfo cb;
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
kauth_cred_ref(cred);
cb = req->r_callback;
bp = cb.rcb_bp;
+ if (cb.rcb_func) /* take an extra reference on the nfsreq in case we need to resend it later due to a grace error */
+ nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof);
if ((error == EINPROGRESS) && cb.rcb_func) {
/* async request restarted */
+ if (cb.rcb_func)
+ nfs_request_rele(req);
if (IS_VALID_CRED(cred))
kauth_cred_unref(&cred);
return;
}
if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
lck_mtx_lock(&nmp->nm_lock);
- if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) {
- printf("nfs_buf_read_rpc_finish: error %d, initiating recovery\n", error);
- nmp->nm_state |= NFSSTA_RECOVER;
- nfs_mount_sock_thread_wake(nmp);
+ if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) {
+ NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
+ error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid);
+ nfs_need_recover(nmp, error);
}
lck_mtx_unlock(&nmp->nm_lock);
- if (error == NFSERR_GRACE)
- tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
- if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
- rlen = 0;
- goto readagain;
+ if (np->n_flag & NREVOKE) {
+ error = EIO;
+ } else {
+ if (error == NFSERR_GRACE) {
+ if (cb.rcb_func) {
+ /*
+ * For an async I/O request, handle a grace delay just like
+ * jukebox errors. Set the resend time and queue it up.
+ */
+ struct timeval now;
+ if (req->r_nmrep.nmc_mhead) {
+ mbuf_freem(req->r_nmrep.nmc_mhead);
+ req->r_nmrep.nmc_mhead = NULL;
+ }
+ req->r_error = 0;
+ microuptime(&now);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_resendtime = now.tv_sec + 2;
+ req->r_xid = 0; // get a new XID
+ req->r_flags |= R_RESTART;
+ req->r_start = 0;
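+ /* hand the request back to the async resend machinery; it will be reissued once r_resendtime passes */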
+ nfs_asyncio_resend(req);
+ lck_mtx_unlock(&req->r_mtx);
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ /* Note: the nfsreq reference taken above will be dropped when the request finishes */
+ return;
+ }
+ /* otherwise, just pause a couple of seconds and retry */
+ tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
+ }
+ if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
+ rlen = 0;
+ goto readagain;
+ }
}
}
if (error) {
rreq = NULL;
goto finish;
}
+ nfs_request_rele(req);
/*
* We're done here.
* Outstanding RPC count is unchanged.
}
out:
+ if (cb.rcb_func)
+ nfs_request_rele(req);
if (IS_VALID_CRED(cred))
kauth_cred_unref(&cred);
{
struct nfsmount *nmp = NFSTONMP(np);
struct nfsbuf *bp;
- int error = 0, nra;
+ int error = 0;
+ uint32_t nra;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_readahead <= 0)
return (0);
{
vnode_t vp = NFSTOV(np);
struct nfsbuf *bp = NULL;
- struct nfs_vattr nvattr;
struct nfsmount *nmp = VTONMP(vp);
daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1;
off_t diff;
modified = (np->n_flag & NMODIFIED);
nfs_node_unlock(np);
/* nfs_getattr() will check changed and purge caches */
- error = nfs_getattr(np, &nvattr, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
+ error = nfs_getattr(np, NULL, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
if (error) {
FSDBG_BOT(514, np, 0xd1e0004, 0, error);
return (error);
}
/* count any biocache reads that we just copied directly */
if (lbn != (uio_offset(uio)/biosize)) {
- OSAddAtomic((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
+ OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
}
}
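+ /* track the last block read (used to detect sequential reads for readahead) */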
np->n_lastread = (uio_offset(uio) - 1) / biosize;
nfs_node_unlock(np);
+ if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
+ nfs_data_unlock(np);
+ FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
+ return (0);
+ }
+
/* adjust readahead block number, if necessary */
if (rabn < lbn)
rabn = lbn;
readaheads = 1;
}
- if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
- nfs_data_unlock(np);
- FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
- return (0);
- }
-
- OSAddAtomic(1, &nfsstats.biocache_reads);
+ OSAddAtomic64(1, &nfsstats.biocache_reads);
/*
* If the block is in the cache and has the required data
int
nfs_async_write_start(struct nfsmount *nmp)
{
- int error = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
+ int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
struct timespec ts = {1, 0};
if (nfs_max_async_writes <= 0)
thd = async ? NULL : current_thread();
/* We need to make sure the pages are locked before doing I/O. */
- if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) {
- if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
- error = nfs_buf_upl_setup(bp);
- if (error) {
- printf("nfs_buf_write: upl create failed %d\n", error);
- SET(bp->nb_flags, NB_ERROR);
- bp->nb_error = error = EIO;
- nfs_buf_iodone(bp);
- goto out;
+ if (!ISSET(bp->nb_flags, NB_META)) {
+ if (UBCINFOEXISTS(NFSTOV(np))) {
+ if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
+ error = nfs_buf_upl_setup(bp);
+ if (error) {
+ printf("nfs_buf_write: upl create failed %d\n", error);
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
+ }
+ nfs_buf_upl_check(bp);
}
- nfs_buf_upl_check(bp);
+ } else {
+ /* We should never be in nfs_buf_write() with no UBCINFO. */
+ printf("nfs_buf_write: ubcinfo already gone\n");
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
}
}
nfs_buf_check_write_verifier(np, bp);
if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
struct nfsmount *nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = EIO;
nfs_buf_iodone(bp);
}
SET(bp->nb_flags, NB_WRITEINPROG);
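+ /* pass the buffer's write verifier so a verifier change (server reboot) can be detected */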
error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff,
- bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred);
+ bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf);
CLR(bp->nb_flags, NB_WRITEINPROG);
if (error) {
if (error != NFSERR_STALEWRITEVERF) {
bp->nb_offio = doff;
bp->nb_endio = dend;
- OSAddAtomic(1, &nfsstats.write_bios);
+ OSAddAtomic64(1, &nfsstats.write_bios);
SET(bp->nb_flags, NB_WRITEINPROG);
error = nfs_buf_write_rpc(bp, iomode, thd, cred);
return (0);
/* there are pages marked dirty that need to be written out */
- OSAddAtomic(1, &nfsstats.write_bios);
+ OSAddAtomic64(1, &nfsstats.write_bios);
NFS_BUF_MAP(bp);
SET(bp->nb_flags, NB_WRITEINPROG);
npages = bp->nb_bufsize / PAGE_SIZE;
CLR(bp->nb_flags, NB_WRITEINPROG);
if (!error && (commit != NFS_WRITE_FILESYNC)) {
- error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred);
+ error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred, wverf);
if (error == NFSERR_STALEWRITEVERF) {
/* verifier changed, so we need to restart all the writes */
iomode = NFS_WRITE_FILESYNC;
char uio_buf [ UIO_SIZEOF(1) ];
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
} else {
nfs_buf_write_finish(bp, thd, cred);
}
+ /* It may have just been an interrupt... that's OK */
+ if (!ISSET(bp->nb_flags, NB_ERROR))
+ error = 0;
}
return (error);
kauth_cred_ref(cred);
cb = req->r_callback;
bp = cb.rcb_bp;
+ if (cb.rcb_func) /* take an extra reference on the nfsreq in case we need to resend it later due to a grace error */
+ nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf);
if ((error == EINPROGRESS) && cb.rcb_func) {
/* async request restarted */
+ if (cb.rcb_func)
+ nfs_request_rele(req);
if (IS_VALID_CRED(cred))
kauth_cred_unref(&cred);
return;
}
if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
lck_mtx_lock(&nmp->nm_lock);
- if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) {
- printf("nfs_buf_write_rpc_finish: error %d, initiating recovery\n", error);
- nmp->nm_state |= NFSSTA_RECOVER;
- nfs_mount_sock_thread_wake(nmp);
+ if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) {
+ NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
+ error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid);
+ nfs_need_recover(nmp, error);
}
lck_mtx_unlock(&nmp->nm_lock);
- if (error == NFSERR_GRACE)
- tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
- if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
- rlen = 0;
- goto writeagain;
+ if (np->n_flag & NREVOKE) {
+ error = EIO;
+ } else {
+ if (error == NFSERR_GRACE) {
+ if (cb.rcb_func) {
+ /*
+ * For an async I/O request, handle a grace delay just like
+ * jukebox errors. Set the resend time and queue it up.
+ */
+ struct timeval now;
+ if (req->r_nmrep.nmc_mhead) {
+ mbuf_freem(req->r_nmrep.nmc_mhead);
+ req->r_nmrep.nmc_mhead = NULL;
+ }
+ req->r_error = 0;
+ microuptime(&now);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_resendtime = now.tv_sec + 2;
+ req->r_xid = 0; // get a new XID
+ req->r_flags |= R_RESTART;
+ req->r_start = 0;
+ nfs_asyncio_resend(req);
+ lck_mtx_unlock(&req->r_mtx);
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ /* Note: the nfsreq reference taken above will be dropped when the request finishes */
+ return;
+ }
+ /* otherwise, just pause a couple of seconds and retry */
+ tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
+ }
+ if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
+ rlen = 0;
+ goto writeagain;
+ }
}
}
if (error) {
wreq = NULL;
goto finish;
}
+ nfs_request_rele(req);
/*
* We're done here.
* Outstanding RPC count is unchanged.
}
out:
- if (cb.rcb_func)
+ if (cb.rcb_func) {
nfs_async_write_done(nmp);
+ nfs_request_rele(req);
+ }
/*
* Decrement outstanding RPC count on buffer
* and call nfs_buf_write_finish on last RPC.
struct nfsbuflists blist, commitlist;
int error = 0, retv, wcred_set, flags, dirty;
u_quad_t off, endoff, toff;
+ uint64_t wverf;
u_int32_t count;
kauth_cred_t wcred = NULL;
LIST_INIT(&commitlist);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto done;
}
if (nowait)
flags |= NBI_NOWAIT;
lck_mtx_lock(nfs_buf_mutex);
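+ /* snapshot the write verifier; buffers written under a different verifier are skipped below */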
+ wverf = nmp->nm_verf;
if (!nfs_buf_iterprepare(np, &blist, flags)) {
while ((bp = LIST_FIRST(&blist))) {
LIST_REMOVE(bp, nb_vnbufs);
continue;
if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
nfs_buf_check_write_verifier(np, bp);
- if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT))
- != (NB_DELWRI | NB_NEEDCOMMIT))) {
+ if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) != (NB_DELWRI | NB_NEEDCOMMIT)) ||
+ (bp->nb_verf != wverf)) {
nfs_buf_drop(bp);
continue;
}
count = 0;
else
count = (endoff - off);
- retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred);
+ retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred, wverf);
} else {
retv = 0;
LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
toff = NBOFF(bp) + bp->nb_dirtyoff;
count = bp->nb_dirtyend - bp->nb_dirtyoff;
- retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred);
+ retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred, wverf);
if (retv)
break;
}
FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto out;
}
nfsvers = nmp->nm_vers;
- if (nmp->nm_flag & NFSMNT_INT)
+ if (NMFLAG(nmp, INTR))
slpflag = PCATCH;
if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
lck_mtx_lock(nfs_buf_mutex);
while (np->n_bflag & NBFLUSHINPROG) {
np->n_bflag |= NBFLUSHWANT;
- msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL);
- if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) {
+ error = msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL);
+ if ((error && (error != EWOULDBLOCK)) ||
+ ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))) {
lck_mtx_unlock(nfs_buf_mutex);
goto out;
}
if (error) {
FSDBG(554, bp, 0xd00dee, 0xbad, error);
nfs_node_lock_force(np);
- np->n_error = error;
- np->n_flag |= NWRITEERR;
+ if ((error != EINTR) && (error != ERESTART)) {
+ np->n_error = error;
+ np->n_flag |= NWRITEERR;
+ }
/*
* There was a write error and we need to
* invalidate attrs to sync with server.
*/
NATTRINVALIDATE(np);
nfs_node_unlock(np);
- if (error == EINTR) {
+ if ((error == EINTR) || (error == ERESTART)) {
/*
* Abort on EINTR. If we don't, we could
* be stuck in this loop forever because
{
nfsnode_t np = VTONFS(vp);
struct nfsmount *nmp = VTONMP(vp);
- int error, rv, slpflag, slptimeo, nflags;
+ int error, slpflag, slptimeo, nflags, retry = 0;
+ int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
+ struct timespec ts = { 2, 0 };
off_t size;
FSDBG_TOP(554, np, flags, intrflg, 0);
- if (nmp && !(nmp->nm_flag & NFSMNT_INT))
+ /*
+ * If the mount is gone, there's no sense trying to write anything;
+ * we'd just hang trying to do the I/O.
+ */
+ if (nfs_mount_gone(nmp)) {
+ flags &= ~V_SAVE;
+ ubcflags &= ~UBC_PUSHALL;
+ }
+
+ if (nmp && !NMFLAG(nmp, INTR))
intrflg = 0;
if (intrflg) {
slpflag = PCATCH;
lck_mtx_lock(nfs_buf_mutex);
while (np->n_bflag & NBINVALINPROG) {
np->n_bflag |= NBINVALWANT;
- msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", NULL);
+ msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", &ts);
if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
lck_mtx_unlock(nfs_buf_mutex);
return (error);
}
+ if (np->n_bflag & NBINVALINPROG)
+ slpflag = 0;
}
np->n_bflag |= NBINVALINPROG;
lck_mtx_unlock(nfs_buf_mutex);
/* Now, flush as required. */
+again:
error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0);
while (error) {
FSDBG(554, np, 0, 0, error);
/* get the pages out of vm also */
if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
- if (!(rv = ubc_sync_range(vp, 0, size, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE)))
- panic("nfs_vinvalbuf(): ubc_sync_range failed!");
+ if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
+ if (error == EINVAL)
+ panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
+ if (retry++ < 10) { /* retry invalidating a few times */
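+ /* drop UBC_PUSHALL after the first retry, or right away if the device is gone (ENXIO) */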
+ if (retry > 1 || error == ENXIO)
+ ubcflags &= ~UBC_PUSHALL;
+ goto again;
+ }
+ /* give up */
+ printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
+ }
done:
lck_mtx_lock(nfs_buf_mutex);
nflags = np->n_bflag;
return (error);
}
+/*
+ * Wait for any busy buffers to complete: walk the clean and dirty lists,
+ * acquiring and immediately dropping each buffer.  nfs_buf_acquire() won't
+ * succeed until a busy buffer's I/O has finished.
+ */
+void
+nfs_wait_bufs(nfsnode_t np)
+{
+ struct nfsbuf *bp;
+ struct nfsbuflists blist;
+ int error = 0;
+
+ lck_mtx_lock(nfs_buf_mutex);
+ if (!nfs_buf_iterprepare(np, &blist, NBI_CLEAN)) {
+ while ((bp = LIST_FIRST(&blist))) {
+ LIST_REMOVE(bp, nb_vnbufs);
+ LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
+ nfs_buf_refget(bp);
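+ /* nfs_buf_acquire() returns EAGAIN while the buffer is busy; keep waiting until it's ours */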
+ while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
+ if (error != EAGAIN) {
+ nfs_buf_refrele(bp);
+ nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
+ lck_mtx_unlock(nfs_buf_mutex);
+ return;
+ }
+ }
+ nfs_buf_refrele(bp);
+ nfs_buf_drop(bp);
+ }
+ nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
+ }
+ if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
+ while ((bp = LIST_FIRST(&blist))) {
+ LIST_REMOVE(bp, nb_vnbufs);
+ LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
+ nfs_buf_refget(bp);
+ while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
+ if (error != EAGAIN) {
+ nfs_buf_refrele(bp);
+ nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
+ lck_mtx_unlock(nfs_buf_mutex);
+ return;
+ }
+ }
+ nfs_buf_refrele(bp);
+ nfs_buf_drop(bp);
+ }
+ nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
+ }
+ lck_mtx_unlock(nfs_buf_mutex);
+}
+
+
/*
* Add an async I/O request to the mount's async I/O queue and make
* sure that an nfsiod will service it.
FSDBG_TOP(552, nmp, 0, 0, 0);
again:
- if (((nmp = req->r_nmp)) == NULL)
+ nmp = req->r_nmp;
+
+ if (nmp == NULL)
return;
+
lck_mtx_lock(nfsiod_mutex);
niod = nmp->nm_niod;
}
}
+ /*
+ * If we got here while the request is still on the resendq, we need to
+ * take it off.  This happens when the timer fires and nfs_sigintr errors
+ * out the request, or when a reply arrives (UDP case) while the request
+ * is still queued for resend -- we're just finishing up and it won't be
+ * resent.
+ */
+ lck_mtx_lock(&req->r_mtx);
+ if (req->r_flags & R_RESENDQ) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+ NFS_BIO_DBG("Proccessing async request on resendq. Removing");
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ req->r_rchain.tqe_next = NFSREQNOLIST;
+ assert(req->r_refs > 1);
+ /* Remove resendq reference */
+ req->r_refs--;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ req->r_flags &= ~R_RESENDQ;
+ }
+ lck_mtx_unlock(&req->r_mtx);
+
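+ /* queue the request on the mount's iod queue, but only if it's not already there */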
if (req->r_achain.tqe_next == NFSREQNOLIST)
TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);
lck_mtx_unlock(nfsiod_mutex);
wakeup(niod);
} else if (nfsiod_thread_count > 0) {
- /* just queue it up on nfsiod mounts queue */
- TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
+ /* just queue it up on the nfsiod mounts queue if needed */
+ if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
+ TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
lck_mtx_unlock(nfsiod_mutex);
} else {
printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
{
struct nfsmount *nmp = req->r_nmp;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
+
nfs_gss_clnt_rpcdone(req);
lck_mtx_lock(&nmp->nm_lock);
if (!(req->r_flags & R_RESENDQ)) {
TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
req->r_flags |= R_RESENDQ;
+ /*
+ * We take a reference on this request so that it can't be
+ * destroyed while a resend is queued or in progress.
+ */
+ nfs_request_ref(req, 1);
}
nfs_mount_sock_thread_wake(nmp);
lck_mtx_unlock(&nmp->nm_lock);
struct nfsmount *nmp = NFSTONMP(np);
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_vers < NFS_VER4)