/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
struct nfsbuf *bp;
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(nfs_buf_mutex);
if (bufsize > NFS_MAXBSIZE)
panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
FSDBG_BOT(541, np, blkno, 0, ENXIO);
return (ENXIO);
}
return;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
return;
NFS_BUF_MAP(bp);
- OSAddAtomic(1, &nfsstats.read_bios);
+ OSAddAtomic64(1, &nfsstats.read_bios);
error = nfs_buf_read_rpc(bp, thd, cred);
/*
struct nfsreq_cbinfo cb;
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
int error = 0;
uint32_t nra;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_readahead <= 0)
return (0);
}
/* count any biocache reads that we just copied directly */
if (lbn != (uio_offset(uio)/biosize)) {
- OSAddAtomic((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
+ OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
}
}
readaheads = 1;
}
- OSAddAtomic(1, &nfsstats.biocache_reads);
+ OSAddAtomic64(1, &nfsstats.biocache_reads);
/*
* If the block is in the cache and has the required data
thd = async ? NULL : current_thread();
/* We need to make sure the pages are locked before doing I/O. */
- if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) {
- if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
- error = nfs_buf_upl_setup(bp);
- if (error) {
- printf("nfs_buf_write: upl create failed %d\n", error);
- SET(bp->nb_flags, NB_ERROR);
- bp->nb_error = error = EIO;
- nfs_buf_iodone(bp);
- goto out;
+ if (!ISSET(bp->nb_flags, NB_META)) {
+ if (UBCINFOEXISTS(NFSTOV(np))) {
+ if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
+ error = nfs_buf_upl_setup(bp);
+ if (error) {
+ printf("nfs_buf_write: upl create failed %d\n", error);
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
+ }
+ nfs_buf_upl_check(bp);
}
- nfs_buf_upl_check(bp);
+ } else {
+ /* We should never be in nfs_buf_write() with no UBCINFO. */
+ printf("nfs_buf_write: ubcinfo already gone\n");
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = EIO;
+ nfs_buf_iodone(bp);
+ goto out;
}
}
nfs_buf_check_write_verifier(np, bp);
if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
struct nfsmount *nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = EIO;
nfs_buf_iodone(bp);
bp->nb_offio = doff;
bp->nb_endio = dend;
- OSAddAtomic(1, &nfsstats.write_bios);
+ OSAddAtomic64(1, &nfsstats.write_bios);
SET(bp->nb_flags, NB_WRITEINPROG);
error = nfs_buf_write_rpc(bp, iomode, thd, cred);
return (0);
/* there are pages marked dirty that need to be written out */
- OSAddAtomic(1, &nfsstats.write_bios);
+ OSAddAtomic64(1, &nfsstats.write_bios);
NFS_BUF_MAP(bp);
SET(bp->nb_flags, NB_WRITEINPROG);
npages = bp->nb_bufsize / PAGE_SIZE;
char uio_buf [ UIO_SIZEOF(1) ];
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
bp->nb_error = error = ENXIO;
SET(bp->nb_flags, NB_ERROR);
nfs_buf_iodone(bp);
nfs_request_ref(req, 0);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error = ENXIO;
}
LIST_INIT(&commitlist);
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto done;
}
FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto out;
}
nfsnode_t np = VTONFS(vp);
struct nfsmount *nmp = VTONMP(vp);
int error, slpflag, slptimeo, nflags, retry = 0;
+ int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
struct timespec ts = { 2, 0 };
off_t size;
FSDBG_TOP(554, np, flags, intrflg, 0);
+ /*
+ * If the mount is gone there is no sense in trying to write
+ * anything and hanging while attempting the I/O.
+ */
+ if (nfs_mount_gone(nmp)) {
+ flags &= ~V_SAVE;
+ ubcflags &= ~UBC_PUSHALL;
+ }
+
if (nmp && !NMFLAG(nmp, INTR))
intrflg = 0;
if (intrflg) {
/* get the pages out of vm also */
if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
- if ((error = ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE))) {
+ if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
if (error == EINVAL)
panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
- if (retry++ < 10) /* retry invalidating a few times */
+ if (retry++ < 10) { /* retry invalidating a few times */
+ if (retry > 1 || error == ENXIO)
+ ubcflags &= ~UBC_PUSHALL;
goto again;
+ }
/* give up */
- printf("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
-
+ printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
}
done:
lck_mtx_lock(nfs_buf_mutex);
FSDBG_TOP(552, nmp, 0, 0, 0);
again:
- if (((nmp = req->r_nmp)) == NULL)
+ nmp = req->r_nmp;
+
+ if (nmp == NULL)
return;
+
lck_mtx_lock(nfsiod_mutex);
niod = nmp->nm_niod;
lck_mtx_unlock(nfsiod_mutex);
wakeup(niod);
} else if (nfsiod_thread_count > 0) {
- /* just queue it up on nfsiod mounts queue */
- TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
+ /* just queue it up on nfsiod mounts queue if needed */
+ if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
+ TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
lck_mtx_unlock(nfsiod_mutex);
} else {
printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
{
struct nfsmount *nmp = req->r_nmp;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return;
nfs_gss_clnt_rpcdone(req);
lck_mtx_lock(&nmp->nm_lock);
struct nfsmount *nmp = NFSTONMP(np);
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (nmp->nm_vers < NFS_VER4)