X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..fe8ab488e9161c46dd9885d58fc52996dc0249ff:/bsd/nfs/nfs_bio.c

diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c
index 4bd1bff61..a58e5d866 100644
--- a/bsd/nfs/nfs_bio.c
+++ b/bsd/nfs/nfs_bio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -346,7 +346,7 @@ nfs_buf_page_inval(vnode_t vp, off_t offset)
 	struct nfsbuf *bp;
 	int error = 0;
 
-	if (!nmp)
+	if (nfs_mount_gone(nmp))
 		return (ENXIO);
 
 	lck_mtx_lock(nfs_buf_mutex);
@@ -658,7 +658,7 @@ nfs_buf_get(
 	if (bufsize > NFS_MAXBSIZE)
 		panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");
 
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		FSDBG_BOT(541, np, blkno, 0, ENXIO);
 		return (ENXIO);
 	}
@@ -1291,7 +1291,7 @@ nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
 		return;
 
 	nmp = NFSTONMP(np);
-	if (!nmp)
+	if (nfs_mount_gone(nmp))
 		return;
 	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
 		return;
@@ -1475,7 +1475,7 @@ nfs_buf_read(struct nfsbuf *bp)
 
 	NFS_BUF_MAP(bp);
 
-	OSAddAtomic(1, &nfsstats.read_bios);
+	OSAddAtomic64(1, &nfsstats.read_bios);
 
 	error = nfs_buf_read_rpc(bp, thd, cred);
 	/*
@@ -1546,7 +1546,7 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
 	struct nfsreq_cbinfo cb;
 
 	nmp = NFSTONMP(np);
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		bp->nb_error = error = ENXIO;
 		SET(bp->nb_flags, NB_ERROR);
 		nfs_buf_iodone(bp);
@@ -1669,7 +1669,7 @@ finish:
 	nfs_request_ref(req, 0);
 
 	nmp = NFSTONMP(np);
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		SET(bp->nb_flags, NB_ERROR);
 		bp->nb_error = error = ENXIO;
 	}
@@ -1842,7 +1842,7 @@ nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn
 	int error = 0;
 	uint32_t nra;
 
-	if (!nmp)
+	if (nfs_mount_gone(nmp))
 		return (ENXIO);
 	if (nmp->nm_readahead <= 0)
 		return (0);
@@ -2028,7 +2028,7 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
 			}
 			/* count any biocache reads that we just copied directly */
 			if (lbn != (uio_offset(uio)/biosize)) {
-				OSAddAtomic((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
+				OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
 				FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
 			}
 		}
@@ -2059,7 +2059,7 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
 			readaheads = 1;
 		}
 
-		OSAddAtomic(1, &nfsstats.biocache_reads);
+		OSAddAtomic64(1, &nfsstats.biocache_reads);
 
 		/*
 		 * If the block is in the cache and has the required data
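
Most of the hunks above (and below) replace a bare NULL check on the mount, "if (!nmp)", with nfs_mount_gone(nmp). The helper's definition is not part of this diff, so the sketch below is only a user-space model of the idea under assumed names (nfsmount_model and the NFSSTA_MODEL_* flags are made up): a mount is treated as gone not only when the pointer is NULL but presumably also when it is being force-unmounted or has been marked dead, and callers bail out with ENXIO before starting any buffer I/O.

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for xnu's nfsmount state; names and layout are assumed. */
    #define NFSSTA_MODEL_FORCE  0x01    /* force unmount in progress (assumed flag) */
    #define NFSSTA_MODEL_DEAD   0x02    /* mount marked dead (assumed flag) */

    struct nfsmount_model {
        unsigned int nm_state;
    };

    /*
     * Sketch of a "mount gone" predicate: unlike the old bare NULL check,
     * it also treats a force-unmounted or dead mount as gone.
     */
    static int
    mount_gone_model(struct nfsmount_model *nmp)
    {
        return (nmp == NULL ||
            (nmp->nm_state & (NFSSTA_MODEL_FORCE | NFSSTA_MODEL_DEAD)) != 0);
    }

    /* Caller pattern used throughout the hunks: bail out with ENXIO before I/O. */
    static int
    do_buf_io_model(struct nfsmount_model *nmp)
    {
        if (mount_gone_model(nmp))
            return (ENXIO);
        /* ... issue the read/write RPC here ... */
        return (0);
    }

    int
    main(void)
    {
        struct nfsmount_model dead = { .nm_state = NFSSTA_MODEL_DEAD };

        printf("NULL mount -> %d\n", do_buf_io_model(NULL));  /* ENXIO */
        printf("dead mount -> %d\n", do_buf_io_model(&dead)); /* ENXIO */
        return (0);
    }

The net effect in the buffer code is that reads, writes, readaheads and commits fail fast with ENXIO on a dead or force-unmounted mount instead of being queued against a server that will never answer.
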
@@ -2327,17 +2327,26 @@ nfs_buf_write(struct nfsbuf *bp)
 	thd = async ? NULL : current_thread();
 
 	/* We need to make sure the pages are locked before doing I/O.  */
-	if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) {
-		if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
-			error = nfs_buf_upl_setup(bp);
-			if (error) {
-				printf("nfs_buf_write: upl create failed %d\n", error);
-				SET(bp->nb_flags, NB_ERROR);
-				bp->nb_error = error = EIO;
-				nfs_buf_iodone(bp);
-				goto out;
+	if (!ISSET(bp->nb_flags, NB_META)) {
+		if (UBCINFOEXISTS(NFSTOV(np))) {
+			if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
+				error = nfs_buf_upl_setup(bp);
+				if (error) {
+					printf("nfs_buf_write: upl create failed %d\n", error);
+					SET(bp->nb_flags, NB_ERROR);
+					bp->nb_error = error = EIO;
+					nfs_buf_iodone(bp);
+					goto out;
+				}
+				nfs_buf_upl_check(bp);
 			}
-			nfs_buf_upl_check(bp);
+		} else {
+			/* We should never be in nfs_buf_write() with no UBCINFO. */
+			printf("nfs_buf_write: ubcinfo already gone\n");
+			SET(bp->nb_flags, NB_ERROR);
+			bp->nb_error = error = EIO;
+			nfs_buf_iodone(bp);
+			goto out;
 		}
 	}
 
@@ -2346,7 +2355,7 @@ nfs_buf_write(struct nfsbuf *bp)
 		nfs_buf_check_write_verifier(np, bp);
 	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
 		struct nfsmount *nmp = NFSTONMP(np);
-		if (!nmp) {
+		if (nfs_mount_gone(nmp)) {
 			SET(bp->nb_flags, NB_ERROR);
 			bp->nb_error = error = EIO;
 			nfs_buf_iodone(bp);
@@ -2425,7 +2434,7 @@ nfs_buf_write(struct nfsbuf *bp)
 	bp->nb_offio = doff;
 	bp->nb_endio = dend;
 
-	OSAddAtomic(1, &nfsstats.write_bios);
+	OSAddAtomic64(1, &nfsstats.write_bios);
 
 	SET(bp->nb_flags, NB_WRITEINPROG);
 	error = nfs_buf_write_rpc(bp, iomode, thd, cred);
@@ -2613,7 +2622,7 @@ nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
 		return (0);
 
 	/* there are pages marked dirty that need to be written out */
-	OSAddAtomic(1, &nfsstats.write_bios);
+	OSAddAtomic64(1, &nfsstats.write_bios);
 	NFS_BUF_MAP(bp);
 	SET(bp->nb_flags, NB_WRITEINPROG);
 	npages = bp->nb_bufsize / PAGE_SIZE;
@@ -2696,7 +2705,7 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred
 	char uio_buf [ UIO_SIZEOF(1) ];
 
 	nmp = NFSTONMP(np);
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		bp->nb_error = error = ENXIO;
 		SET(bp->nb_flags, NB_ERROR);
 		nfs_buf_iodone(bp);
@@ -2825,7 +2834,7 @@ finish:
 	nfs_request_ref(req, 0);
 
 	nmp = NFSTONMP(np);
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		SET(bp->nb_flags, NB_ERROR);
 		bp->nb_error = error = ENXIO;
 	}
@@ -3037,7 +3046,7 @@ nfs_flushcommits(nfsnode_t np, int nowait)
 	LIST_INIT(&commitlist);
 
 	nmp = NFSTONMP(np);
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		error = ENXIO;
 		goto done;
 	}
@@ -3251,7 +3260,7 @@ nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr)
 
 	FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);
 
-	if (!nmp) {
+	if (nfs_mount_gone(nmp)) {
 		error = ENXIO;
 		goto out;
 	}
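
The statistics bumps in this file (read_bios and biocache_reads above, write_bios here) move from OSAddAtomic() to OSAddAtomic64(), which implies the corresponding nfsstats counters are now 64 bits wide, presumably so long-running, busy clients cannot wrap a 32-bit count. OSAddAtomic64 is a libkern primitive; the sketch below only models the pattern in user space with C11 atomics and a made-up nfsstats_model struct.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Model of the statistics counters touched in this diff (read_bios,
     * write_bios, biocache_reads): a 64-bit counter bumped atomically.
     */
    struct nfsstats_model {
        _Atomic uint64_t read_bios;
        _Atomic uint64_t write_bios;
    };

    static struct nfsstats_model stats_model;

    static void
    count_read_bio(void)
    {
        /* Equivalent in spirit to OSAddAtomic64(1, &nfsstats.read_bios). */
        atomic_fetch_add_explicit(&stats_model.read_bios, 1, memory_order_relaxed);
    }

    int
    main(void)
    {
        for (int i = 0; i < 1000; i++)
            count_read_bio();
        printf("read_bios = %" PRIu64 "\n",
            atomic_load_explicit(&stats_model.read_bios, memory_order_relaxed));
        return (0);
    }
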
@@ -3620,11 +3629,21 @@ nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrf
 	nfsnode_t np = VTONFS(vp);
 	struct nfsmount *nmp = VTONMP(vp);
 	int error, slpflag, slptimeo, nflags, retry = 0;
+	int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
 	struct timespec ts = { 2, 0 };
 	off_t size;
 
 	FSDBG_TOP(554, np, flags, intrflg, 0);
 
+	/*
+	 * If the mount is gone no sense to try and write anything.
+	 * and hang trying to do IO.
+	 */
+	if (nfs_mount_gone(nmp)) {
+		flags &= ~V_SAVE;
+		ubcflags &= ~UBC_PUSHALL;
+	}
+
 	if (nmp && !NMFLAG(nmp, INTR))
 		intrflg = 0;
 	if (intrflg) {
@@ -3662,14 +3681,16 @@ again:
 
 	/* get the pages out of vm also */
 	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
-		if ((error = ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE))) {
+		if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
 			if (error == EINVAL)
 				panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
-			if (retry++ < 10) /* retry invalidating a few times */
+			if (retry++ < 10) { /* retry invalidating a few times */
+				if (retry > 1 || error == ENXIO)
+					ubcflags &= ~UBC_PUSHALL;
 				goto again;
+			}
 			/* give up */
-			printf("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
-
+			printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
 		}
 done:
 	lck_mtx_lock(nfs_buf_mutex);
@@ -3747,8 +3768,11 @@ nfs_asyncio_finish(struct nfsreq *req)
 	FSDBG_TOP(552, nmp, 0, 0, 0);
 
 again:
-	if (((nmp = req->r_nmp)) == NULL)
+	nmp = req->r_nmp;
+
+	if (nmp == NULL)
 		return;
+
 	lck_mtx_lock(nfsiod_mutex);
 	niod = nmp->nm_niod;
 
@@ -3783,8 +3807,9 @@ again:
 		lck_mtx_unlock(nfsiod_mutex);
 		wakeup(niod);
 	} else if (nfsiod_thread_count > 0) {
-		/* just queue it up on nfsiod mounts queue */
-		TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
+		/* just queue it up on nfsiod mounts queue if needed */
+		if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
+			TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
 		lck_mtx_unlock(nfsiod_mutex);
 	} else {
 		printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
@@ -3808,7 +3833,7 @@ nfs_asyncio_resend(struct nfsreq *req)
 {
 	struct nfsmount *nmp = req->r_nmp;
 
-	if (!nmp)
+	if (nfs_mount_gone(nmp))
 		return;
 	nfs_gss_clnt_rpcdone(req);
 	lck_mtx_lock(&nmp->nm_lock);
@@ -3833,7 +3858,7 @@ nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx)
 	struct nfsmount *nmp = NFSTONMP(np);
 	int error = 0;
 
-	if (!nmp)
+	if (nfs_mount_gone(nmp))
 		return (ENXIO);
 
 	if (nmp->nm_vers < NFS_VER4)
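
The nfs_vinvalbuf2() hunks turn the hard-coded ubc_msync() flags into a local ubcflags variable so the flush can degrade: when the mount is already gone, V_SAVE and UBC_PUSHALL are cleared up front, and in the retry loop UBC_PUSHALL is dropped after the first failure (or on ENXIO), so the code keeps invalidating pages without hanging while trying to push dirty data to an unreachable server. The sketch below is a minimal user-space model of that retry policy; invalidate_model, msync_model, and the MODEL_* flags are made-up stand-ins for ubc_msync() and the UBC_* flags, not the real API.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical flag bits standing in for UBC_PUSHALL / UBC_SYNC / UBC_INVALIDATE. */
    #define MODEL_PUSHALL     0x01
    #define MODEL_SYNC        0x02
    #define MODEL_INVALIDATE  0x04

    /*
     * Retry policy modeled on the nfs_vinvalbuf2() hunks: start out pushing and
     * invalidating, but after the first failure (or on ENXIO) drop the push so a
     * dead server cannot hang the loop; keep invalidating for up to 10 tries.
     */
    static int
    invalidate_model(int (*msync_model)(int flags))
    {
        int ubcflags = MODEL_PUSHALL | MODEL_SYNC | MODEL_INVALIDATE;
        int error, retry = 0;

    again:
        error = msync_model(ubcflags);
        if (error) {
            if (retry++ < 10) {             /* retry invalidating a few times */
                if (retry > 1 || error == ENXIO)
                    ubcflags &= ~MODEL_PUSHALL;
                goto again;
            }
            /* give up */
            printf("invalidate_model: msync failed, error %d\n", error);
        }
        return (error);
    }

    /* Example callback: pushing dirty pages fails (server gone), plain invalidate works. */
    static int
    msync_fails_on_push(int flags)
    {
        return ((flags & MODEL_PUSHALL) ? ENXIO : 0);
    }

    int
    main(void)
    {
        printf("result = %d\n", invalidate_model(msync_fails_on_push)); /* prints 0 */
        return (0);
    }

With the example callback, the first pass fails because pushing needs the server and the second pass succeeds with invalidation only, mirroring how the kernel change keeps a dead mount from hanging during invalidation or unmount.
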