+extern int nfs_numasync;
+extern int nfs_ioddelwri;
+extern struct nfsstats nfsstats;
+
+#define NFSBUFHASH(np, lbn) \
+ (&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
+LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl;
+struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
+u_long nfsbufhash;
+int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
+int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
+int nfs_nbdwrite;
+time_t nfsbuffreeuptimestamp;
+
+lck_grp_t *nfs_buf_lck_grp;
+lck_grp_attr_t *nfs_buf_lck_grp_attr;
+lck_attr_t *nfs_buf_lck_attr;
+lck_mtx_t *nfs_buf_mutex;
+
+#define NFSBUFWRITE_THROTTLE 9
+#define NFSBUF_LRU_STALE 120
+#define NFSBUF_META_STALE 240
+
+/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
+#define LRU_TO_FREEUP 6
+/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
+#define META_TO_FREEUP 3
+/* total number of nfsbufs nfs_buf_freeup() should attempt to free */
+#define TOTAL_TO_FREEUP (LRU_TO_FREEUP+META_TO_FREEUP)
+/* when called from nfs_timer(), nfs_buf_freeup() tries to free 1/LRU_FREEUP_FRAC_ON_TIMER of the nfsbuffree list */
+#define LRU_FREEUP_FRAC_ON_TIMER 8
+/* when called from nfs_timer(), nfs_buf_freeup() tries to free 1/META_FREEUP_FRAC_ON_TIMER of the nfsbuffreemeta list */
+#define META_FREEUP_FRAC_ON_TIMER 16
+/* nfsbuffreecnt must exceed nfsbufcnt/LRU_FREEUP_MIN_FRAC before it's worth calling nfs_buf_freeup() */
+#define LRU_FREEUP_MIN_FRAC 4
+/* nfsbuffreemetacnt must exceed nfsbufcnt/META_FREEUP_MIN_FRAC before it's worth calling nfs_buf_freeup() */
+#define META_FREEUP_MIN_FRAC 2
+
+#define NFS_BUF_FREEUP() \
+ do { \
+ /* only call nfs_buf_freeup() if it has work to do: */ \
+ if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
+ (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
+ ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
+ nfs_buf_freeup(0); \
+ } while (0)
+
+/*
+ * Initialize nfsbuf lists
+ */
+void
+nfs_nbinit(void)
+{
+ nfs_buf_lck_grp_attr = lck_grp_attr_alloc_init();
+ lck_grp_attr_setstat(nfs_buf_lck_grp_attr);
+ nfs_buf_lck_grp = lck_grp_alloc_init("nfs_buf", nfs_buf_lck_grp_attr);
+
+ nfs_buf_lck_attr = lck_attr_alloc_init();
+
+ nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, nfs_buf_lck_attr);
+
+ nfsbufcnt = nfsbufmetacnt =
+ nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
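+ /*
+ * scale the limits to physical memory: allow up to 1/4 of the
+ * machine's pages for nfsbufs and up to 1/16 for metadata nfsbufs
+ */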
+ nfsbufmin = 128;
+ nfsbufmax = (sane_size >> PAGE_SHIFT) / 4;
+ nfsbufmetamax = (sane_size >> PAGE_SHIFT) / 16;
+ nfsneedbuffer = 0;
+ nfs_nbdwrite = 0;
+ nfsbuffreeuptimestamp = 0;
+
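+ /* size the hash table at one quarter of the maximum buffer count */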
+ nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash);
+ TAILQ_INIT(&nfsbuffree);
+ TAILQ_INIT(&nfsbuffreemeta);
+ TAILQ_INIT(&nfsbufdelwri);
+
+}
+
+/*
+ * try to free up some excess, unused nfsbufs
+ */
+void
+nfs_buf_freeup(int timer)
+{
+ struct nfsbuf *fbp;
+ struct timeval now;
+ int count;
+ struct nfsbuffreehead nfsbuffreeup;
+
+ TAILQ_INIT(&nfsbuffreeup);
+
+ lck_mtx_lock(nfs_buf_mutex);
+
+ microuptime(&now);
+ nfsbuffreeuptimestamp = now.tv_sec;
+
+ FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
+
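+ /*
+ * trim the LRU and META free lists: when called from the timer only
+ * a fraction of each list is freed; stop early at a referenced buffer
+ * or one that was used within twice its stale threshold
+ */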
+ count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
+ while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
+ fbp = TAILQ_FIRST(&nfsbuffree);
+ if (!fbp)
+ break;
+ if (fbp->nb_refs)
+ break;
+ if (NBUFSTAMPVALID(fbp) &&
+ (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec)
+ break;
+ nfs_buf_remfree(fbp);
+ /* disassociate buffer from any vnode */
+ if (fbp->nb_vp) {
+ if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
+ LIST_REMOVE(fbp, nb_vnbufs);
+ fbp->nb_vnbufs.le_next = NFSNOLIST;
+ }
+ fbp->nb_vp = NULL;
+ }
+ LIST_REMOVE(fbp, nb_hash);
+ TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
+ nfsbufcnt--;
+ }
+
+ count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
+ while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
+ fbp = TAILQ_FIRST(&nfsbuffreemeta);
+ if (!fbp)
+ break;
+ if (fbp->nb_refs)
+ break;
+ if (NBUFSTAMPVALID(fbp) &&
+ (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec)
+ break;
+ nfs_buf_remfree(fbp);
+ /* disassociate buffer from any vnode */
+ if (fbp->nb_vp) {
+ if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
+ LIST_REMOVE(fbp, nb_vnbufs);
+ fbp->nb_vnbufs.le_next = NFSNOLIST;
+ }
+ fbp->nb_vp = NULL;
+ }
+ LIST_REMOVE(fbp, nb_hash);
+ TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
+ nfsbufcnt--;
+ nfsbufmetacnt--;
+ }
+
+ FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
+ NFSBUFCNTCHK(1);
+
+ lck_mtx_unlock(nfs_buf_mutex);
+
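+ /*
+ * now that the mutex is dropped, do the actual deallocations:
+ * release any creds, free kalloc'd META data, and free the nfsbufs
+ */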
+ while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
+ TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
+ /* nuke any creds */
+ if (fbp->nb_rcred != NOCRED) {
+ kauth_cred_rele(fbp->nb_rcred);
+ fbp->nb_rcred = NOCRED;
+ }
+ if (fbp->nb_wcred != NOCRED) {
+ kauth_cred_rele(fbp->nb_wcred);
+ fbp->nb_wcred = NOCRED;
+ }
+ /* if buf was NB_META, dump buffer */
+ if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data)
+ kfree(fbp->nb_data, fbp->nb_bufsize);
+ FREE(fbp, M_TEMP);
+ }
+
+}
+
+/*
+ * remove a buffer from the freelist
+ * (must be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_remfree(struct nfsbuf *bp)
+{
+ if (bp->nb_free.tqe_next == NFSNOLIST)
+ panic("nfsbuf not on free list");
+ if (ISSET(bp->nb_flags, NB_DELWRI)) {
+ nfsbufdelwricnt--;
+ TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
+ } else if (ISSET(bp->nb_flags, NB_META)) {
+ nfsbuffreemetacnt--;
+ TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
+ } else {
+ nfsbuffreecnt--;
+ TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
+ }
+ bp->nb_free.tqe_next = NFSNOLIST;
+ NFSBUFCNTCHK(1);
+}
+
+/*
+ * check for existence of nfsbuf in cache
+ */
+boolean_t
+nfs_buf_is_incore(vnode_t vp, daddr64_t blkno)
+{
+ boolean_t rv;
+ lck_mtx_lock(nfs_buf_mutex);
+ if (nfs_buf_incore(vp, blkno))
+ rv = TRUE;
+ else
+ rv = FALSE;
+ lck_mtx_unlock(nfs_buf_mutex);
+ return (rv);
+}
+
+/*
+ * return incore buffer (must be called with nfs_buf_mutex held)
+ */
+struct nfsbuf *
+nfs_buf_incore(vnode_t vp, daddr64_t blkno)
+{
+ /* Search hash chain */
+ struct nfsbuf * bp = NFSBUFHASH(VTONFS(vp), blkno)->lh_first;
+ for (; bp != NULL; bp = bp->nb_hash.le_next)
+ if (bp->nb_lblkno == blkno && bp->nb_vp == vp) {
+ if (!ISSET(bp->nb_flags, NB_INVAL)) {
+ FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_vp);
+ return (bp);
+ }
+ }
+ return (NULL);
+}
+
+/*
+ * Check if it's OK to drop a page.
+ *
+ * Called by vnode_pager() on pageout request of non-dirty page.
+ * We need to make sure that it's not part of a delayed write.
+ * If it is, we can't let the VM drop it because we may need it
+ * later when/if we need to write the data (again).
+ */
+int
+nfs_buf_page_inval(vnode_t vp, off_t offset)
+{
+ struct nfsbuf *bp;
+ int error = 0;
+
+ lck_mtx_lock(nfs_buf_mutex);
+ bp = nfs_buf_incore(vp, ubc_offtoblk(vp, offset));
+ if (!bp)
+ goto out;
+ FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
+ if (ISSET(bp->nb_lflags, NBL_BUSY)) {
+ error = EBUSY;
+ goto out;
+ }
+ /*
+ * If there's a dirty range in the buffer, check to
+ * see if this page intersects with the dirty range.
+ * If it does, we can't let the pager drop the page.
+ */
+ if (bp->nb_dirtyend > 0) {
+ int start = offset - NBOFF(bp);
+ if (bp->nb_dirtyend <= start ||
+ bp->nb_dirtyoff >= (start + PAGE_SIZE))
+ error = 0;
+ else
+ error = EBUSY;
+ }
+out:
+ lck_mtx_unlock(nfs_buf_mutex);
+ return (error);
+}
+
+/*
+ * set up the UPL for a buffer
+ * (must NOT be called with nfs_buf_mutex held)
+ */
+int
+nfs_buf_upl_setup(struct nfsbuf *bp)
+{
+ kern_return_t kret;
+ upl_t upl;
+ int upl_flags;
+
+ if (ISSET(bp->nb_flags, NB_PAGELIST))
+ return (0);
+
+ upl_flags = UPL_PRECIOUS;
+ if (! ISSET(bp->nb_flags, NB_READ)) {
+ /*
+ * We're doing a "write", so we intend to modify
+ * the pages we're gathering.
+ */
+ upl_flags |= UPL_WILL_MODIFY;
+ }
+ kret = ubc_create_upl(bp->nb_vp, NBOFF(bp), bp->nb_bufsize,
+ &upl, NULL, upl_flags);
+ if (kret == KERN_INVALID_ARGUMENT) {
+ /* vm object probably doesn't exist any more */
+ bp->nb_pagelist = NULL;
+ return (EINVAL);
+ }
+ if (kret != KERN_SUCCESS) {
+ printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
+ bp->nb_pagelist = NULL;
+ return (EIO);
+ }
+
+ FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_vp);
+
+ bp->nb_pagelist = upl;
+ SET(bp->nb_flags, NB_PAGELIST);
+ return (0);
+}
+
+/*
+ * update buffer's valid/dirty info from UBC
+ * (must NOT be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_upl_check(struct nfsbuf *bp)
+{
+ upl_page_info_t *pl;
+ off_t filesize, fileoffset;
+ int i, npages;
+
+ if (!ISSET(bp->nb_flags, NB_PAGELIST))
+ return;
+
+ npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
+ filesize = ubc_getsize(bp->nb_vp);
+ fileoffset = NBOFF(bp);
+ if (fileoffset < filesize)
+ SET(bp->nb_flags, NB_CACHE);
+ else
+ CLR(bp->nb_flags, NB_CACHE);
+
+ pl = ubc_upl_pageinfo(bp->nb_pagelist);
+ bp->nb_valid = bp->nb_dirty = 0;
+
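+ /*
+ * walk the UPL's page info: record each resident page in nb_valid
+ * and each dirty page in nb_dirty, noting if anything was already dirty
+ */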
+ for (i=0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
+ /* anything beyond the end of the file is not valid or dirty */
+ if (fileoffset >= filesize)
+ break;
+ if (!upl_valid_page(pl, i)) {
+ CLR(bp->nb_flags, NB_CACHE);
+ continue;
+ }
+ NBPGVALID_SET(bp,i);
+ if (upl_dirty_page(pl, i)) {
+ NBPGDIRTY_SET(bp, i);
+ if (!ISSET(bp->nb_flags, NB_WASDIRTY))
+ SET(bp->nb_flags, NB_WASDIRTY);
+ }
+ }
+ fileoffset = NBOFF(bp);
+ if (ISSET(bp->nb_flags, NB_CACHE)) {
+ bp->nb_validoff = 0;
+ bp->nb_validend = bp->nb_bufsize;
+ if (fileoffset + bp->nb_validend > filesize)
+ bp->nb_validend = filesize - fileoffset;
+ } else {
+ bp->nb_validoff = bp->nb_validend = -1;
+ }
+ FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
+ FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
+}
+
+/*
+ * make sure that a buffer is mapped
+ * (must NOT be called with nfs_buf_mutex held)
+ */
+static int
+nfs_buf_map(struct nfsbuf *bp)
+{
+ kern_return_t kret;
+
+ if (bp->nb_data)
+ return (0);
+ if (!ISSET(bp->nb_flags, NB_PAGELIST))
+ return (EINVAL);
+
+ kret = ubc_upl_map(bp->nb_pagelist, (vm_address_t *)&(bp->nb_data));
+ if (kret != KERN_SUCCESS)
+ panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
+ if (bp->nb_data == 0)
+ panic("ubc_upl_map mapped 0");
+ FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
+ return (0);
+}
+
+/*
+ * check range of pages in nfsbuf's UPL for validity
+ */
+static int
+nfs_buf_upl_valid_range(struct nfsbuf *bp, int off, int size)
+{
+ off_t fileoffset, filesize;
+ int pg, lastpg;
+ upl_page_info_t *pl;
+
+ if (!ISSET(bp->nb_flags, NB_PAGELIST))
+ return (0);
+ pl = ubc_upl_pageinfo(bp->nb_pagelist);
+
+ size += off & PAGE_MASK;
+ off &= ~PAGE_MASK;
+ fileoffset = NBOFF(bp);
+ filesize = VTONFS(bp->nb_vp)->n_size;
+ if ((fileoffset + off + size) > filesize)
+ size = filesize - (fileoffset + off);
+
+ pg = off/PAGE_SIZE;
+ lastpg = (off + size - 1)/PAGE_SIZE;
+ while (pg <= lastpg) {
+ if (!upl_valid_page(pl, pg))
+ return (0);
+ pg++;
+ }
+ return (1);
+}
+
+/*
+ * normalize an nfsbuf's valid range
+ *
+ * the read/write code guarantees that we'll always have a valid
+ * region that is an integral number of pages. If either end
+ * of the valid range isn't page-aligned, it gets corrected
+ * here as we extend the valid range through all of the
+ * contiguous valid pages.
+ */
+static void
+nfs_buf_normalize_valid_range(struct nfsnode *np, struct nfsbuf *bp)
+{
+ int pg, npg;
+ /* pull validoff back to start of contiguous valid page range */
+ pg = bp->nb_validoff/PAGE_SIZE;
+ while (pg >= 0 && NBPGVALID(bp,pg))
+ pg--;
+ bp->nb_validoff = (pg+1) * PAGE_SIZE;
+ /* push validend forward to end of contiguous valid page range */
+ npg = bp->nb_bufsize/PAGE_SIZE;
+ pg = bp->nb_validend/PAGE_SIZE;
+ while (pg < npg && NBPGVALID(bp,pg))
+ pg++;
+ bp->nb_validend = pg * PAGE_SIZE;
+ /* clip to EOF */
+ if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size)
+ bp->nb_validend = np->n_size % bp->nb_bufsize;
+}
+
+/*
+ * try to push out some delayed/uncommitted writes
+ * ("locked" indicates whether nfs_buf_mutex is already held)
+ */
+static void
+nfs_buf_delwri_push(int locked)
+{
+ struct nfsbuf *bp;
+ int i, error;
+
+ if (TAILQ_EMPTY(&nfsbufdelwri))
+ return;
+
+ /* first try to tell the nfsiods to do it */
+ if (nfs_asyncio(NULL, NULL) == 0)
+ return;
+
+ /* otherwise, try to do some of the work ourselves */
+ i = 0;
+ if (!locked)
+ lck_mtx_lock(nfs_buf_mutex);
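+ /*
+ * push at most 8 delayed-write buffers per call: buffers needing
+ * commit are handed to nfs_flushcommits(), the rest are written async
+ */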
+ while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
+ struct nfsnode *np = VTONFS(bp->nb_vp);
+ nfs_buf_remfree(bp);
+ nfs_buf_refget(bp);
+ while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
+ nfs_buf_refrele(bp);
+ if (error)
+ break;
+ if (!bp->nb_vp) {
+ /* buffer is no longer valid */
+ nfs_buf_drop(bp);
+ continue;
+ }
+ if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
+ /* put buffer at end of delwri list */
+ TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
+ nfsbufdelwricnt++;
+ nfs_buf_drop(bp);
+ lck_mtx_unlock(nfs_buf_mutex);
+ nfs_flushcommits(np->n_vnode, NULL, 1);
+ } else {
+ SET(bp->nb_flags, NB_ASYNC);
+ lck_mtx_unlock(nfs_buf_mutex);
+ nfs_buf_write(bp);
+ }
+ i++;
+ lck_mtx_lock(nfs_buf_mutex);
+ }
+ if (!locked)
+ lck_mtx_unlock(nfs_buf_mutex);
+}
+
+/*
+ * Get an nfs buffer.
+ *
+ * Returns errno on error, 0 otherwise.
+ * On success, the buffer (if any) is returned in *bpp.
+ *
+ * If NBLK_ONLYVALID is set, only return buffer if found in cache.
+ * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
+ *
+ * Check for existence of buffer in cache.
+ * Or attempt to reuse a buffer from one of the free lists.
+ * Or allocate a new buffer if we haven't already hit max allocation.
+ * Or wait for a free buffer.
+ *
+ * If available buffer found, prepare it, and return it.
+ *
+ * If the calling process is interrupted by a signal for
+ * an interruptible mount point, return EINTR.
+ */
+int
+nfs_buf_get(
+ vnode_t vp,
+ daddr64_t blkno,
+ int size,
+ proc_t p,
+ int flags,
+ struct nfsbuf **bpp)
+{
+ struct nfsnode *np = VTONFS(vp);
+ struct nfsbuf *bp;
+ int biosize, bufsize;
+ kauth_cred_t cred;
+ int slpflag = PCATCH;
+ int operation = (flags & NBLK_OPMASK);
+ int error = 0;
+ struct timespec ts;
+
+ FSDBG_TOP(541, vp, blkno, size, flags);
+ *bpp = NULL;
+
+ bufsize = size;
+ if (bufsize > MAXBSIZE)
+ panic("nfs_buf_get: buffer larger than MAXBSIZE requested");
+
+ biosize = vfs_statfs(vnode_mount(vp))->f_iosize;
+
+ if (UBCINVALID(vp) || !UBCINFOEXISTS(vp)) {
+ operation = NBLK_META;
+ } else if (bufsize < biosize) {
+ /* reg files should always have biosize blocks */
+ bufsize = biosize;
+ }
+
+ /* if NBLK_WRITE, check for too many delayed/uncommitted writes */
+ if ((operation == NBLK_WRITE) && (nfs_nbdwrite > ((nfsbufcnt*3)/4))) {
+ FSDBG_TOP(542, vp, blkno, nfs_nbdwrite, ((nfsbufcnt*3)/4));
+
+ /* poke the delwri list */
+ nfs_buf_delwri_push(0);
+
+ /* sleep to let other threads run... */
+ tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
+ FSDBG_BOT(542, vp, blkno, nfs_nbdwrite, ((nfsbufcnt*3)/4));
+ }
+
+loop:
+ lck_mtx_lock(nfs_buf_mutex);
+
+ /* check for existence of nfsbuf in cache */
+ if ((bp = nfs_buf_incore(vp, blkno))) {
+ /* if busy, set wanted and wait */
+ if (ISSET(bp->nb_lflags, NBL_BUSY)) {
+ if (flags & NBLK_NOWAIT) {
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, vp, blkno, bp, 0xbcbcbcbc);
+ return (0);
+ }
+ FSDBG_TOP(543, vp, blkno, bp, bp->nb_flags);
+ SET(bp->nb_lflags, NBL_WANTED);
+
+ ts.tv_sec = 2;
+ ts.tv_nsec = 0;
+ msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP,
+ "nfsbufget", (slpflag == PCATCH) ? 0 : &ts);
+ slpflag = 0;
+ FSDBG_BOT(543, vp, blkno, bp, bp->nb_flags);
+ if ((error = nfs_sigintr(VFSTONFS(vnode_mount(vp)), NULL, p))) {
+ FSDBG_BOT(541, vp, blkno, 0, error);
+ return (error);
+ }
+ goto loop;
+ }
+ if (bp->nb_bufsize != bufsize)
+ panic("nfsbuf size mismatch");
+ SET(bp->nb_lflags, NBL_BUSY);
+ SET(bp->nb_flags, NB_CACHE);
+ nfs_buf_remfree(bp);
+ /* additional paranoia: */
+ if (ISSET(bp->nb_flags, NB_PAGELIST))
+ panic("pagelist buffer was not busy");
+ goto buffer_setup;
+ }
+
+ if (flags & NBLK_ONLYVALID) {
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, vp, blkno, 0, 0x0000cace);
+ return (0);
+ }
+
+ /*
+ * where to get a free buffer:
+ * - if meta and maxmeta reached, must reuse meta
+ * - alloc new if we haven't reached min bufs
+ * - if free lists are NOT empty
+ * - if free list is stale, use it
+ * - else if freemeta list is stale, use it
+ * - else if max bufs allocated, use least-time-to-stale
+ * - alloc new if we haven't reached max allowed
+ * - start clearing out delwri list and try again
+ */
+
+ if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
+ /* if we've hit max meta buffers, must reuse a meta buffer */
+ bp = TAILQ_FIRST(&nfsbuffreemeta);
+ } else if ((nfsbufcnt > nfsbufmin) &&
+ (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
+ /* try to pull an nfsbuf off a free list */
+ struct nfsbuf *lrubp, *metabp;
+ struct timeval now;
+ microuptime(&now);
+
+ /* if the next LRU or META buffer is invalid or stale, use it */
+ lrubp = TAILQ_FIRST(&nfsbuffree);
+ if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
+ ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec)))
+ bp = lrubp;
+ metabp = TAILQ_FIRST(&nfsbuffreemeta);
+ if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
+ ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec)))
+ bp = metabp;
+
+ if (!bp && (nfsbufcnt >= nfsbufmax)) {
+ /* we've already allocated all bufs, so */
+ /* choose the buffer that'll go stale first */
+ if (!metabp)
+ bp = lrubp;
+ else if (!lrubp)
+ bp = metabp;
+ else {
+ int32_t lru_stale_time, meta_stale_time;
+ lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
+ meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
+ if (lru_stale_time <= meta_stale_time)
+ bp = lrubp;
+ else
+ bp = metabp;
+ }
+ }
+ }
+
+ if (bp) {
+ /* we have a buffer to reuse */
+ FSDBG(544, vp, blkno, bp, bp->nb_flags);
+ nfs_buf_remfree(bp);
+ if (ISSET(bp->nb_flags, NB_DELWRI))
+ panic("nfs_buf_get: delwri");
+ SET(bp->nb_lflags, NBL_BUSY);
+ /* disassociate buffer from previous vnode */
+ if (bp->nb_vp) {
+ if (bp->nb_vnbufs.le_next != NFSNOLIST) {
+ LIST_REMOVE(bp, nb_vnbufs);
+ bp->nb_vnbufs.le_next = NFSNOLIST;
+ }
+ bp->nb_vp = NULL;
+ }
+ LIST_REMOVE(bp, nb_hash);
+ /* nuke any creds we're holding */
+ cred = bp->nb_rcred;
+ if (cred != NOCRED) {
+ bp->nb_rcred = NOCRED;
+ kauth_cred_rele(cred);
+ }
+ cred = bp->nb_wcred;
+ if (cred != NOCRED) {
+ bp->nb_wcred = NOCRED;
+ kauth_cred_rele(cred);
+ }
+ /* adjust meta accounting; if buf will no longer be NB_META, dump its old data buffer */
+ if (operation == NBLK_META) {
+ if (!ISSET(bp->nb_flags, NB_META))
+ nfsbufmetacnt++;
+ } else if (ISSET(bp->nb_flags, NB_META)) {
+ if (bp->nb_data) {
+ kfree(bp->nb_data, bp->nb_bufsize);
+ bp->nb_data = NULL;
+ }
+ nfsbufmetacnt--;
+ }
+ /* re-init buf fields */
+ bp->nb_error = 0;
+ bp->nb_validoff = bp->nb_validend = -1;
+ bp->nb_dirtyoff = bp->nb_dirtyend = 0;
+ bp->nb_valid = 0;
+ bp->nb_dirty = 0;
+ } else {
+ /* no buffer to reuse */
+ if ((nfsbufcnt < nfsbufmax) &&
+ ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
+ /* just alloc a new one */
+ MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK);
+ if (!bp) {
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, vp, blkno, 0, error);
+ return (ENOMEM);
+ }
+ nfsbufcnt++;
+ if (operation == NBLK_META)
+ nfsbufmetacnt++;
+ NFSBUFCNTCHK(1);
+ /* init nfsbuf */
+ bzero(bp, sizeof(*bp));
+ bp->nb_free.tqe_next = NFSNOLIST;
+ bp->nb_validoff = bp->nb_validend = -1;
+ FSDBG(545, vp, blkno, bp, 0);
+ } else {
+ /* too many bufs... wait for buffers to free up */
+ FSDBG_TOP(546, vp, blkno, nfsbufcnt, nfsbufmax);
+
+ /* poke the delwri list */
+ nfs_buf_delwri_push(1);
+
+ nfsneedbuffer = 1;
+ msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH|PDROP,
+ "nfsbufget", 0);
+ FSDBG_BOT(546, vp, blkno, nfsbufcnt, nfsbufmax);
+ if ((error = nfs_sigintr(VFSTONFS(vnode_mount(vp)), NULL, p))) {
+ FSDBG_BOT(541, vp, blkno, 0, error);
+ return (error);
+ }
+ goto loop;
+ }
+ }
+
+ /* setup nfsbuf */
+ bp->nb_lflags = NBL_BUSY;
+ bp->nb_flags = 0;
+ bp->nb_lblkno = blkno;
+ /* insert buf in hash */
+ LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
+ /* associate buffer with new vnode */
+ bp->nb_vp = vp;
+ LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
+
+buffer_setup:
+
+ /* unlock hash */
+ lck_mtx_unlock(nfs_buf_mutex);
+
+ switch (operation) {
+ case NBLK_META:
+ SET(bp->nb_flags, NB_META);
+ if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
+ kfree(bp->nb_data, bp->nb_bufsize);
+ bp->nb_data = NULL;
+ bp->nb_validoff = bp->nb_validend = -1;
+ bp->nb_dirtyoff = bp->nb_dirtyend = 0;
+ bp->nb_valid = 0;
+ bp->nb_dirty = 0;
+ CLR(bp->nb_flags, NB_CACHE);
+ }
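+ /* metadata buffers use kalloc'd memory rather than UPL-backed pages */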
+ if (!bp->nb_data)
+ bp->nb_data = kalloc(bufsize);
+ if (!bp->nb_data) {
+ /* Ack! couldn't allocate the data buffer! */
+ /* cleanup buffer and return error */
+ lck_mtx_lock(nfs_buf_mutex);
+ LIST_REMOVE(bp, nb_vnbufs);
+ bp->nb_vnbufs.le_next = NFSNOLIST;
+ bp->nb_vp = NULL;
+ /* invalidate usage timestamp to allow immediate freeing */
+ NBUFSTAMPINVALIDATE(bp);
+ if (bp->nb_free.tqe_next != NFSNOLIST)
+ panic("nfsbuf on freelist");
+ TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
+ nfsbuffreecnt++;
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, vp, blkno, 0xb00, ENOMEM);
+ return (ENOMEM);
+ }
+ bp->nb_bufsize = bufsize;
+ break;
+
+ case NBLK_READ:
+ case NBLK_WRITE:
+ /*
+ * Set or clear NB_READ now to let the UPL subsystem know
+ * if we intend to modify the pages or not.
+ */
+ if (operation == NBLK_READ) {
+ SET(bp->nb_flags, NB_READ);
+ } else {
+ CLR(bp->nb_flags, NB_READ);
+ }
+ if (bufsize < PAGE_SIZE)
+ bufsize = PAGE_SIZE;
+ bp->nb_bufsize = bufsize;
+ bp->nb_validoff = bp->nb_validend = -1;
+
+ if (UBCINFOEXISTS(vp)) {
+ /* setup upl */
+ if (nfs_buf_upl_setup(bp)) {
+ /* unable to create upl */
+ /* vm object must no longer exist */
+ /* cleanup buffer and return error */
+ lck_mtx_lock(nfs_buf_mutex);
+ LIST_REMOVE(bp, nb_vnbufs);
+ bp->nb_vnbufs.le_next = NFSNOLIST;
+ bp->nb_vp = NULL;
+ /* invalidate usage timestamp to allow immediate freeing */
+ NBUFSTAMPINVALIDATE(bp);
+ if (bp->nb_free.tqe_next != NFSNOLIST)
+ panic("nfsbuf on freelist");
+ TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
+ nfsbuffreecnt++;
+ lck_mtx_unlock(nfs_buf_mutex);
+ FSDBG_BOT(541, vp, blkno, 0x2bc, EIO);
+ return (EIO);
+ }
+ nfs_buf_upl_check(bp);
+ }
+ break;
+
+ default:
+ panic("nfs_buf_get: %d unknown operation", operation);
+ }
+
+ *bpp = bp;
+
+ FSDBG_BOT(541, vp, blkno, bp, bp->nb_flags);
+
+ return (0);
+}
+
+void
+nfs_buf_release(struct nfsbuf *bp, int freeup)
+{
+ vnode_t vp = bp->nb_vp;
+ struct timeval now;
+ int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;
+
+ FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
+ FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
+ FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);
+
+ if (UBCINFOEXISTS(vp) && bp->nb_bufsize) {
+ int upl_flags;
+ upl_t upl;
+ int i, rv;
+
+ if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
+ rv = nfs_buf_upl_setup(bp);
+ if (rv)
+ printf("nfs_buf_release: upl create failed %d\n", rv);
+ else
+ nfs_buf_upl_check(bp);
+ }
+ upl = bp->nb_pagelist;
+ if (!upl)
+ goto pagelist_cleanup_done;
+ if (bp->nb_data) {
+ if (ubc_upl_unmap(upl) != KERN_SUCCESS)
+ panic("ubc_upl_unmap failed");
+ bp->nb_data = NULL;
+ }
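+ /*
+ * on error, invalidation, or no-cache, abort the UPL; dump the pages
+ * outright if this was a read or the buffer was invalidated
+ */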
+ if (bp->nb_flags & (NB_ERROR | NB_INVAL | NB_NOCACHE)) {
+ if (bp->nb_flags & (NB_READ | NB_INVAL))
+ upl_flags = UPL_ABORT_DUMP_PAGES;
+ else
+ upl_flags = 0;
+ ubc_upl_abort(upl, upl_flags);
+ goto pagelist_cleanup_done;
+ }
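+ /*
+ * otherwise commit the pages individually: valid pages keep their
+ * dirty state, invalid pages are dumped
+ */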
+ for (i=0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) {
+ if (!NBPGVALID(bp,i))
+ ubc_upl_abort_range(upl,
+ i*PAGE_SIZE, PAGE_SIZE,
+ UPL_ABORT_DUMP_PAGES |
+ UPL_ABORT_FREE_ON_EMPTY);
+ else {
+ if (NBPGDIRTY(bp,i))
+ upl_flags = UPL_COMMIT_SET_DIRTY;
+ else
+ upl_flags = UPL_COMMIT_CLEAR_DIRTY;
+ ubc_upl_commit_range(upl,
+ i*PAGE_SIZE, PAGE_SIZE,
+ upl_flags |
+ UPL_COMMIT_INACTIVATE |
+ UPL_COMMIT_FREE_ON_EMPTY);
+ }
+ }
+pagelist_cleanup_done:
+ /* was this the last buffer in the file? */
+ if (NBOFF(bp) + bp->nb_bufsize > (off_t)(VTONFS(vp)->n_size)) {
+ /* if so, invalidate all pages of last buffer past EOF */
+ int biosize = vfs_statfs(vnode_mount(vp))->f_iosize;
+ off_t start, end;
+ start = trunc_page_64(VTONFS(vp)->n_size) + PAGE_SIZE_64;
+ end = trunc_page_64(NBOFF(bp) + biosize);
+ if (end > start) {
+ if (!(rv = ubc_sync_range(vp, start, end, UBC_INVALIDATE)))
+ printf("nfs_buf_release(): ubc_sync_range failed!\n");
+ }
+ }
+ CLR(bp->nb_flags, NB_PAGELIST);
+ bp->nb_pagelist = NULL;
+ }
+
+ lck_mtx_lock(nfs_buf_mutex);
+
+ wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;
+
+ /* Wake up any processes waiting for any buffer to become free. */
+ if (nfsneedbuffer) {
+ nfsneedbuffer = 0;
+ wakeup_needbuffer = 1;
+ }
+ /* Wake up any processes waiting for _this_ buffer to become free. */
+ if (ISSET(bp->nb_lflags, NBL_WANTED)) {
+ CLR(bp->nb_lflags, NBL_WANTED);
+ wakeup_buffer = 1;
+ }
+
+ /* If it's not cacheable, or an error, mark it invalid. */
+ if (ISSET(bp->nb_flags, (NB_NOCACHE|NB_ERROR)))
+ SET(bp->nb_flags, NB_INVAL);
+
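+ /*
+ * the buffer goes to one of three places: the head of a free list
+ * if it's invalid or empty, the delwri list if a delayed write is
+ * still pending, or the tail of a free list (with a fresh timestamp)
+ */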
+ if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
+ /* If it's invalid or empty, dissociate it from its vnode */
+ if (bp->nb_vnbufs.le_next != NFSNOLIST) {
+ LIST_REMOVE(bp, nb_vnbufs);
+ bp->nb_vnbufs.le_next = NFSNOLIST;
+ }
+ bp->nb_vp = NULL;
+ /* if this was a delayed write, wakeup anyone */
+ /* waiting for delayed writes to complete */
+ if (ISSET(bp->nb_flags, NB_DELWRI)) {
+ CLR(bp->nb_flags, NB_DELWRI);
+ OSAddAtomic(-1, (SInt32*)&nfs_nbdwrite);
+ NFSBUFCNTCHK(1);
+ wakeup_nbdwrite = 1;
+ }
+ /* invalidate usage timestamp to allow immediate freeing */
+ NBUFSTAMPINVALIDATE(bp);
+ /* put buffer at head of free list */
+ if (bp->nb_free.tqe_next != NFSNOLIST)
+ panic("nfsbuf on freelist");
+ SET(bp->nb_flags, NB_INVAL);
+ if (ISSET(bp->nb_flags, NB_META)) {
+ TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
+ nfsbuffreemetacnt++;
+ } else {
+ TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
+ nfsbuffreecnt++;
+ }
+ } else if (ISSET(bp->nb_flags, NB_DELWRI)) {
+ /* put buffer at end of delwri list */
+ if (bp->nb_free.tqe_next != NFSNOLIST)
+ panic("nfsbuf on freelist");
+ TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
+ nfsbufdelwricnt++;
+ freeup = 0;
+ } else {
+ /* update usage timestamp */
+ microuptime(&now);
+ bp->nb_timestamp = now.tv_sec;
+ /* put buffer at end of free list */
+ if (bp->nb_free.tqe_next != NFSNOLIST)
+ panic("nfsbuf on freelist");
+ if (ISSET(bp->nb_flags, NB_META)) {
+ TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
+ nfsbuffreemetacnt++;
+ } else {
+ TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
+ nfsbuffreecnt++;
+ }
+ }
+
+ NFSBUFCNTCHK(1);
+
+ /* Unlock the buffer. */
+ CLR(bp->nb_flags, (NB_ASYNC | NB_NOCACHE | NB_STABLE | NB_IOD));
+ CLR(bp->nb_lflags, NBL_BUSY);
+
+ FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
+
+ lck_mtx_unlock(nfs_buf_mutex);
+
+ if (wakeup_needbuffer)
+ wakeup(&nfsneedbuffer);
+ if (wakeup_buffer)
+ wakeup(bp);
+ if (wakeup_nbdwrite)
+ wakeup(&nfs_nbdwrite);
+ if (freeup)
+ NFS_BUF_FREEUP();
+}
+
+/*
+ * Wait for operations on the buffer to complete.
+ * When they do, extract and return the I/O's error value.
+ */
+int
+nfs_buf_iowait(struct nfsbuf *bp)
+{
+ FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
+
+ lck_mtx_lock(nfs_buf_mutex);
+
+ while (!ISSET(bp->nb_flags, NB_DONE))
+ msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", 0);
+
+ lck_mtx_unlock(nfs_buf_mutex);
+
+ FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
+
+ /* check for interruption of I/O, then errors. */
+ if (ISSET(bp->nb_flags, NB_EINTR)) {
+ CLR(bp->nb_flags, NB_EINTR);
+ return (EINTR);
+ } else if (ISSET(bp->nb_flags, NB_ERROR))
+ return (bp->nb_error ? bp->nb_error : EIO);
+ return (0);
+}
+
+/*
+ * Mark I/O complete on a buffer.
+ */
+void
+nfs_buf_iodone(struct nfsbuf *bp)
+{
+
+ FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
+
+ if (ISSET(bp->nb_flags, NB_DONE))
+ panic("nfs_buf_iodone already");
+ /*
+ * I/O was done, so don't believe
+ * the DIRTY state from VM anymore
+ */
+ CLR(bp->nb_flags, NB_WASDIRTY);
+
+ if (!ISSET(bp->nb_flags, NB_READ)) {
+ CLR(bp->nb_flags, NB_WRITEINPROG);
+ /*
+ * vnode_writedone() takes care of waking up
+ * any throttled write operations
+ */
+ vnode_writedone(bp->nb_vp);
+ }
+ if (ISSET(bp->nb_flags, NB_ASYNC)) { /* if async, release it */
+ SET(bp->nb_flags, NB_DONE); /* note that it's done */
+ nfs_buf_release(bp, 1);
+ } else { /* or just wakeup the buffer */
+ lck_mtx_lock(nfs_buf_mutex);
+ SET(bp->nb_flags, NB_DONE); /* note that it's done */
+ CLR(bp->nb_lflags, NBL_WANTED);
+ lck_mtx_unlock(nfs_buf_mutex);
+ wakeup(bp);
+ }
+
+ FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
+}
+
+void
+nfs_buf_write_delayed(struct nfsbuf *bp, proc_t p)
+{
+ vnode_t vp = bp->nb_vp;
+
+ FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
+ FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);
+
+ /*
+ * If the block hasn't been seen before:
+ * (1) Mark it as having been seen,
+ * (2) Charge for the write, and
+ * (3) Make sure it's on its vnode's correct block list.
+ */
+ if (!ISSET(bp->nb_flags, NB_DELWRI)) {
+ SET(bp->nb_flags, NB_DELWRI);
+ if (p && p->p_stats)
+ p->p_stats->p_ru.ru_oublock++; /* XXX */
+ OSAddAtomic(1, (SInt32*)&nfs_nbdwrite);
+ NFSBUFCNTCHK(0);
+ /* move to dirty list */
+ lck_mtx_lock(nfs_buf_mutex);
+ if (bp->nb_vnbufs.le_next != NFSNOLIST)
+ LIST_REMOVE(bp, nb_vnbufs);
+ LIST_INSERT_HEAD(&VTONFS(vp)->n_dirtyblkhd, bp, nb_vnbufs);
+ lck_mtx_unlock(nfs_buf_mutex);
+ }
+
+ /*
+ * If the vnode has "too many" write operations in progress,
+ * wait for them to finish their I/O.
+ */
+ (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");
+
+ /*
+ * If we have too many delayed write buffers,
+ * more than we can "safely" handle, just fall back to
+ * doing the async write
+ */
+ if (nfs_nbdwrite < 0)
+ panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
+
+ if (nfs_nbdwrite > ((nfsbufcnt/4)*3)) {
+ /* issue async write */
+ SET(bp->nb_flags, NB_ASYNC);
+ nfs_buf_write(bp);
+ FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
+ return;
+ }
+
+ /* Otherwise, the "write" is done, so mark and release the buffer. */
+ SET(bp->nb_flags, NB_DONE);
+ nfs_buf_release(bp, 1);
+ FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
+ return;
+}
+
+/*
+ * add a reference to a buffer so it doesn't disappear while being used
+ * (must be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_refget(struct nfsbuf *bp)
+{
+ bp->nb_refs++;
+}
+/*
+ * release a reference on a buffer
+ * (must be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_refrele(struct nfsbuf *bp)
+{
+ bp->nb_refs--;
+}
+
+/*
+ * mark a particular buffer as BUSY
+ * (must be called with nfs_buf_mutex held)
+ */
+errno_t
+nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
+{
+ errno_t error;
+ struct timespec ts;
+
+ if (ISSET(bp->nb_lflags, NBL_BUSY)) {
+ /*
+ * since the mutex_lock may block, the buffer
+ * may become BUSY, so we need to recheck for
+ * a NOWAIT request
+ */
+ if (flags & NBAC_NOWAIT)
+ return (EBUSY);
+ SET(bp->nb_lflags, NBL_WANTED);
+
+ ts.tv_sec = (slptimeo/100);
+ /* hz is 100, so each remaining tick is 10ms; convert those ticks to nanoseconds */
+ ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
+
+ error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1),
+ "nfs_buf_acquire", &ts);
+ if (error)
+ return (error);
+ return (EAGAIN);
+ }
+ if (flags & NBAC_REMOVE)
+ nfs_buf_remfree(bp);
+ SET(bp->nb_lflags, NBL_BUSY);
+
+ return (0);
+}
+
+/*
+ * simply drop the BUSY status of a buffer
+ * (must be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_drop(struct nfsbuf *bp)
+{
+ int need_wakeup = 0;
+
+ if (!ISSET(bp->nb_lflags, NBL_BUSY))
+ panic("nfs_buf_drop: buffer not busy!");
+ if (ISSET(bp->nb_lflags, NBL_WANTED)) {
+ /*
+ * delay the actual wakeup until after we
+ * clear NBL_BUSY and we've dropped nfs_buf_mutex
+ */
+ need_wakeup = 1;
+ }
+ /* Unlock the buffer. */
+ CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));
+
+ if (need_wakeup)
+ wakeup(bp);
+}
+
+/*
+ * prepare for iterating over an nfsnode's buffer list
+ * this lock protects the queue manipulation
+ * (must be called with nfs_buf_mutex held)
+ */
+int
+nfs_buf_iterprepare(struct nfsnode *np, struct nfsbuflists *iterheadp, int flags)
+{
+ struct nfsbuflists *listheadp;
+
+ if (flags & NBI_DIRTY)
+ listheadp = &np->n_dirtyblkhd;
+ else
+ listheadp = &np->n_cleanblkhd;
+
+ if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
+ LIST_INIT(iterheadp);
+ return(EWOULDBLOCK);
+ }
+
+ while (np->n_bufiterflags & NBI_ITER) {
+ np->n_bufiterflags |= NBI_ITERWANT;
+ msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", 0);
+ }
+ if (LIST_EMPTY(listheadp)) {
+ LIST_INIT(iterheadp);
+ return(EINVAL);
+ }
+ np->n_bufiterflags |= NBI_ITER;
+
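+ /*
+ * hand the entire list over to the caller's iteration head and reset
+ * the vnode's list; nfs_buf_itercomplete() moves any leftovers back
+ */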
+ iterheadp->lh_first = listheadp->lh_first;
+ listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
+ LIST_INIT(listheadp);
+
+ return(0);
+}
+
+/*
+ * cleanup after iterating over an nfsnode's buffer list
+ * this lock protects the queue manipulation
+ * (must be called with nfs_buf_mutex held)
+ */
+void
+nfs_buf_itercomplete(struct nfsnode *np, struct nfsbuflists *iterheadp, int flags)
+{
+ struct nfsbuflists * listheadp;
+ struct nfsbuf *bp;
+
+ if (flags & NBI_DIRTY)
+ listheadp = &np->n_dirtyblkhd;
+ else
+ listheadp = &np->n_cleanblkhd;
+
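+ /* put any buffers still on the iteration list back on the vnode's list */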
+ while (!LIST_EMPTY(iterheadp)) {
+ bp = LIST_FIRST(iterheadp);
+ LIST_REMOVE(bp, nb_vnbufs);
+ LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
+ }
+
+ np->n_bufiterflags &= ~NBI_ITER;
+ if (np->n_bufiterflags & NBI_ITERWANT) {
+ np->n_bufiterflags &= ~NBI_ITERWANT;
+ wakeup(&np->n_bufiterflags);
+ }
+}