+/*
+ * finish up an NFS READ RPC on a buffer
+ */
+void
+nfs_buf_read_rpc_finish(struct nfsreq *req)
+{
+ struct nfsmount *nmp;
+ size_t rlen;
+ struct nfsreq_cbinfo cb;
+ struct nfsbuf *bp;
+ int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished;
+ void *wakeme = NULL;
+ struct nfsreq *rreq = NULL;
+ nfsnode_t np;
+ thread_t thd;
+ kauth_cred_t cred;
+ uio_t auio;
+ char uio_buf[UIO_SIZEOF(1)];
+
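+ /*
+ * "finish" is re-entered (via goto below) when a synchronous short
+ * read issues a follow-up RPC that must also be waited on and
+ * finished here.
+ */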
+finish:
+ np = req->r_np;
+ thd = req->r_thread;
+ cred = req->r_cred;
+ if (IS_VALID_CRED(cred))
+ kauth_cred_ref(cred);
+ cb = req->r_callback;
+ bp = cb.rcb_bp;
+ if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */
+ nfs_request_ref(req, 0);
+
+ nmp = NFSTONMP(np);
+ if (!nmp) {
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = ENXIO;
+ }
+ if (error || ISSET(bp->nb_flags, NB_ERROR)) {
+ /* just drop it */
+ nfs_request_async_cancel(req);
+ goto out;
+ }
+
+ nfsvers = nmp->nm_vers;
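+ /* callback args: [0] offset into buffer, [1] length, [2] NFSv4 state generation */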
+ offset = cb.rcb_args[0];
+ rlen = length = cb.rcb_args[1];
+
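+ /* aim a local uio at the region of the buffer this RPC read into */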
+ auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
+ UIO_READ, &uio_buf, sizeof(uio_buf));
+ uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);
+
+ /* finish the RPC */
+ error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof);
+ if ((error == EINPROGRESS) && cb.rcb_func) {
+ /* async request restarted */
+ nfs_request_rele(req);
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ return;
+ }
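+ /*
+ * NFSv4 state errors: if this RPC was issued under the current state
+ * generation (and isn't just a GRACE/OLD_STATEID retry case), kick
+ * off state recovery before deciding how to retry.
+ */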
+ if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) {
+ NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
+ error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid);
+ nfs_need_recover(nmp, error);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ if (np->n_flag & NREVOKE) {
+ error = EIO;
+ } else {
+ if (error == NFSERR_GRACE) {
+ if (cb.rcb_func) {
+ /*
+ * For an async I/O request, handle a grace delay just like
+ * jukebox errors. Set the resend time and queue it up.
+ */
+ struct timeval now;
+ if (req->r_nmrep.nmc_mhead) {
+ mbuf_freem(req->r_nmrep.nmc_mhead);
+ req->r_nmrep.nmc_mhead = NULL;
+ }
+ req->r_error = 0;
+ microuptime(&now);
+ lck_mtx_lock(&req->r_mtx);
+ req->r_resendtime = now.tv_sec + 2;
+ req->r_xid = 0; // get a new XID
+ req->r_flags |= R_RESTART;
+ req->r_start = 0;
+ nfs_asyncio_resend(req);
+ lck_mtx_unlock(&req->r_mtx);
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ /* Note: the extra nfsreq reference taken above will be dropped when the resent request finishes */
+ return;
+ }
+ /* otherwise, just pause a couple seconds and retry */
+ tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
+ }
+ if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
+ rlen = 0;
+ goto readagain;
+ }
+ }
+ }
+ if (error) {
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error;
+ goto out;
+ }
+
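+ /* extend the buffer's end-of-I/O point past the bytes this RPC returned */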
+ if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen)))
+ bp->nb_endio = offset + rlen;
+
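+ /*
+ * NFSv2 READ replies carry no EOF flag, so a short read can't be
+ * distinguished from EOF; rather than re-reading, zero the remainder
+ * (up to the smaller of the RPC shortfall and the distance to EOF).
+ */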
+ if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) {
+ /* zero out the remaining data (up to EOF) */
+ off_t rpcrem, eofrem, rem;
+ rpcrem = (length - rlen);
+ eofrem = np->n_size - (NBOFF(bp) + offset + rlen);
+ rem = (rpcrem < eofrem) ? rpcrem : eofrem;
+ if (rem > 0)
+ bzero(bp->nb_data + offset + rlen, rem);
+ } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) {
+ /*
+ * short read
+ *
+ * We haven't hit EOF and we didn't get all the data
+ * requested, so we need to issue another read for the rest.
+ * (Don't bother if the buffer already hit an error.)
+ */
+readagain:
+ offset += rlen;
+ length -= rlen;
+ cb.rcb_args[0] = offset;
+ cb.rcb_args[1] = length;
+ if (nmp->nm_vers >= NFS_VER4)
+ cb.rcb_args[2] = nmp->nm_stategenid;
+ error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq);
+ if (!error) {
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ if (!cb.rcb_func) {
+ /* if !async we'll need to wait for this RPC to finish */
+ req = rreq;
+ rreq = NULL;
+ goto finish;
+ }
+ nfs_request_rele(req);
+ /*
+ * We're done here.
+ * Outstanding RPC count is unchanged.
+ * Callback will be called when RPC is done.
+ */
+ return;
+ }
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error;
+ }
+
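+ /* common exit: release our request and credential references */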
+out:
+ if (cb.rcb_func)
+ nfs_request_rele(req);
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+
+ /*
+ * Decrement outstanding RPC count on buffer
+ * and call nfs_buf_read_finish on last RPC.
+ *
+ * (Note: when there are multiple async RPCs issued for a
+ * buffer we need nfs_buf_mutex to avoid problems when
+ * aborting a partially-initiated set of RPCs)
+ */
+
+ multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
+ if (multasyncrpc)
+ lck_mtx_lock(nfs_buf_mutex);
+
+ bp->nb_rpcs--;
+ finished = (bp->nb_rpcs == 0);
+
+ if (multasyncrpc)
+ lck_mtx_unlock(nfs_buf_mutex);
+
+ if (finished) {
+ if (multasyncrpc)
+ wakeme = &bp->nb_rpcs;
+ nfs_buf_read_finish(bp);
+ if (wakeme)
+ wakeup(wakeme);
+ }
+}
+
+/*
+ * Do buffer readahead.
+ * Initiate async I/O to read buffers not in cache.
+ */
+int
+nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred)
+{
+ struct nfsmount *nmp = NFSTONMP(np);
+ struct nfsbuf *bp;
+ int error = 0;
+ uint32_t nra;
+
+ if (!nmp)
+ return (ENXIO);
+ if (nmp->nm_readahead <= 0)
+ return (0);
+ if (*rabnp > lastrabn)
+ return (0);
+
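+ /* issue up to nm_readahead async reads, stopping at lastrabn or EOF */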
+ for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) {
+ if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) {
+ /* stop reading ahead if we're beyond EOF */
+ *rabnp = lastrabn;
+ break;
+ }
+ /* check if the block exists and is valid; don't block if the buffer is busy */
+ error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
+ if (error)
+ break;
+ nfs_node_lock_force(np);
+ np->n_lastrahead = *rabnp;
+ nfs_node_unlock(np);
+ if (!bp)
+ continue;
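+ /*
+ * For IO_NOCACHE, toss any clean cached data so the buffer
+ * gets re-read from the server.
+ */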
+ if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) &&
+ !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) {
+ CLR(bp->nb_flags, NB_CACHE);
+ bp->nb_valid = 0;
+ bp->nb_validoff = bp->nb_validend = -1;
+ }
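+ /* if the buffer holds no cached or dirty data, fire off an async read */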
+ if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty &&
+ !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) {
+ SET(bp->nb_flags, (NB_READ|NB_ASYNC));
+ if (ioflag & IO_NOCACHE)
+ SET(bp->nb_flags, NB_NCRDAHEAD);
+ if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
+ kauth_cred_ref(cred);
+ bp->nb_rcred = cred;
+ }
+ if ((error = nfs_buf_read(bp)))
+ break;
+ continue;
+ }
+ nfs_buf_release(bp, 1);
+ }
+ return (error);
+}
+
+/*
+ * NFS buffer I/O for reading files.
+ */
+int
+nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
+{
+ vnode_t vp = NFSTOV(np);
+ struct nfsbuf *bp = NULL;
+ struct nfsmount *nmp = VTONMP(vp);
+ daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1;
+ off_t diff;
+ int error = 0, n = 0, on = 0;
+ int nfsvers, biosize, modified, readaheads = 0;
+ thread_t thd;
+ kauth_cred_t cred;
+ int64_t io_resid;
+
+ FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag);
+
+ nfsvers = nmp->nm_vers;
+ biosize = nmp->nm_biosize;
+ thd = vfs_context_thread(ctx);
+ cred = vfs_context_ucred(ctx);
+
+ if (vnode_vtype(vp) != VREG) {
+ printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp));
+ FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL);
+ return (EINVAL);
+ }
+
+ /*
+ * For NFS, cache consistency can only be maintained approximately.
+ * Although RFC1094 does not specify the criteria, the following is
+ * believed to be compatible with the reference port.
+ *
+ * If the file has changed since the last read RPC or you have
+ * written to the file, you may have lost data cache consistency
+ * with the server. So, check for a change, and flush all of the
+ * file's data out of the cache.
+ * NB: This implies that cache data can be read when up to
+ * NFS_MAXATTRTIMO seconds out of date. If you find that you
+ * need current attributes, nfs_getattr() can be forced to fetch
+ * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED).
+ */
+
+ if (ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 0);
+
+ if ((error = nfs_node_lock(np))) {
+ FSDBG_BOT(514, np, 0xd1e0222, 0, error);
+ return (error);
+ }
+
+ if (np->n_flag & NNEEDINVALIDATE) {
+ np->n_flag &= ~NNEEDINVALIDATE;
+ nfs_node_unlock(np);
+ error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
+ if (!error)
+ error = nfs_node_lock(np);
+ if (error) {
+ FSDBG_BOT(514, np, 0xd1e0322, 0, error);
+ return (error);
+ }
+ }
+
+ modified = (np->n_flag & NMODIFIED);
+ nfs_node_unlock(np);
+ /* nfs_getattr() will check changed and purge caches */
+ error = nfs_getattr(np, NULL, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
+ if (error) {
+ FSDBG_BOT(514, np, 0xd1e0004, 0, error);
+ return (error);
+ }
+
+ if (uio_resid(uio) == 0) {
+ FSDBG_BOT(514, np, 0xd1e0001, 0, 0);
+ return (0);
+ }
+ if (uio_offset(uio) < 0) {
+ FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL);
+ return (EINVAL);
+ }
+
+ /*
+ * set up readahead - which may be limited by:
+ * + current request length (for IO_NOCACHE)
+ * + readahead setting
+ * + file size
+ */
+ if (nmp->nm_readahead > 0) {
+ off_t end = uio_offset(uio) + uio_resid(uio);
+ if (end > (off_t)np->n_size)
+ end = np->n_size;
+ rabn = uio_offset(uio) / biosize;
+ maxrabn = (end - 1) / biosize;
+ nfs_node_lock_force(np);
+ if (!(ioflag & IO_NOCACHE) &&
+ (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) {
+ maxrabn += nmp->nm_readahead;
+ if ((maxrabn * biosize) >= (off_t)np->n_size)
+ maxrabn = ((off_t)np->n_size - 1)/biosize;
+ }
+ if (maxrabn < np->n_lastrahead)
+ np->n_lastrahead = -1;
+ if (rabn < np->n_lastrahead)
+ rabn = np->n_lastrahead + 1;
+ nfs_node_unlock(np);
+ } else {
+ rabn = maxrabn = 0;
+ }
+
+ do {
+
+ nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
+ lbn = uio_offset(uio) / biosize;
+
+ /*
+ * Copy directly from any cached pages without grabbing the bufs.
+ * (If we are NOCACHE and we've issued readahead requests, we need
+ * to grab the NB_NCRDAHEAD bufs to drop them.)
+ */
+ if ((!(ioflag & IO_NOCACHE) || !readaheads) &&
+ ((uio->uio_segflg == UIO_USERSPACE32 ||
+ uio->uio_segflg == UIO_USERSPACE64 ||
+ uio->uio_segflg == UIO_USERSPACE))) {
+ io_resid = uio_resid(uio);
+ diff = np->n_size - uio_offset(uio);
+ if (diff < io_resid)
+ io_resid = diff;
+ if (io_resid > 0) {
+ int count = (io_resid > INT_MAX) ? INT_MAX : io_resid;
+ error = cluster_copy_ubc_data(vp, uio, &count, 0);
+ if (error) {
+ nfs_data_unlock(np);
+ FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error);
+ return (error);
+ }
+ }
+ /* count any biocache reads that we just copied directly */
+ if (lbn != (uio_offset(uio)/biosize)) {
+ OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
+ FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
+ }
+ }
+
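+ /* recompute the block number and offset within the block, since the
+ * UBC copy above may have advanced the uio */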
+ lbn = uio_offset(uio) / biosize;
+ on = uio_offset(uio) % biosize;
+ nfs_node_lock_force(np);
+ np->n_lastread = (uio_offset(uio) - 1) / biosize;
+ nfs_node_unlock(np);
+
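+ /* we're done if the request has been satisfied or we've read up to EOF */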
+ if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
+ nfs_data_unlock(np);
+ FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
+ return (0);
+ }
+
+ /* adjust readahead block number, if necessary */
+ if (rabn < lbn)
+ rabn = lbn;
+ lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead);
+ if (rabn <= lastrabn) { /* start readaheads */
+ error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred);
+ if (error) {
+ nfs_data_unlock(np);
+ FSDBG_BOT(514, np, 0xd1e000b, 1, error);
+ return (error);
+ }
+ readaheads = 1;
+ }
+
+ OSAddAtomic64(1, &nfsstats.biocache_reads);
+
+ /*
+ * If the block is in the cache and has the required data
+ * in a valid region, just copy it out.
+ * Otherwise, get the block and write back/read in,