+ bp->nb_error = error;
+ SET(bp->nb_flags, NB_ERROR);
+ if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
+ nrpcs = (length + nmrsize - 1) / nmrsize;
+ lck_mtx_lock(nfs_buf_mutex);
+ bp->nb_rpcs -= nrpcs;
+ if (bp->nb_rpcs == 0) {
+ /* No RPCs left, so the buffer's done */
+ lck_mtx_unlock(nfs_buf_mutex);
+ nfs_buf_iodone(bp);
+ } else {
+ /* wait for the last RPC to mark it done */
+ while (bp->nb_rpcs > 0)
+ msleep(&bp->nb_rpcs, nfs_buf_mutex, 0,
+ "nfs_buf_read_rpc_cancel", NULL);
+ lck_mtx_unlock(nfs_buf_mutex);
+ }
+ } else {
+ nfs_buf_iodone(bp);
+ }
+ }
+
+ return (error);
+}
+
+/*
+ * finish up an NFS READ RPC on a buffer
+ */
+void
+nfs_buf_read_rpc_finish(struct nfsreq *req)
+{
+ struct nfsmount *nmp;
+ size_t rlen;
+ struct nfsreq_cbinfo cb;
+ struct nfsbuf *bp;
+ int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished;
+ void *wakeme = NULL;
+ struct nfsreq *rreq = NULL;
+ nfsnode_t np;
+ thread_t thd;
+ kauth_cred_t cred;
+ struct uio uio;
+ struct iovec_32 io;
+
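+ /* "finish" is re-entered (via the goto below) when a short read forces a synchronous follow-up RPC */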
+finish:
+ np = req->r_np;
+ thd = req->r_thread;
+ cred = req->r_cred;
+ if (IS_VALID_CRED(cred))
+ kauth_cred_ref(cred);
+ cb = req->r_callback;
+ bp = cb.rcb_bp;
+
+ nmp = NFSTONMP(np);
+ if (!nmp) {
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error = ENXIO;
+ }
+ if (error || ISSET(bp->nb_flags, NB_ERROR)) {
+ /* just drop it */
+ nfs_request_async_cancel(req);
+ goto out;
+ }
+
+ nfsvers = nmp->nm_vers;
+ offset = cb.rcb_args[0];
+ rlen = length = cb.rcb_args[1];
+
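+ /* set up a uio describing the portion of the buffer covered by this RPC */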
+ uio.uio_iovs.iov32p = &io;
+ uio.uio_iovcnt = 1;
+ uio.uio_rw = UIO_READ;
+#if 1 /* LP64todo - can't use new segment flags until the drivers are ready */
+ uio.uio_segflg = UIO_SYSSPACE;
+#else
+ uio.uio_segflg = UIO_SYSSPACE32;
+#endif
+ io.iov_len = length;
+ uio_uio_resid_set(&uio, io.iov_len);
+ uio.uio_offset = NBOFF(bp) + offset;
+ io.iov_base = (uintptr_t) bp->nb_data + offset;
+
+ /* finish the RPC */
+ error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, &uio, &rlen, &eof);
+ if ((error == EINPROGRESS) && cb.rcb_func) {
+ /* async request restarted */
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ return;
+ }
+
+ if (error) {
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error;
+ goto out;
+ }
+
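+ /* record how far into the buffer data has now been read */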
+ if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen)))
+ bp->nb_endio = offset + rlen;
+
+ if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) {
+ /* zero out the remaining data (up to EOF) */
+ off_t rpcrem, eofrem, rem;
+ rpcrem = (length - rlen);
+ eofrem = np->n_size - (NBOFF(bp) + offset + rlen);
+ rem = (rpcrem < eofrem) ? rpcrem : eofrem;
+ if (rem > 0)
+ bzero(bp->nb_data + offset + rlen, rem);
+ } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) {
+ /*
+ * short read
+ *
+ * We haven't hit EOF and we didn't get all the data
+ * requested, so we need to issue another read for the rest.
+ * (Don't bother if the buffer already hit an error.)
+ */
+ offset += rlen;
+ length -= rlen;
+ cb.rcb_args[0] = offset;
+ cb.rcb_args[1] = length;
+ error = nmp->nm_funcs->nf_read_rpc_async(np, offset, length, thd, cred, &cb, &rreq);
+ if (!error) {
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+ if (!cb.rcb_func) {
+ /* if !async we'll need to wait for this RPC to finish */
+ req = rreq;
+ goto finish;
+ }
+ /*
+ * We're done here.
+ * Outstanding RPC count is unchanged.
+ * Callback will be called when RPC is done.
+ */
+ return;
+ }
+ SET(bp->nb_flags, NB_ERROR);
+ bp->nb_error = error;
+ }
+
+out:
+ if (IS_VALID_CRED(cred))
+ kauth_cred_unref(&cred);
+
+ /*
+ * Decrement outstanding RPC count on buffer
+ * and call nfs_buf_read_finish on last RPC.
+ *
+ * (Note: when there are multiple async RPCs issued for a
+ * buffer we need nfs_buf_mutex to avoid problems when
+ * aborting a partially-initiated set of RPCs)
+ */
+
+ multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
+ if (multasyncrpc)
+ lck_mtx_lock(nfs_buf_mutex);
+
+ bp->nb_rpcs--;
+ finished = (bp->nb_rpcs == 0);
+
+ if (multasyncrpc)
+ lck_mtx_unlock(nfs_buf_mutex);
+
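+ /* last RPC for this buffer: finish it up and wake any thread msleeping on nb_rpcs */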
+ if (finished) {
+ if (multasyncrpc)
+ wakeme = &bp->nb_rpcs;
+ nfs_buf_read_finish(bp);
+ if (wakeme)
+ wakeup(wakeme);
+ }
+}
+
+/*
+ * Do buffer readahead.
+ * Initiate async I/O to read buffers not in cache.
+ */
+static int
+nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred)
+{
+ struct nfsmount *nmp = NFSTONMP(np);
+ struct nfsbuf *bp;
+ int error = 0, nra;
+
+ if (!nmp)
+ return (ENXIO);
+ if (nmp->nm_readahead <= 0)
+ return (0);
+ if (*rabnp > lastrabn)
+ return (0);
+
+ for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) {
+ /* check if block exists and is valid. */
+ error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
+ if (error)
+ break;
+ if (!bp)
+ continue;
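+ /* for uncached reads, discard clean data cached by a previous normal read so it gets reread */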
+ if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) &&
+ !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) {
+ CLR(bp->nb_flags, NB_CACHE);
+ bp->nb_valid = 0;
+ bp->nb_validoff = bp->nb_validend = -1;
+ }
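+ /* only start a read if the block isn't already cached or dirty */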
+ if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty &&
+ !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) {
+ SET(bp->nb_flags, (NB_READ|NB_ASYNC));
+ if (ioflag & IO_NOCACHE)
+ SET(bp->nb_flags, NB_NCRDAHEAD);
+ if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
+ kauth_cred_ref(cred);
+ bp->nb_rcred = cred;
+ }
+ if ((error = nfs_buf_read(bp)))
+ break;
+ continue;
+ }
+ nfs_buf_release(bp, 1);
+ }
+ return (error);
+}
+
+/*
+ * NFS buffer I/O for reading files/directories.
+ */
+int
+nfs_bioread(nfsnode_t np, struct uio *uio, int ioflag, int *eofflag, vfs_context_t ctx)
+{
+ vnode_t vp = NFSTOV(np);
+ struct nfsbuf *bp = NULL;
+ struct nfs_vattr nvattr;
+ struct nfsmount *nmp = VTONMP(vp);
+ daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1, tlbn;
+ off_t diff;
+ int error = 0, n = 0, on = 0;
+ int nfsvers, biosize;
+ caddr_t dp;
+ struct dirent *direntp = NULL;
+ enum vtype vtype;
+ thread_t thd;
+ kauth_cred_t cred;
+
+ FSDBG_TOP(514, np, uio->uio_offset, uio_uio_resid(uio), ioflag);
+
+ if (uio_uio_resid(uio) == 0) {
+ FSDBG_BOT(514, np, 0xd1e0001, 0, 0);
+ return (0);
+ }
+ if (uio->uio_offset < 0) {
+ FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL);
+ return (EINVAL);
+ }
+
+ nfsvers = nmp->nm_vers;
+ biosize = nmp->nm_biosize;
+ thd = vfs_context_thread(ctx);
+ cred = vfs_context_ucred(ctx);
+
+ vtype = vnode_vtype(vp);
+ if ((vtype != VREG) && (vtype != VDIR)) {
+ printf("nfs_bioread: type %x unexpected\n", vtype);
+ FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL);
+ return (EINVAL);
+ }
+
+ /*
+ * For nfs, cache consistency can only be maintained approximately.
+ * Although RFC1094 does not specify the criteria, the following is
+ * believed to be compatible with the reference port.
+ * For nfs:
+ * If the file's modify time on the server has changed since the
+ * last read rpc or you have written to the file,
+ * you may have lost data cache consistency with the
+ * server, so flush all of the file's data out of the cache.
+ * Then force a getattr rpc to ensure that you have up to date
+ * attributes.
+ * NB: This implies that cached data can be read when up to
+ * NFS_MAXATTRTIMEO seconds out of date. If current attributes are
+ * needed, this can be forced by calling NATTRINVALIDATE() before
+ * the nfs_getattr() call.
+ */
+
+ if (ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 0);
+
+ if ((error = nfs_lock(np, NFS_NODE_LOCK_EXCLUSIVE))) {
+ FSDBG_BOT(514, np, 0xd1e0222, 0, error);
+ return (error);
+ }
+
+ if (np->n_flag & NNEEDINVALIDATE) {
+ np->n_flag &= ~NNEEDINVALIDATE;
+ nfs_unlock(np);
+ nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
+ if ((error = nfs_lock(np, NFS_NODE_LOCK_EXCLUSIVE))) {
+ FSDBG_BOT(514, np, 0xd1e0322, 0, error);
+ return (error);
+ }
+ }
+
+ if (np->n_flag & NMODIFIED) {
+ if (vtype == VDIR) {
+ nfs_invaldir(np);
+ nfs_unlock(np);
+ error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
+ if (!error)
+ error = nfs_lock(np, NFS_NODE_LOCK_EXCLUSIVE);
+ if (error) {
+ FSDBG_BOT(514, np, 0xd1e0003, 0, error);
+ return (error);
+ }
+ }
+ NATTRINVALIDATE(np);
+ error = nfs_getattr(np, &nvattr, ctx, 1);
+ if (error) {
+ nfs_unlock(np);
+ FSDBG_BOT(514, np, 0xd1e0004, 0, error);
+ return (error);
+ }
+ if (vtype == VDIR) {
+ /* if directory changed, purge any name cache entries */
+ if (NFS_CHANGED_NC(nfsvers, np, &nvattr))
+ cache_purge(vp);
+ NFS_CHANGED_UPDATE_NC(nfsvers, np, &nvattr);
+ }
+ NFS_CHANGED_UPDATE(nfsvers, np, &nvattr);
+ } else {
+ error = nfs_getattr(np, &nvattr, ctx, 1);
+ if (error) {
+ nfs_unlock(np);
+ FSDBG_BOT(514, np, 0xd1e0005, 0, error);
+ return (error);
+ }
+ if (NFS_CHANGED(nfsvers, np, &nvattr)) {
+ if (vtype == VDIR) {
+ nfs_invaldir(np);
+ /* purge name cache entries */
+ if (NFS_CHANGED_NC(nfsvers, np, &nvattr))
+ cache_purge(vp);
+ }
+ nfs_unlock(np);
+ error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
+ if (!error)
+ error = nfs_lock(np, NFS_NODE_LOCK_EXCLUSIVE);
+ if (error) {
+ FSDBG_BOT(514, np, 0xd1e0006, 0, error);
+ return (error);
+ }
+ if (vtype == VDIR)
+ NFS_CHANGED_UPDATE_NC(nfsvers, np, &nvattr);
+ NFS_CHANGED_UPDATE(nfsvers, np, &nvattr);
+ }
+ }
+
+ nfs_unlock(np);
+
+ if (vtype == VREG) {
+ if ((ioflag & IO_NOCACHE) && (uio_uio_resid(uio) < (2*biosize))) {
+ /* We have only a block or so to read; just do the rpc directly. */
+ error = nfs_read_rpc(np, uio, ctx);
+ FSDBG_BOT(514, np, uio->uio_offset, uio_uio_resid(uio), error);
+ return (error);
+ }
+ /*
+ * set up readahead - which may be limited by:
+ * + current request length (for IO_NOCACHE)
+ * + readahead setting
+ * + file size
+ */
+ if (nmp->nm_readahead > 0) {
+ off_t end = uio->uio_offset + uio_uio_resid(uio);
+ if (end > (off_t)np->n_size)
+ end = np->n_size;
+ rabn = uio->uio_offset / biosize;
+ maxrabn = (end - 1) / biosize;
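+ /* extend readahead past the request only for cacheable, apparently-sequential reads */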
+ if (!(ioflag & IO_NOCACHE) &&
+ (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) {
+ maxrabn += nmp->nm_readahead;
+ if ((maxrabn * biosize) >= (off_t)np->n_size)
+ maxrabn = ((off_t)np->n_size - 1)/biosize;
+ }
+ } else {
+ rabn = maxrabn = 0;
+ }
+ }
+
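+ /* process the request one logical block at a time */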
+ do {
+
+ if (vtype == VREG) {
+ nfs_data_lock(np, NFS_NODE_LOCK_SHARED);
+ lbn = uio->uio_offset / biosize;
+
+ /*
+ * Copy directly from any cached pages without grabbing the bufs.
+ *
+ * Note: for "nocache" reads, we don't copy directly from UBC
+ * because any cached pages will be for readahead buffers that
+ * need to be invalidated anyway before we finish this request.
+ */
+ if (!(ioflag & IO_NOCACHE) &&
+ (uio->uio_segflg == UIO_USERSPACE32 ||
+ uio->uio_segflg == UIO_USERSPACE64 ||
+ uio->uio_segflg == UIO_USERSPACE)) {
+ // LP64todo - fix this!
+ int io_resid = uio_uio_resid(uio);
+ diff = np->n_size - uio->uio_offset;
+ if (diff < io_resid)
+ io_resid = diff;
+ if (io_resid > 0) {
+ error = cluster_copy_ubc_data(vp, uio, &io_resid, 0);