]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/nfs/nfs_vfsops.c
xnu-3248.20.55.tar.gz
[apple/xnu.git] / bsd / nfs / nfs_vfsops.c
index 50af3f4c358e1f9200a83f46457b8d47d6a393bb..90e20e774614a6f8703838e82ac1f39935368b0e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 
 #include <pexpert/pexpert.h>
 
+#define NFS_VFS_DBG(...) NFS_DBG(NFS_FAC_VFS, 7, ## __VA_ARGS__)
+
 /*
  * NFS client globals
  */
@@ -180,6 +182,7 @@ static int  nfs_mount_diskless(struct nfs_dlmount *, const char *, int, vnode_t *
 static int     nfs_mount_diskless_private(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t);
 #endif /* NO_MOUNT_PRIVATE */
 int            nfs_mount_connect(struct nfsmount *);
+void           nfs_mount_drain_and_cleanup(struct nfsmount *);
 void           nfs_mount_cleanup(struct nfsmount *);
 int            nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *);
 int            nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t);
@@ -373,7 +376,10 @@ nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
        nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
        nfsm_chain_build_done(error, &nmreq);
        nfsmout_if(error);
-       error = nfs_request(np, NULL, &nmreq, NFSPROC_FSSTAT, ctx, NULL, &nmrep, &xid, &status);
+       error = nfs_request2(np, NULL, &nmreq, NFSPROC_FSSTAT, vfs_context_thread(ctx),
+               vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status);
+       if (error == ETIMEDOUT)
+               goto nfsmout;
        if ((lockerror = nfs_node_lock(np)))
                error = lockerror;
        if (nfsvers == NFS_VER3)
@@ -449,7 +455,7 @@ nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
        // PUTFH + GETATTR
        numops = 2;
        nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
-       nfsm_chain_add_compound_header(error, &nmreq, "statfs", numops);
+       nfsm_chain_add_compound_header(error, &nmreq, "statfs", nmp->nm_minor_vers, numops);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
        nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
@@ -461,7 +467,9 @@ nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
        nfsm_chain_build_done(error, &nmreq);
        nfsm_assert(error, (numops == 0), EPROTO);
        nfsmout_if(error);
-       error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
+       error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
+               vfs_context_thread(ctx), vfs_context_ucred(ctx),
+               NULL, R_SOFT, &nmrep, &xid, &status);
        nfsm_chain_skip_tag(error, &nmrep);
        nfsm_chain_get_32(error, &nmrep, numops);
        nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
@@ -502,7 +510,8 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
        uint32_t bsize;
        int error = 0, nfsvers;
 
-       if (!(nmp = VFSTONFS(mp)))
+       nmp = VFSTONFS(mp);
+       if (nfs_mount_gone(nmp))
                return (ENXIO);
        nfsvers = nmp->nm_vers;
 
@@ -537,7 +546,7 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
                        lck_mtx_unlock(&nmp->nm_lock);
                }
 
-               if (refresh)
+               if (refresh && !nfs_use_cache(nmp))
                        error = nmp->nm_funcs->nf_update_statfs(nmp, ctx);
                if ((error == ESTALE) || (error == ETIMEDOUT))
                        error = 0;
@@ -1309,6 +1318,7 @@ nfs_mount_diskless_private(
        strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSNAMELEN-1);
        vp->v_mountedhere = mp;
        mp->mnt_vnodecovered = vp;
+       vp = NULLVP;
        mp->mnt_vfsstat.f_owner = kauth_cred_getuid(kauth_cred_get());
        (void) copystr(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN - 1, 0);
        (void) copystr(ndmntp->ndm_mntfrom, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, 0);
@@ -1424,6 +1434,7 @@ nfs_mount_diskless_private(
        /* do the mount */
        if ((error = mountnfs(xdrbuf, mp, ctx, &vp))) {
                printf("nfs_mountroot: mount %s failed: %d\n", mntname, error);
+               vnode_put(mp->mnt_vnodecovered);
                mount_list_lock();
                vfsp->vfc_refcount--;
                mount_list_unlock();
@@ -1975,7 +1986,7 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf
        // PUTFH, READLINK
        numops = 2;
        nfsm_chain_build_alloc_init(error, &nmreq, 12 * NFSX_UNSIGNED);
-       nfsm_chain_add_compound_header(error, &nmreq, "readlink", numops);
+       nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
        nfsm_chain_add_fh(error, &nmreq, NFS_VER4, fhp->fh_data, fhp->fh_len);
@@ -2169,7 +2180,7 @@ nocomponents:
                NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, NULL, 0);
                numops = 2;
                nfsm_chain_build_alloc_init(error, &nmreq, 9 * NFSX_UNSIGNED);
-               nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+               nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
                numops--;
                nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTROOTFH);
                numops--;
@@ -2229,7 +2240,7 @@ nocomponents:
                        NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
                numops = 4;
                nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED);
-               nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+               nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
                numops--;
                if (dirfh.fh_len) {
                        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
@@ -2392,7 +2403,7 @@ gotfh:
        /* get attrs for mount point root */
        numops = NMFLAG(nmp, NONAMEDATTR) ? 2 : 3; // PUTFH + GETATTR + OPENATTR
        nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
-       nfsm_chain_add_compound_header(error, &nmreq, "mount", numops);
+       nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
        nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
@@ -2620,6 +2631,21 @@ nfs_mount_connect(struct nfsmount *nmp)
        return (error);
 }
 
+/* Table of the maximum supported minor version for each NFS major version (indexed by major version) */
+uint32_t maxminorverstab[] = {
+       0, /* Version 0 (does not exist) */
+       0, /* Version 1 (does not exist) */
+       0, /* Version 2 */
+       0, /* Version 3 */
+       0, /* Version 4 */
+};
+
+#define NFS_MAX_SUPPORTED_VERSION  ((long)(sizeof (maxminorverstab) / sizeof (uint32_t) - 1)) /* highest NFS major version listed in the table */
+#define NFS_MAX_SUPPORTED_MINOR_VERSION(v) ((long)(maxminorverstab[(v)])) /* max minor version for major version (v); no bounds check — caller validates */
+
+#define DEFAULT_NFS_MIN_VERS VER2PVER(2, 0) /* default negotiation floor: NFSv2.0 */
+#define DEFAULT_NFS_MAX_VERS VER2PVER(3, 0) /* default negotiation ceiling: NFSv3.0 */
+
 /*
  * Common code to mount an NFS file system.
  */
@@ -2635,7 +2661,7 @@ mountnfs(
        int error = 0;
        struct vfsstatfs *sbp;
        struct xdrbuf xb;
-       uint32_t i, val, vers = 0, minorvers, maxio, iosize, len;
+       uint32_t i, val, maxio, iosize, len;
        uint32_t *mattrs;
        uint32_t *mflags_mask;
        uint32_t *mflags;
@@ -2675,7 +2701,10 @@ mountnfs(
                nmp->nm_args = xdrbuf;
 
                /* set up defaults */
+               nmp->nm_ref = 0;
                nmp->nm_vers = 0;
+               nmp->nm_min_vers = DEFAULT_NFS_MIN_VERS;
+               nmp->nm_max_vers = DEFAULT_NFS_MAX_VERS;
                nmp->nm_timeo = NFS_TIMEO;
                nmp->nm_retry = NFS_RETRANS;
                nmp->nm_sotype = 0;
@@ -2697,6 +2726,7 @@ mountnfs(
                nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
                nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
                nmp->nm_auth = RPCAUTH_SYS;
+               nmp->nm_iodlink.tqe_next = NFSNOLIST;
                nmp->nm_deadtimeout = 0;
                nmp->nm_curdeadtimeout = 0;
                NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_NOACL);
@@ -2739,37 +2769,36 @@ mountnfs(
                }
        }
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
-               xb_get_32(error, &xb, vers);
+               /* Can't specify a single version and a range */
+               if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE))
+                       error = EINVAL;
+               xb_get_32(error, &xb, nmp->nm_vers);
+               if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION ||
+                   nmp->nm_vers < NFS_VER2)
+                       error = EINVAL;
                if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
-                       xb_get_32(error, &xb, minorvers);
+                       xb_get_32(error, &xb, nmp->nm_minor_vers);
                else
-                       minorvers = 0;
-               nfsmerr_if(error);
-               switch (vers) {
-               case 2:
-                       nmp->nm_vers = NFS_VER2;
-                       break;
-               case 3:
-                       nmp->nm_vers = NFS_VER3;
-                       break;
-               case 4:
-                       switch (minorvers) {
-                       case 0:
-                               nmp->nm_vers = NFS_VER4;
-                               break;
-                       default:
-                               error = EINVAL;
-                       }
-                       break;
-               default:
+                       nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers];
+               if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers])
                        error = EINVAL;
-               }
-       }
+               nmp->nm_max_vers = nmp->nm_min_vers = 
+                       VER2PVER(nmp->nm_vers, nmp->nm_minor_vers);
+       } 
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
-               /* should have also gotten NFS version (and already gotten minorvers) */
+               /* should have also gotten NFS version (and already gotten minor version) */
                if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION))
                        error = EINVAL;
        }
+       if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
+               xb_get_32(error, &xb, nmp->nm_min_vers);
+               xb_get_32(error, &xb, nmp->nm_max_vers);
+               if ((nmp->nm_min_vers > nmp->nm_max_vers) ||
+                   (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) ||
+                   (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) ||
+                   (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)]))
+                       error = EINVAL;
+       }
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
                xb_get_32(error, &xb, nmp->nm_rsize);
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
@@ -3315,7 +3344,7 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx)
 
        xb_init(&xbnew, 0);
 
-       if (!nmp || (nmp->nm_state & NFSSTA_FORCE))
+       if (!nmp || (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)))
                return (ENXIO);
 
        /* allocate a couple path buffers we need */
@@ -3447,6 +3476,10 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx)
                xb_copy_32(error, &xb, &xbnew, val);
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
                xb_copy_32(error, &xb, &xbnew, val);
+       if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
+               xb_copy_32(error, &xb, &xbnew, val);
+               xb_copy_32(error, &xb, &xbnew, val);
+       }
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
                xb_copy_32(error, &xb, &xbnew, val);
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
@@ -4182,7 +4215,9 @@ nfs_vfs_unmount(
 
        vflush(mp, NULLVP, FORCECLOSE);
 
-       nfs_mount_cleanup(nmp);
+       /* Wait for all other references to be released and free the mount */
+       nfs_mount_drain_and_cleanup(nmp);
+       
        return (0);
 }
 
@@ -4234,26 +4269,59 @@ nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp)
        nfslsp->nl_locations = NULL;
 }
 
+void
+nfs_mount_rele(struct nfsmount *nmp) /* drop one reference (nm_ref) on the mount */
+{
+       int wup = 0; /* nonzero if a draining unmounter needs a wakeup */
+
+       lck_mtx_lock(&nmp->nm_lock);
+       if (nmp->nm_ref < 1)
+               panic("nfs zombie mount underflow\n");
+       nmp->nm_ref--;
+       if (nmp->nm_ref == 0)
+               wup = nmp->nm_state & NFSSTA_MOUNT_DRAIN; /* wake only if an unmount drain is in progress */
+       lck_mtx_unlock(&nmp->nm_lock);
+       if (wup)
+               wakeup(&nmp->nm_ref); /* drainer sleeps on &nmp->nm_ref in nfs_mount_drain_and_cleanup() */
+}
+
+void
+nfs_mount_drain_and_cleanup(struct nfsmount *nmp) /* wait for all other refs to be released, then destroy the mount */
+{
+       lck_mtx_lock(&nmp->nm_lock);
+       nmp->nm_state |= NFSSTA_MOUNT_DRAIN; /* tell nfs_mount_rele() to wake us when nm_ref reaches zero */
+       while (nmp->nm_ref > 0) {
+               msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO-1, "nfs_mount_drain", NULL);
+       }
+       assert(nmp->nm_ref == 0);
+       lck_mtx_unlock(&nmp->nm_lock);
+       nfs_mount_cleanup(nmp); /* safe now: no other thread holds a reference */
+}
+
 /*
- * cleanup/destroy an nfsmount
+ * nfs_mount_zombie
  */
 void
-nfs_mount_cleanup(struct nfsmount *nmp)
+nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags)
 {
        struct nfsreq *req, *treq;
-       struct nfs_reqqhead iodq;
+       struct nfs_reqqhead iodq, resendq;
        struct timespec ts = { 1, 0 };
        struct nfs_open_owner *noop, *nextnoop;
        nfsnode_t np;
        int docallback;
 
+       lck_mtx_lock(&nmp->nm_lock);
+       nmp->nm_state |= nm_state_flags;
+       nmp->nm_ref++;
+       lck_mtx_unlock(&nmp->nm_lock);
+       
        /* stop callbacks */
        if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid)
                nfs4_mount_callback_shutdown(nmp);
 
        /* Destroy any RPCSEC_GSS contexts */
-       if (!TAILQ_EMPTY(&nmp->nm_gsscl))
-               nfs_gss_clnt_ctx_unmount(nmp);
+       nfs_gss_clnt_ctx_unmount(nmp);
 
        /* mark the socket for termination */
        lck_mtx_lock(&nmp->nm_lock);
@@ -4261,23 +4329,19 @@ nfs_mount_cleanup(struct nfsmount *nmp)
 
        /* Have the socket thread send the unmount RPC, if requested/appropriate. */
        if ((nmp->nm_vers < NFS_VER4) && (nmp->nm_state & NFSSTA_MOUNTED) &&
-           !(nmp->nm_state & NFSSTA_FORCE) && NMFLAG(nmp, CALLUMNT))
+           !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT))
                nfs_mount_sock_thread_wake(nmp);
 
        /* wait for the socket thread to terminate */
-       while (nmp->nm_sockthd) {
+       while (nmp->nm_sockthd && current_thread() != nmp->nm_sockthd) {
                wakeup(&nmp->nm_sockthd);
                msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO-1, "nfswaitsockthd", &ts);
        }
-
        lck_mtx_unlock(&nmp->nm_lock);
 
        /* tear down the socket */
        nfs_disconnect(nmp);
 
-       if (nmp->nm_mountp)
-               vfs_setfsprivate(nmp->nm_mountp, NULL);
-
        lck_mtx_lock(&nmp->nm_lock);
 
        if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
@@ -4294,10 +4358,6 @@ nfs_mount_cleanup(struct nfsmount *nmp)
                thread_call_free(nmp->nm_renew_timer);
        }
 
-       if (nmp->nm_saddr)
-               FREE(nmp->nm_saddr, M_SONAME);
-       if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr)
-               FREE(nmp->nm_rqsaddr, M_SONAME);
        lck_mtx_unlock(&nmp->nm_lock);
 
        if (nmp->nm_state & NFSSTA_MOUNTED)
@@ -4307,8 +4367,10 @@ nfs_mount_cleanup(struct nfsmount *nmp)
                        break;
                case NFS_LOCK_MODE_ENABLED:
                default:
-                       if (nmp->nm_vers <= NFS_VER3)
+                       if (nmp->nm_vers <= NFS_VER3) {
                                nfs_lockd_mount_unregister(nmp);
+                               nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
+                       }
                        break;
                }
 
@@ -4323,48 +4385,83 @@ nfs_mount_cleanup(struct nfsmount *nmp)
        }
 
        /*
-        * Loop through outstanding request list and remove dangling
-        * references to defunct nfsmount struct
+        * Be sure all requests for this mount are completed
+        * and removed from the resend queue.
         */
-       TAILQ_INIT(&iodq);
+       TAILQ_INIT(&resendq);
        lck_mtx_lock(nfs_request_mutex);
        TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
                if (req->r_nmp == nmp) {
                        lck_mtx_lock(&req->r_mtx);
-                       req->r_nmp = NULL;
-                       lck_mtx_unlock(&req->r_mtx);
-                       if (req->r_callback.rcb_func) {
-                               /* async I/O RPC needs to be finished */
-                               lck_mtx_lock(nfsiod_mutex);
-                               if (req->r_achain.tqe_next == NFSREQNOLIST)
-                                       TAILQ_INSERT_TAIL(&iodq, req, r_achain);
-                               lck_mtx_unlock(nfsiod_mutex);
-                       }
-                       lck_mtx_lock(&req->r_mtx);
-                       lck_mtx_lock(&nmp->nm_lock);
+                       if (!req->r_error && req->r_nmrep.nmc_mhead == NULL)
+                               req->r_error = EIO;
                        if (req->r_flags & R_RESENDQ) {
+                               lck_mtx_lock(&nmp->nm_lock);
+                               req->r_flags &= ~R_RESENDQ;
                                if (req->r_rchain.tqe_next != NFSREQNOLIST) {
                                        TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
-                                       req->r_rchain.tqe_next = NFSREQNOLIST;
+                                       /*
+                        * Queue up the request so that we can unreference it
+                        * without holding nfs_request_mutex
+                                        */
+                                       TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
                                }
-                               req->r_flags &= ~R_RESENDQ;
+                               lck_mtx_unlock(&nmp->nm_lock);
                        }
-                       lck_mtx_unlock(&nmp->nm_lock);
-                       lck_mtx_unlock(&req->r_mtx);
                        wakeup(req);
+                       lck_mtx_unlock(&req->r_mtx);
                }
        }
        lck_mtx_unlock(nfs_request_mutex);
 
-       /* finish any async I/O RPCs queued up */
+       /* Since we've dropped the request mutex we can now safely unreference the queued requests */
+       TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
+               TAILQ_REMOVE(&resendq, req, r_rchain);
+               nfs_request_rele(req);
+       }
+
+       /*
+        * Now handle any outstanding async requests. We need to walk the
+        * request queue again this time with the nfsiod_mutex held. No
+        * other iods can grab our requests until we've put them on our own
+        * local iod queue for processing.
+        */
+       TAILQ_INIT(&iodq);
+       lck_mtx_lock(nfs_request_mutex);
        lck_mtx_lock(nfsiod_mutex);
+       TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
+               if (req->r_nmp == nmp) {
+                       lck_mtx_lock(&req->r_mtx);
+                       if (req->r_callback.rcb_func
+                           && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) {
+                               /* 
+                                * Since R_IOD is not set, we need to handle it. If
+                                * we're not on a list add it to our iod queue. Otherwise
+                                * we must already be on nm_iodq which is added to our
+                                * local queue below.
+                                * %%% We should really keep a back pointer to our iod queue
+                                * that we're on.
+                                */
+                               req->r_flags |= R_IOD;
+                               if (req->r_achain.tqe_next == NFSREQNOLIST) {
+                                       TAILQ_INSERT_TAIL(&iodq, req, r_achain);
+                               }
+                       }
+                       lck_mtx_unlock(&req->r_mtx);
+               }
+       }
+
+       /* finish any async I/O RPCs queued up */
+       if (nmp->nm_iodlink.tqe_next != NFSNOLIST)
+               TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink);
        TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
        lck_mtx_unlock(nfsiod_mutex);
+       lck_mtx_unlock(nfs_request_mutex);
+
        TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
                TAILQ_REMOVE(&iodq, req, r_achain);
                req->r_achain.tqe_next = NFSREQNOLIST;
                lck_mtx_lock(&req->r_mtx);
-               req->r_error = ENXIO;
                docallback = !(req->r_flags & R_WAITSENT);
                lck_mtx_unlock(&req->r_mtx);
                if (docallback)
@@ -4395,6 +4492,41 @@ nfs_mount_cleanup(struct nfsmount *nmp)
                }
                lck_mtx_unlock(&nmp->nm_lock);
        }
+
+       nfs_mount_rele(nmp);
+}
+
+/*
+ * cleanup/destroy an nfsmount
+ */
+void
+nfs_mount_cleanup(struct nfsmount *nmp)
+{
+       if (!nmp)
+               return;
+
+       nfs_mount_zombie(nmp, 0);
+
+       NFS_VFS_DBG("Unmounting %s from %s\n",
+                   vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+                   vfs_statfs(nmp->nm_mountp)->f_mntonname);
+       NFS_VFS_DBG("nfs state = %x\n", nmp->nm_state);
+       NFS_VFS_DBG("nfs socket flags = %x\n", nmp->nm_sockflags);
+       NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);
+       NFS_VFS_DBG("mount ref count is %d\n", nmp->nm_mountp->mnt_count);
+       
+       if (nmp->nm_mountp)
+               vfs_setfsprivate(nmp->nm_mountp, NULL);
+
+       lck_mtx_lock(&nmp->nm_lock);
+       if (nmp->nm_ref)
+               panic("Some one has grabbed a ref %d\n", nmp->nm_ref);
+
+       if (nmp->nm_saddr)
+               FREE(nmp->nm_saddr, M_SONAME);
+       if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr)
+               FREE(nmp->nm_rqsaddr, M_SONAME);
+
        if (IS_VALID_CRED(nmp->nm_mcred))
                kauth_cred_unref(&nmp->nm_mcred);
 
@@ -4409,6 +4541,9 @@ nfs_mount_cleanup(struct nfsmount *nmp)
        
        if (nmp->nm_args)
                xb_free(nmp->nm_args);
+
+       lck_mtx_unlock(&nmp->nm_lock);
+       
        lck_mtx_destroy(&nmp->nm_lock, nfs_mount_grp);
        if (nmp->nm_fh)
                FREE(nmp->nm_fh, M_TEMP);
@@ -4625,7 +4760,7 @@ nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc
        // PUTFH + GETATTR
        numops = 2;
        nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
-       nfsm_chain_add_compound_header(error, &nmreq, "quota", numops);
+       nfsm_chain_add_compound_header(error, &nmreq, "quota", nmp->nm_minor_vers, numops);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
        nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
@@ -4665,7 +4800,8 @@ nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t c
        uid_t euid = kauth_cred_getuid(vfs_context_ucred(ctx));
        struct dqblk *dqb = (struct dqblk*)datap;
 
-       if (!(nmp = VFSTONFS(mp)))
+       nmp = VFSTONFS(mp);
+       if (nfs_mount_gone(nmp))
                return (ENXIO);
        nfsvers = nmp->nm_vers;
 
@@ -4996,7 +5132,7 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb)
        xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN);
        xb_add_32(error, &xbinfo, nmp->nm_vers);                /* NFS_VERSION */
        if (nmp->nm_vers >= NFS_VER4)
-               xb_add_32(error, &xbinfo, 0);                   /* NFS_MINOR_VERSION */
+               xb_add_32(error, &xbinfo, nmp->nm_minor_vers);  /* NFS_MINOR_VERSION */
        xb_add_32(error, &xbinfo, nmp->nm_rsize);               /* READ_SIZE */
        xb_add_32(error, &xbinfo, nmp->nm_wsize);               /* WRITE_SIZE */
        xb_add_32(error, &xbinfo, nmp->nm_readdirsize);         /* READDIR_SIZE */
@@ -5119,15 +5255,20 @@ int
 nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
            user_addr_t newp, size_t newlen, vfs_context_t ctx)
 {
-       int error = 0, val, softnobrowse;
+       int error = 0, val;
+       int softnobrowse;
        struct sysctl_req *req = NULL;
        union union_vfsidctl vc;
        mount_t mp;
        struct nfsmount *nmp = NULL;
        struct vfsquery vq;
+       struct nfsreq *rq;
        boolean_t is_64_bit;
        fsid_t fsid;
        struct xdrbuf xb;
+       struct netfs_status *nsp = NULL;
+       int timeoutmask;
+       uint pos, totlen, count, numThreads;
 #if NFSSERVER
        struct nfs_exportfs *nxfs;
        struct nfs_export *nx;
@@ -5139,7 +5280,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
        struct nfs_user_stat_user_rec ustat_rec;
        struct nfs_user_stat_path_rec upath_rec;
        uint bytes_avail, bytes_total, recs_copied;
-       uint numExports, totlen, pos, numRecs, count;
+       uint numExports, numRecs;
 #endif /* NFSSERVER */
 
        /*
@@ -5153,9 +5294,13 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
        /* common code for "new style" VFS_CTL sysctl, get the mount. */
        switch (name[0]) {
        case VFS_CTL_TIMEO:
-       case VFS_CTL_QUERY:
        case VFS_CTL_NOLOCKS:
+       case VFS_CTL_NSTATUS:
+       case VFS_CTL_QUERY:
                req = CAST_DOWN(struct sysctl_req *, oldp);
+               if (req == NULL) {
+                       return EFAULT;
+               }
                error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
                if (error)
                        return (error);
@@ -5163,7 +5308,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
                if (mp == NULL)
                        return (ENOENT);
                nmp = VFSTONFS(mp);
-               if (nmp == NULL)
+               if (!nmp)
                        return (ENOENT);
                bzero(&vq, sizeof(vq));
                req->newidx = 0;
@@ -5174,6 +5319,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
                        req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
                        req->newlen = vc.vc32.vc_len;
                }
+               break;
        }
 
        switch(name[0]) {
@@ -5535,6 +5681,88 @@ ustat_skip:
                        lck_mtx_unlock(&nmp->nm_lock);
                }
                break;
+       case VFS_CTL_NSTATUS:
+               /*
+                * Return the status of this mount.  This is much more
+                * information than VFS_CTL_QUERY.  In addition to the
+                * vq_flags return the significant mount options along
+                * with the list of threads blocked on the mount and
+                * how long the threads have been waiting.
+                */
+
+               lck_mtx_lock(nfs_request_mutex);
+               lck_mtx_lock(&nmp->nm_lock);
+
+               /*
+                * Count the number of requests waiting for a reply.
+                * Note: there could be multiple requests from the same thread.
+                */
+               numThreads = 0;
+               TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+                       if (rq->r_nmp == nmp)
+                               numThreads++;
+               }
+
+               /* Calculate total size of result buffer */
+               totlen = sizeof(struct netfs_status) + (numThreads * sizeof(uint64_t));
+
+               if (req->oldptr == USER_ADDR_NULL) {            // Caller is querying buffer size
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_unlock(nfs_request_mutex);
+                       return SYSCTL_OUT(req, NULL, totlen);
+               }
+               if (req->oldlen < totlen) {     // Check if caller's buffer is big enough
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_unlock(nfs_request_mutex);
+                       return (ERANGE);
+               }
+
+               MALLOC(nsp, struct netfs_status *, totlen, M_TEMP, M_WAITOK|M_ZERO);
+               if (nsp == NULL) {
+                       lck_mtx_unlock(&nmp->nm_lock);
+                       lck_mtx_unlock(nfs_request_mutex);
+                       return (ENOMEM);
+               }
+               timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
+               if (nmp->nm_state & timeoutmask)
+                       nsp->ns_status |= VQ_NOTRESP;
+               if (nmp->nm_state & NFSSTA_DEAD)
+                       nsp->ns_status |= VQ_DEAD;
+
+               (void) nfs_mountopts(nmp, nsp->ns_mountopts, sizeof(nsp->ns_mountopts));
+               nsp->ns_threadcount = numThreads;
+               
+               /*
+                * Get the thread ids of threads waiting for a reply
+                * and find the longest wait time.
+                */
+               if (numThreads > 0) {
+                       struct timeval now;
+                       time_t sendtime;
+
+                       microuptime(&now);
+                       count = 0;
+                       sendtime = now.tv_sec;
+                       TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
+                               if (rq->r_nmp == nmp) {
+                                       if (rq->r_start < sendtime)
+                                               sendtime = rq->r_start;
+                                       // A thread_id of zero is used to represent an async I/O request.
+                                       nsp->ns_threadids[count] =
+                                               rq->r_thread ? thread_tid(rq->r_thread) : 0;
+                                       if (++count >= numThreads)
+                                               break;
+                               }
+                       }
+                       nsp->ns_waittime = now.tv_sec - sendtime;
+               }
+
+               lck_mtx_unlock(&nmp->nm_lock);
+               lck_mtx_unlock(nfs_request_mutex);
+
+               error = SYSCTL_OUT(req, nsp, totlen);
+               FREE(nsp, M_TEMP);
+               break;
        default:
                return (ENOTSUP);
        }