+ * Be sure all requests for this mount are completed
+ * and removed from the resend queue.
+ */
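+	/* Local list to collect requests for release once nfs_request_mutex is dropped */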
+ TAILQ_INIT(&resendq);
+ lck_mtx_lock(nfs_request_mutex);
+ TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
+ if (req->r_nmp == nmp) {
+ lck_mtx_lock(&req->r_mtx);
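+			/* Fail any request that is still waiting on a reply */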
+ if (!req->r_error && req->r_nmrep.nmc_mhead == NULL)
+ req->r_error = EIO;
+ if (req->r_flags & R_RESENDQ) {
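+				/* The mount's nm_lock protects the nm_resendq list */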
+ lck_mtx_lock(&nmp->nm_lock);
+ req->r_flags &= ~R_RESENDQ;
+ if (req->r_rchain.tqe_next != NFSREQNOLIST) {
+ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
+ /*
+					 * Queue up the request so that we can unreference
+					 * it without holding nfs_request_mutex.
+ */
+ TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
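+			/* Wake up anyone sleeping on this request so they see the error */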
+ wakeup(req);
+ lck_mtx_unlock(&req->r_mtx);
+ }
+ }
+ lck_mtx_unlock(nfs_request_mutex);
+
+	/* Now that we've dropped the request mutex, we can safely unreference the requests. */
+ TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
+ TAILQ_REMOVE(&resendq, req, r_rchain);
+ nfs_request_rele(req);
+ }
+
+ /*
+	 * Now handle any outstanding async requests. We need to walk the
+	 * request queue again, this time with the nfsiod_mutex held. No
+ * other iods can grab our requests until we've put them on our own
+ * local iod queue for processing.