+ struct timespec ts = { 2, 0 };
+ int error;
+
+ if ((error = nfs_node_lock(np)))
+ return (error);
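+ /* wait until the node is no longer busy, rechecking for pending signals every 2 seconds */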
+ while (ISSET(np->n_flag, NBUSY)) {
+ SET(np->n_flag, NBUSYWANT);
+ msleep(np, &np->n_lock, PZERO-1, "nfsbusywant", &ts);
+ if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))
+ break;
+ }
+ if (!error)
+ SET(np->n_flag, NBUSY);
+ nfs_node_unlock(np);
+ return (error);
+}
+
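+ /*
+ * Clear the busy flag on an NFS node and wake up any threads
+ * waiting in nfs_node_set_busy().
+ */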
+void
+nfs_node_clear_busy(nfsnode_t np)
+{
+ int wanted;
+
+ nfs_node_lock_force(np);
+ wanted = ISSET(np->n_flag, NBUSYWANT);
+ CLR(np->n_flag, NBUSY|NBUSYWANT);
+ nfs_node_unlock(np);
+ if (wanted)
+ wakeup(np);
+}
+
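+ /*
+ * Mark a pair of NFS nodes busy. The nodes are always taken in
+ * descending address order so that concurrent callers locking the
+ * same pair cannot deadlock; a duplicate node is only taken once.
+ */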
+int
+nfs_node_set_busy2(nfsnode_t np1, nfsnode_t np2, thread_t thd)
+{
+ nfsnode_t first, second;
+ int error;
+
+ first = (np1 > np2) ? np1 : np2;
+ second = (np1 > np2) ? np2 : np1;
+ if ((error = nfs_node_set_busy(first, thd)))
+ return (error);
+ if (np1 == np2)
+ return (error);
+ if ((error = nfs_node_set_busy(second, thd)))
+ nfs_node_clear_busy(first);
+ return (error);
+}
+
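+ /*
+ * Clear the busy flag on a pair of NFS nodes, clearing a
+ * duplicate node only once.
+ */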
+void
+nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2)
+{
+ nfs_node_clear_busy(np1);
+ if (np1 != np2)
+ nfs_node_clear_busy(np2);
+}
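+
+ /*
+ * Typical caller pattern (an illustrative sketch, not code from this
+ * file; dnp/np stand for the two nodes involved in an operation):
+ *
+ *     if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
+ *         return (error);
+ *     ... operate on both nodes ...
+ *     nfs_node_clear_busy2(dnp, np);
+ */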
+
+ /* helper function to sort four nodes into descending address order, omitting duplicates (and a trailing NULL) */
+static void
+nfs_node_sort4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, nfsnode_t *list, int *lcntp)
+{
+ nfsnode_t na[2], nb[2];
+ int a, b, i, lcnt;
+
+ /* sort pairs then merge */
+ na[0] = (np1 > np2) ? np1 : np2;
+ na[1] = (np1 > np2) ? np2 : np1;
+ nb[0] = (np3 > np4) ? np3 : np4;
+ nb[1] = (np3 > np4) ? np4 : np3;
+ for (a = b = i = lcnt = 0; i < 4; i++) {
+ if (a >= 2)
+ list[lcnt] = nb[b++];
+ else if ((b >= 2) || (na[a] >= nb[b]))
+ list[lcnt] = na[a++];
+ else
+ list[lcnt] = nb[b++];
+ if ((lcnt <= 0) || (list[lcnt] != list[lcnt-1]))
+ lcnt++; /* omit dups */
+ }
+ if (list[lcnt-1] == NULL) /* callers may pass NULL nodes; drop a trailing NULL */
+ lcnt--;
+ *lcntp = lcnt;
+}
+
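+ /*
+ * Mark up to four NFS nodes busy, taking them in descending address
+ * order; if any node cannot be marked, the busy flags already taken
+ * are dropped before returning the error.
+ */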
+int
+nfs_node_set_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, thread_t thd)
+{
+ nfsnode_t list[4];
+ int i, lcnt, error;
+
+ nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
+
+ /* Now we can lock using list[0] through list[lcnt-1] */
+ for (i = 0; i < lcnt; ++i)
+ if ((error = nfs_node_set_busy(list[i], thd))) {
+ /* Drop any locks we acquired. */
+ while (--i >= 0)
+ nfs_node_clear_busy(list[i]);
+ return (error);
+ }
+ return (0);
+}
+
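+ /*
+ * Clear the busy flag on up to four NFS nodes, using the same
+ * sorted, duplicate-free list as nfs_node_set_busy4().
+ */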
+void
+nfs_node_clear_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4)
+{
+ nfsnode_t list[4];
+ int lcnt;
+
+ nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
+ while (--lcnt >= 0)
+ nfs_node_clear_busy(list[lcnt]);
+}
+
+/*
+ * Acquire an NFS node data lock
+ */
+void
+nfs_data_lock(nfsnode_t np, int locktype)
+{
+ nfs_data_lock_internal(np, locktype, 1);
+}
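+ /* Acquire the data lock without applying any pending size update. */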
+void
+nfs_data_lock_noupdate(nfsnode_t np, int locktype)
+{
+ nfs_data_lock_internal(np, locktype, 0);
+}
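+ /*
+ * Common body: optionally apply a pending size update (NUPDATESIZE)
+ * before taking the lock shared, or after taking it exclusive.
+ */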
+void
+nfs_data_lock_internal(nfsnode_t np, int locktype, int updatesize)
+{
+ FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
+ if (locktype == NFS_DATA_LOCK_SHARED) {
+ if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 0);
+ lck_rw_lock_shared(&np->n_datalock);
+ } else {
+ lck_rw_lock_exclusive(&np->n_datalock);
+ np->n_datalockowner = current_thread();
+ if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 1);
+ }
+ FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
+}
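+
+ /*
+ * Typical use (sketch): readers hold the lock shared around accesses
+ * to cached data and n_size; writers hold it exclusive:
+ *
+ *     nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
+ *     ... read cached data / np->n_size ...
+ *     nfs_data_unlock(np);
+ */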
+
+/*
+ * Release an NFS node data lock
+ */
+void
+nfs_data_unlock(nfsnode_t np)
+{
+ nfs_data_unlock_internal(np, 1);
+}
+void
+nfs_data_unlock_noupdate(nfsnode_t np)
+{
+ nfs_data_unlock_internal(np, 0);
+}
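+ /*
+ * Common body: if this thread owns the lock exclusive ("mine"), apply
+ * any pending size update before dropping it; otherwise drop the lock
+ * first and let nfs_data_update_size() reacquire it exclusive.
+ */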
+void
+nfs_data_unlock_internal(nfsnode_t np, int updatesize)
+{
+ int mine = (np->n_datalockowner == current_thread());
+ FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
+ if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 1);
+ np->n_datalockowner = NULL;
+ lck_rw_done(&np->n_datalock);
+ if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 0);
+ FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
+}
+
+
+/*
+ * Update an NFS node's size
+ */
+void
+nfs_data_update_size(nfsnode_t np, int datalocked)
+{
+ int error;
+
+ FSDBG_TOP(272, np, np->n_flag, np->n_size, np->n_newsize);
+ if (!datalocked) {
+ nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
+ /* grabbing data lock will automatically update size */
+ nfs_data_unlock(np);
+ FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
+ return;
+ }
+ error = nfs_node_lock(np);
+ if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
+ if (!error)
+ nfs_node_unlock(np);
+ FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
+ return;
+ }
+ CLR(np->n_flag, NUPDATESIZE);
+ np->n_size = np->n_newsize;
+ /* make sure we invalidate buffers the next chance we get */
+ SET(np->n_flag, NNEEDINVALIDATE);
+ nfs_node_unlock(np);
+ ubc_setsize(NFSTOV(np), (off_t)np->n_size); /* XXX ubc_setsize() can fail; the error is ignored here */
+ FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
+}
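+
+ /*
+ * Producer side (a sketch inferred from the consumer logic above):
+ * code that learns a new file size while holding only the node lock
+ * records it for later instead of changing n_size directly:
+ *
+ *     np->n_newsize = newsize;
+ *     SET(np->n_flag, NUPDATESIZE);
+ *
+ * The new size is then applied the next time the data lock is taken.
+ */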
+
+ #define DODEBUG 1 /* enable timing instrumentation in nfs_mount_is_dirty() */
+
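+ /*
+ * Scan the node hash table for any node on the given mount with dirty
+ * buffers queued; the scan stops at the first one found, so on return
+ * (i <= nfsnodehash) is nonzero iff the mount is dirty.
+ */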
+int
+nfs_mount_is_dirty(mount_t mp)
+{
+ u_long i;
+ nfsnode_t np;
+#ifdef DODEBUG
+ struct timeval now, then, diff;
+ u_long ncnt = 0;
+ microuptime(&now);
+#endif
+ lck_mtx_lock(nfs_node_hash_mutex);
+ for (i = 0; i <= nfsnodehash; i++) {
+ LIST_FOREACH(np, &nfsnodehashtbl[i], n_hash) {
+#ifdef DODEBUG
+ ncnt++;
+#endif
+ if (np->n_mount == mp && !LIST_EMPTY(&np->n_dirtyblkhd))
+ goto out;
+ }
+ }
+out:
+ lck_mtx_unlock(nfs_node_hash_mutex);
+#ifdef DODEBUG
+ microuptime(&then);
+ timersub(&then, &now, &diff);
+
+ NFS_DBG(NFS_FAC_SOCK, 7, "mount_is_dirty for %s took %lld usec over %ld buckets and %ld nodes, returning %d\n",
+ vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash));
+#endif