+/*
+ * Acquire an NFS node lock
+ */
+int
+nfs_lock(nfsnode_t np, int locktype)
+{
+ FSDBG_TOP(268, np, locktype, np->n_lockowner, 0);
+ if (locktype == NFS_NODE_LOCK_SHARED) {
+ lck_rw_lock_shared(&np->n_lock);
+ } else {
+ lck_rw_lock_exclusive(&np->n_lock);
+ np->n_lockowner = current_thread();
+ }
+ if ((locktype != NFS_NODE_LOCK_FORCE) && !(np->n_hflag && NHHASHED)) {
+ FSDBG_BOT(268, np, 0xdead, np->n_lockowner, 0);
+ nfs_unlock(np);
+ return (ENOENT);
+ }
+ FSDBG_BOT(268, np, locktype, np->n_lockowner, 0);
+ return (0);
+}
+
/*
 * Release an NFS node lock
 *
 * Clears the recorded lock owner and drops n_lock.  Used for both
 * shared and exclusive holds; for a shared hold n_lockowner is
 * presumably already NULL, so the clear is harmless — TODO confirm.
 */
void
nfs_unlock(nfsnode_t np)
{
	FSDBG(269, np, np->n_lockowner, current_thread(), 0);
	/* clear the owner before dropping the lock so no thread ever
	 * sees a stale owner for a lock it just acquired */
	np->n_lockowner = NULL;
	lck_rw_done(&np->n_lock);
}
+
+/*
+ * Acquire 2 NFS node locks
+ * - locks taken in order given (assumed to be parent-child order)
+ * - both or neither of the locks are taken
+ * - only one lock taken per node (dup nodes are skipped)
+ */
+int
+nfs_lock2(nfsnode_t np1, nfsnode_t np2, int locktype)
+{
+ int error;
+
+ if ((error = nfs_lock(np1, locktype)))
+ return (error);
+ if (np1 == np2)
+ return (error);
+ if ((error = nfs_lock(np2, locktype)))
+ nfs_unlock(np1);
+ return (error);
+}
+
+/*
+ * Unlock a couple of NFS nodes
+ */
+void
+nfs_unlock2(nfsnode_t np1, nfsnode_t np2)
+{
+ nfs_unlock(np1);
+ if (np1 != np2)
+ nfs_unlock(np2);
+}
+
/*
 * Acquire 4 NFS node locks
 * - fdnp/fnp and tdnp/tnp locks taken in order given
 * - otherwise locks taken in node address order.
 * - all or none of the locks are taken
 * - only one lock taken per node (dup nodes are skipped)
 * - some of the node pointers may be null
 */
int
nfs_lock4(nfsnode_t fdnp, nfsnode_t fnp, nfsnode_t tdnp, nfsnode_t tnp, int locktype)
{
	nfsnode_t list[4];
	int i, lcnt = 0, error;

	/*
	 * Order the two directories first.  If one is the parent of the
	 * other, lock parent before child; otherwise fall back to node
	 * address order to get a consistent global ordering (deadlock
	 * avoidance across concurrent callers).
	 */
	if (fdnp == tdnp) {
		list[lcnt++] = fdnp;
	} else if (fdnp->n_parent && (tdnp == VTONFS(fdnp->n_parent))) {
		list[lcnt++] = tdnp;
		list[lcnt++] = fdnp;
	} else if (tdnp->n_parent && (fdnp == VTONFS(tdnp->n_parent))) {
		list[lcnt++] = fdnp;
		list[lcnt++] = tdnp;
	} else if (fdnp < tdnp) {
		list[lcnt++] = fdnp;
		list[lcnt++] = tdnp;
	} else {
		list[lcnt++] = tdnp;
		list[lcnt++] = fdnp;
	}

	/*
	 * Then the two file nodes, again in address order, skipping a
	 * target that duplicates the source file or source directory.
	 * Note fnp may be NULL; NULL entries are skipped at lock time.
	 */
	if (!tnp || (fnp == tnp) || (tnp == fdnp)) {
		list[lcnt++] = fnp;
	} else if (fnp < tnp) {
		list[lcnt++] = fnp;
		list[lcnt++] = tnp;
	} else {
		list[lcnt++] = tnp;
		list[lcnt++] = fnp;
	}

	/* Now we can lock using list[0 - lcnt-1] */
	for (i = 0; i < lcnt; ++i) {
		if (list[i])
			if ((error = nfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						nfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
+
+/*
+ * Unlock a group of NFS nodes
+ */
+void
+nfs_unlock4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4)
+{
+ nfsnode_t list[4];
+ int i, k = 0;
+
+ if (np1) {
+ nfs_unlock(np1);
+ list[k++] = np1;
+ }
+ if (np2) {
+ for (i = 0; i < k; ++i)
+ if (list[i] == np2)
+ goto skip2;
+ nfs_unlock(np2);
+ list[k++] = np2;
+ }
+skip2:
+ if (np3) {
+ for (i = 0; i < k; ++i)
+ if (list[i] == np3)
+ goto skip3;
+ nfs_unlock(np3);
+ list[k++] = np3;
+ }
+skip3:
+ if (np4) {
+ for (i = 0; i < k; ++i)
+ if (list[i] == np4)
+ return;
+ nfs_unlock(np4);
+ }
+}
+
/*
 * Acquire an NFS node data lock
 *
 * Convenience wrapper around nfs_data_lock2() with size updating
 * enabled (a pending NUPDATESIZE is flushed as part of taking the
 * lock — see nfs_data_lock2).
 */
void
nfs_data_lock(nfsnode_t np, int locktype)
{
	nfs_data_lock2(np, locktype, 1);
}
+void
+nfs_data_lock2(nfsnode_t np, int locktype, int updatesize)
+{
+ FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
+ if (locktype == NFS_NODE_LOCK_SHARED) {
+ if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 0);
+ lck_rw_lock_shared(&np->n_datalock);
+ } else {
+ lck_rw_lock_exclusive(&np->n_datalock);
+ np->n_datalockowner = current_thread();
+ if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
+ nfs_data_update_size(np, 1);
+ }
+ FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
+}
+
/*
 * Release an NFS node data lock
 *
 * Convenience wrapper around nfs_data_unlock2() with size updating
 * enabled.
 */
void
nfs_data_unlock(nfsnode_t np)
{
	nfs_data_unlock2(np, 1);
}
/*
 * Release an NFS node data lock, optionally processing a pending
 * size update (updatesize nonzero).
 *
 * If the calling thread holds the lock exclusively ("mine"), the
 * pending size update is applied while the lock is still held;
 * otherwise (shared holder) it is applied after the lock is dropped,
 * since the update path needs to take the data lock itself.
 */
void
nfs_data_unlock2(nfsnode_t np, int updatesize)
{
	int mine = (np->n_datalockowner == current_thread());
	FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
	if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE))
		nfs_data_update_size(np, 1);
	/* clear the owner before dropping the lock */
	np->n_datalockowner = NULL;
	lck_rw_done(&np->n_datalock);
	if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE))
		nfs_data_update_size(np, 0);
	FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
}
+
+
/*
 * update an NFS node's size
 *
 * Applies a pending NUPDATESIZE size change (n_size <- n_newsize).
 * datalocked indicates whether the caller already holds the data
 * lock: if not, we simply take and drop it exclusively, which
 * performs the update as a side effect (see nfs_data_lock2).
 */
void
nfs_data_update_size(nfsnode_t np, int datalocked)
{
	int error;

	FSDBG_TOP(272, np, np->n_flag, np->n_size, np->n_newsize);
	if (!datalocked) {
		nfs_data_lock(np, NFS_NODE_LOCK_EXCLUSIVE);
		/* grabbing data lock will automatically update size */
		nfs_data_unlock(np);
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	/* take the node lock to update the size fields */
	error = nfs_lock(np, NFS_NODE_LOCK_EXCLUSIVE);
	if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
		/* nothing to do (or node gone); drop the lock if we got it */
		if (!error)
			nfs_unlock(np);
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	CLR(np->n_flag, NUPDATESIZE);
	np->n_size = np->n_newsize;
	/* make sure we invalidate buffers the next chance we get */
	SET(np->n_flag, NNEEDINVALIDATE);
	nfs_unlock(np);
	/* node lock dropped before calling into UBC to avoid holding it
	 * across that call; n_size was just set above */
	ubc_setsize(NFSTOV(np), (off_t)np->n_size); /* XXX error? */
	FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
}
+