+/*
+ * Check ordering of two cnodes.  Return true if they are in-order.
+ *
+ * A NULL pointer sorts first and the 0xffffffff sentinel sorts last,
+ * a parent sorts before its child, and otherwise the cnode with the
+ * lesser address sorts first.
+ */
+static int
+hfs_isordered(struct cnode *cp1, struct cnode *cp2)
+{
+	if (cp1 == cp2)
+		return (0);
+	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
+		return (1);
+	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
+		return (0);
+	if (cp1->c_fileid == cp2->c_parentcnid)
+		return (1);	/* cp1 is the parent and should go first */
+	if (cp2->c_fileid == cp1->c_parentcnid)
+		return (0);	/* cp1 is the child and should go last */
+
+	return (cp1 < cp2);	/* fall-back is to use address order */
+}
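+
+/*
+ * For example (hypothetical cnodes, for illustration only): given a
+ * directory dcp and a file cp inside it (cp->c_parentcnid equal to
+ * dcp->c_fileid), hfs_isordered(dcp, cp) returns 1 and
+ * hfs_isordered(cp, dcp) returns 0, so the parent directory always
+ * sorts ahead of its child in the lock list.
+ */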
+
+/*
+ * Acquire 4 cnode locks.
+ * - locked in cnode parent-child order (if there is a relationship);
+ *   otherwise locked in cnode address order (lesser address first)
+ * - all or none of the locks are taken
+ * - only one lock taken per cnode (dup cnodes are skipped)
+ * - some of the cnode pointers may be null
+ */
+__private_extern__
+int
+hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
+             struct cnode *cp4, enum hfslocktype locktype)
+{
+	struct cnode * a[3];
+	struct cnode * b[3];
+	struct cnode * list[4];
+	struct cnode * tmp;
+	int i, j, k;
+	int error;
+
+	if (hfs_isordered(cp1, cp2)) {
+		a[0] = cp1; a[1] = cp2;
+	} else {
+		a[0] = cp2; a[1] = cp1;
+	}
+	if (hfs_isordered(cp3, cp4)) {
+		b[0] = cp3; b[1] = cp4;
+	} else {
+		b[0] = cp4; b[1] = cp3;
+	}
+	a[2] = (struct cnode *)0xffffffff;	/* sentinel value */
+	b[2] = (struct cnode *)0xffffffff;	/* sentinel value */
+
+	/*
+	 * Build the lock list by merging the two ordered pairs, skipping
+	 * over duplicates.  The sentinels sort last, so once one pair is
+	 * exhausted the remaining entries are drawn from the other pair.
+	 */
+	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
+		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
+		if (k == 0 || tmp != list[k-1])
+			list[k++] = tmp;
+	}
+
+	/*
+	 * Now we can lock using list[0..k-1].
+	 * Skip over NULL entries.
+	 */
+	for (i = 0; i < k; ++i) {
+		if (list[i])
+			if ((error = hfs_lock(list[i], locktype))) {
+				/* Drop any locks we acquired. */
+				while (--i >= 0) {
+					if (list[i])
+						hfs_unlock(list[i]);
+				}
+				return (error);
+			}
+	}
+	return (0);
+}
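+
+/*
+ * Example usage (an illustrative sketch only; the variable names and the
+ * HFS_EXCLUSIVE_LOCK value below are assumptions about a caller, not part
+ * of this change).  A rename-style operation touching two directories and
+ * two files can take all of its cnode locks in one call:
+ *
+ *	if ((error = hfs_lockfour(fdcp, fcp, tdcp, tcp, HFS_EXCLUSIVE_LOCK)))
+ *		return (error);
+ *	... update the catalog ...
+ *	hfs_unlockfour(fdcp, fcp, tdcp, tcp);
+ *
+ * Any of the four pointers may be NULL and duplicates are locked only
+ * once, so the caller does not have to special-case those combinations.
+ */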
+
+
+/*
+ * Unlock a cnode.
+ */
+__private_extern__
+void
+hfs_unlock(struct cnode *cp)
+{
+	vnode_t rvp = NULLVP;
+	vnode_t vp = NULLVP;
+	u_int32_t c_flag;
+
+	/* System files need to keep track of owner */
+	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
+	    (cp->c_fileid > kHFSRootFolderID) &&
+	    (cp->c_datafork != NULL)) {
+		/*
+		 * The extents and bitmap file locks support
+		 * recursion and are always taken exclusive.
+		 */
+		if (cp->c_fileid == kHFSExtentsFileID ||
+		    cp->c_fileid == kHFSAllocationFileID) {
+			if (--cp->c_syslockcount > 0) {
+				return;
+			}
+		}
+	}
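+	/*
+	 * Capture the vnode post-processing flags and clear them while the
+	 * cnode lock is still held; the ubc_setsize()/vnode_put() work is
+	 * deferred until after the lock has been dropped below.
+	 */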
+	c_flag = cp->c_flag;
+	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
+	if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
+		vp = cp->c_vp;
+	}
+	if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
+		rvp = cp->c_rsrc_vp;
+	}
+
+	cp->c_lockowner = NULL;
+	lck_rw_done(&cp->c_rwlock);
+
+	/* Perform any vnode post processing after cnode lock is dropped. */
+	if (vp) {
+		if (c_flag & C_NEED_DATA_SETSIZE)
+			ubc_setsize(vp, 0);
+		if (c_flag & C_NEED_DVNODE_PUT)
+			vnode_put(vp);
+	}
+	if (rvp) {
+		if (c_flag & C_NEED_RSRC_SETSIZE)
+			ubc_setsize(rvp, 0);
+		if (c_flag & C_NEED_RVNODE_PUT)
+			vnode_put(rvp);
+	}
+}
+
+/*
+ * Unlock a pair of cnodes.
+ */
+__private_extern__
+void
+hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
+{
+	hfs_unlock(cp1);
+	if (cp2 != cp1)
+		hfs_unlock(cp2);
+}
+
+/*
+ * Unlock a group of cnodes.  Duplicate and NULL cnode pointers are skipped.
+ */
+__private_extern__
+void
+hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
+{
+	struct cnode * list[4];
+	int i, k = 0;
+
+	if (cp1) {
+		hfs_unlock(cp1);
+		list[k++] = cp1;
+	}
+	if (cp2) {
+		for (i = 0; i < k; ++i) {
+			if (list[i] == cp2)
+				goto skip1;
+		}
+		hfs_unlock(cp2);
+		list[k++] = cp2;
+	}
+skip1:
+	if (cp3) {
+		for (i = 0; i < k; ++i) {
+			if (list[i] == cp3)
+				goto skip2;
+		}
+		hfs_unlock(cp3);
+		list[k++] = cp3;
+	}
+skip2:
+	if (cp4) {
+		for (i = 0; i < k; ++i) {
+			if (list[i] == cp4)
+				return;
+		}
+		hfs_unlock(cp4);
+	}
+}
+
+
+/*
+ * Protect a cnode against truncation.
+ *
+ * Used mainly by read/write, since they don't hold the
+ * cnode lock across calls to the cluster layer.
+ *
+ * A process doing a truncation must take the lock
+ * exclusive; read/write processes can take it
+ * shared (non-exclusive).
+ */
+__private_extern__
+void
+hfs_lock_truncate(struct cnode *cp, int exclusive)
+{
+	if (cp->c_lockowner == current_thread())
+		panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);
+
+	if (exclusive)
+		lck_rw_lock_exclusive(&cp->c_truncatelock);
+	else
+		lck_rw_lock_shared(&cp->c_truncatelock);
+}
+
+__private_extern__
+void
+hfs_unlock_truncate(struct cnode *cp)
+{
+	lck_rw_done(&cp->c_truncatelock);
+}
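+
+/*
+ * Example usage (an illustrative sketch only; the caller shown below is
+ * an assumption, not part of this change).  A read path holds the
+ * truncate lock shared across the cluster call so a concurrent
+ * truncation cannot shrink the file underneath it:
+ *
+ *	hfs_lock_truncate(cp, FALSE);
+ *	error = cluster_read(vp, uio, fp->ff_size, 0);
+ *	hfs_unlock_truncate(cp);
+ *
+ * while a truncation takes the lock exclusive around the size change:
+ *
+ *	hfs_lock_truncate(cp, TRUE);
+ *	... shrink or grow the fork and update ff_size ...
+ *	hfs_unlock_truncate(cp);
+ */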
+
+
+
+