+/*
+ * Check ordering of two cnodes. Return true if they are in-order.
+ */
+static int
+hfs_isordered(struct cnode *cp1, struct cnode *cp2)
+{
+ if (cp1 == cp2)
+ return (0);
+ if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
+ return (1);
+ if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
+ return (0);
+ /*
+ * Locking order is cnode address order.
+ */
+ return (cp1 < cp2);
+}
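+
+/*
+ * Illustrative cases (a sketch of the ordering rules above, not new code):
+ * NULL sorts before any cnode and the 0xffffffff sentinel sorts after
+ * everything, which keeps the merge loop in hfs_lockfour below simple.
+ *
+ *	hfs_isordered(NULL, cp)      -> 1   NULL is "before" any cnode
+ *	hfs_isordered(cp, NULL)      -> 0
+ *	hfs_isordered(cp, cp)        -> 0   duplicates are not in-order
+ *	hfs_isordered(cp, sentinel)  -> 1   the sentinel is "after" everything
+ */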
+
+/*
+ * Acquire four cnode locks.
+ * - locked in cnode address order (lesser address first)
+ * - all or none of the locks are taken
+ * - only one lock is taken per cnode (duplicate cnodes are skipped)
+ * - some of the cnode pointers may be NULL
+ */
+int
+hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
+ struct cnode *cp4, enum hfslocktype locktype, struct cnode **error_cnode)
+{
+ struct cnode * a[3];
+ struct cnode * b[3];
+ struct cnode * list[4];
+ struct cnode * tmp;
+ int i, j, k;
+ int error;
+ if (error_cnode) {
+ *error_cnode = NULL;
+ }
+
+ if (hfs_isordered(cp1, cp2)) {
+ a[0] = cp1; a[1] = cp2;
+ } else {
+ a[0] = cp2; a[1] = cp1;
+ }
+ if (hfs_isordered(cp3, cp4)) {
+ b[0] = cp3; b[1] = cp4;
+ } else {
+ b[0] = cp4; b[1] = cp3;
+ }
+ a[2] = (struct cnode *)0xffffffff; /* sentinel value */
+ b[2] = (struct cnode *)0xffffffff; /* sentinel value */
+
+ /*
+ * Build the lock list, skipping over duplicates
+ */
+ for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
+ tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
+ if (k == 0 || tmp != list[k-1])
+ list[k++] = tmp;
+ }
+
+ /*
+ * Now we can lock using list[0 .. k-1].
+ * Skip over NULL entries.
+ */
+ for (i = 0; i < k; ++i) {
+ if (list[i] == NULL)
+ continue;
+ if ((error = hfs_lock(list[i], locktype))) {
+ /* Only stuff error_cnode if requested */
+ if (error_cnode) {
+ *error_cnode = list[i];
+ }
+ /* Drop any locks we acquired. */
+ while (--i >= 0) {
+ if (list[i])
+ hfs_unlock(list[i]);
+ }
+ return (error);
+ }
+ }
+ return (0);
+}
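+
+/*
+ * Usage sketch (illustrative only; the cnode names and the surrounding
+ * operation are hypothetical, not taken from this file): a rename-style
+ * operation that needs the source and destination directories and files
+ * locked at once could pair hfs_lockfour with hfs_unlockfour like this:
+ *
+ *	struct cnode *error_cnode;
+ *	int error;
+ *
+ *	error = hfs_lockfour(src_dcp, src_cp, dst_dcp, dst_cp,
+ *	                     HFS_EXCLUSIVE_LOCK, &error_cnode);
+ *	if (error)
+ *		return (error);   // all-or-nothing: no locks are held here
+ *	// ... operate on all four cnodes ...
+ *	hfs_unlockfour(src_dcp, src_cp, dst_dcp, dst_cp);
+ */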
+
+
+/*
+ * Unlock a cnode.
+ */
+void
+hfs_unlock(struct cnode *cp)
+{
+ vnode_t rvp = NULLVP;
+ vnode_t vp = NULLVP;
+ u_int32_t c_flag;
+ void *lockowner;
+
+ /*
+ * Only the extents and bitmap files support lock recursion.
+ */
+ if ((cp->c_fileid == kHFSExtentsFileID) ||
+ (cp->c_fileid == kHFSAllocationFileID)) {
+ if (--cp->c_syslockcount > 0) {
+ return;
+ }
+ }
+ c_flag = cp->c_flag;
+ cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
+
+ if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
+ vp = cp->c_vp;
+ }
+ if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
+ rvp = cp->c_rsrc_vp;
+ }
+
+ lockowner = cp->c_lockowner;
+ if (lockowner == current_thread()) {
+ cp->c_lockowner = NULL;
+ lck_rw_unlock_exclusive(&cp->c_rwlock);
+ } else {
+ lck_rw_unlock_shared(&cp->c_rwlock);
+ }
+
+ /* Perform any vnode post processing after cnode lock is dropped. */
+ if (vp) {
+ if (c_flag & C_NEED_DATA_SETSIZE)
+ ubc_setsize(vp, 0);
+ if (c_flag & C_NEED_DVNODE_PUT)
+ vnode_put(vp);
+ }
+ if (rvp) {
+ if (c_flag & C_NEED_RSRC_SETSIZE)
+ ubc_setsize(rvp, 0);
+ if (c_flag & C_NEED_RVNODE_PUT)
+ vnode_put(rvp);
+ }
+}
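+
+/*
+ * Illustrative pattern (a sketch; the policy for when these flags get set
+ * lives in callers outside this excerpt): a caller that shrinks a file to
+ * zero length while holding the cnode lock can set C_NEED_DATA_SETSIZE and
+ * let hfs_unlock() perform the ubc_setsize() call, since the vnode
+ * post-processing above runs only after the cnode lock has been dropped.
+ *
+ *	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK) == 0) {
+ *		// ... release the file's storage ...
+ *		cp->c_flag |= C_NEED_DATA_SETSIZE;
+ *		hfs_unlock(cp);   // drops c_rwlock, then calls ubc_setsize(vp, 0)
+ *	}
+ */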
+
+/*
+ * Unlock a pair of cnodes.
+ */
+void
+hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
+{
+ hfs_unlock(cp1);
+ if (cp2 != cp1)
+ hfs_unlock(cp2);
+}
+
+/*
+ * Unlock a group of cnodes.
+ * NULL pointers and duplicate cnodes are skipped.
+ */
+void
+hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
+{
+ struct cnode * list[4];
+ int i, k = 0;
+
+ if (cp1) {
+ hfs_unlock(cp1);
+ list[k++] = cp1;
+ }
+ if (cp2) {
+ for (i = 0; i < k; ++i) {
+ if (list[i] == cp2)
+ goto skip1;
+ }
+ hfs_unlock(cp2);
+ list[k++] = cp2;
+ }
+skip1:
+ if (cp3) {
+ for (i = 0; i < k; ++i) {
+ if (list[i] == cp3)
+ goto skip2;
+ }
+ hfs_unlock(cp3);
+ list[k++] = cp3;
+ }
+skip2:
+ if (cp4) {
+ for (i = 0; i < k; ++i) {
+ if (list[i] == cp4)
+ return;
+ }
+ hfs_unlock(cp4);
+ }
+}
+
+
+/*
+ * Protect a cnode against truncation.
+ *
+ * Used mainly by read/write, since they don't hold the
+ * cnode lock across calls to the cluster layer.
+ *
+ * A process doing a truncation must take the lock
+ * exclusive; read/write processes can take it
+ * shared. The locktype argument is the same as supplied to
+ * hfs_lock.
+ */
+void
+hfs_lock_truncate(struct cnode *cp, enum hfslocktype locktype)
+{
+ void * thread = current_thread();
+
+ if (cp->c_truncatelockowner == thread) {
+ /*
+ * Only HFS_RECURSE_TRUNCLOCK is allowed to recurse.
+ *
+ * This is needed on the hfs_vnop_pagein path where we need to ensure
+ * the file does not change sizes while we are paging in. However,
+ * we may already hold the lock exclusive due to another
+ * VNOP from earlier in the call stack. So if we already hold
+ * the truncate lock exclusive, allow it to proceed, but ONLY if
+ * it's in the recursive case.
+ */
+ if (locktype != HFS_RECURSE_TRUNCLOCK) {
+ panic("hfs_lock_truncate: cnode %p locked!", cp);
+ }
+ }
+ /* HFS_RECURSE_TRUNCLOCK takes a shared lock if it is not already locked */
+ else if ((locktype == HFS_SHARED_LOCK) || (locktype == HFS_RECURSE_TRUNCLOCK)) {
+ lck_rw_lock_shared(&cp->c_truncatelock);
+ cp->c_truncatelockowner = HFS_SHARED_OWNER;
+ }
+ else { /* must be an HFS_EXCLUSIVE_LOCK */
+ lck_rw_lock_exclusive(&cp->c_truncatelock);
+ cp->c_truncatelockowner = thread;
+ }
+}
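+
+/*
+ * Usage sketch (illustrative; the read path shown here is assumed, not part
+ * of this excerpt): read/write take the truncate lock shared around their
+ * cluster I/O so the file cannot be truncated underneath them, then release
+ * it with hfs_unlock_truncate:
+ *
+ *	hfs_lock_truncate(cp, HFS_SHARED_LOCK);
+ *	// ... cluster I/O that must see a stable file size ...
+ *	hfs_unlock_truncate(cp, 0);
+ */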
+
+
+/*
+ * Attempt to get the truncate lock. If it cannot be acquired, error out.
+ * This function is needed in the degenerate case where hfs_vnop_pagein runs
+ * during a forced unmount. To prevent deadlocks while a VM copy object is
+ * moving pages, hfs_vnop_pagein will temporarily need to disable V2 semantics.
+ */
+int
+hfs_try_trunclock(struct cnode *cp, enum hfslocktype locktype)
+{
+ void * thread = current_thread();
+ boolean_t didlock = false;
+
+ if (cp->c_truncatelockowner == thread) {
+ /*
+ * Only HFS_RECURSE_TRUNCLOCK is allowed to recurse; see the
+ * comment in hfs_lock_truncate above for the rationale.
+ */
+ if (locktype != HFS_RECURSE_TRUNCLOCK) {
+ panic("hfs_try_trunclock: cnode %p locked!", cp);
+ }
+ }
+ /* HFS_RECURSE_TRUNCLOCK takes a shared lock if it is not already locked */
+ else if ((locktype == HFS_SHARED_LOCK) || (locktype == HFS_RECURSE_TRUNCLOCK)) {
+ didlock = lck_rw_try_lock(&cp->c_truncatelock, LCK_RW_TYPE_SHARED);
+ if (didlock) {
+ cp->c_truncatelockowner = HFS_SHARED_OWNER;
+ }
+ }
+ else { /* must be an HFS_EXCLUSIVE_LOCK */
+ didlock = lck_rw_try_lock(&cp->c_truncatelock, LCK_RW_TYPE_EXCLUSIVE);
+ if (didlock) {
+ cp->c_truncatelockowner = thread;
+ }
+ }
+
+ return didlock;
+}
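+
+/*
+ * Usage sketch (illustrative only; the surrounding pagein-style code and the
+ * EAGAIN choice are hypothetical): a caller that must not block can try for
+ * the truncate lock and bail out if it is contended.
+ *
+ *	if (hfs_try_trunclock(cp, HFS_SHARED_LOCK) == 0) {
+ *		return (EAGAIN);   // lock not acquired; caller retries or fails
+ *	}
+ *	// ... page in with the file size pinned ...
+ *	hfs_unlock_truncate(cp, 0);
+ */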
+
+
+/*
+ * Unlock the truncate lock, which protects against size changes.
+ *
+ * The been_recursed argument is used when we may need to return
+ * from this function without actually unlocking the truncate lock.
+ */
+void
+hfs_unlock_truncate(struct cnode *cp, int been_recursed)
+{
+ void *thread = current_thread();
+
+ /*
+ * If been_recursed is nonzero AND the current owner of the truncate
+ * lock is our current thread, then we must have recursively taken the
+ * lock earlier on; that recursive acquire did not actually take the
+ * lock again, so there is nothing to drop and we simply return. (Had
+ * the lock not already been held, HFS_RECURSE_TRUNCLOCK would have
+ * taken a shared lock and we would fall through to the SHARED case
+ * below.)
+ *
+ * If been_recursed is zero (most of the time), we check the lockowner
+ * field to infer whether the lock was taken exclusive or shared, so we
+ * know which underlying unlock routine to call.
+ */
+ if (been_recursed) {
+ if (cp->c_truncatelockowner == thread) {
+ return;
+ }
+ }
+
+ /* HFS_EXCLUSIVE_LOCK */
+ if (thread == cp->c_truncatelockowner) {
+ cp->c_truncatelockowner = NULL;
+ lck_rw_unlock_exclusive(&cp->c_truncatelock);
+ }
+ /* HFS_SHARED_LOCK */
+ else {
+ lck_rw_unlock_shared(&cp->c_truncatelock);
+ }
+}
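+
+/*
+ * Pairing sketch (illustrative; the surrounding pagein-style code is
+ * hypothetical): a caller that used HFS_RECURSE_TRUNCLOCK passes
+ * been_recursed = 1 here, so a purely recursive "acquire" (which did not
+ * actually take the lock) is not paid back with a real unlock, while a
+ * non-recursive shared acquire still gets dropped normally.
+ *
+ *	hfs_lock_truncate(cp, HFS_RECURSE_TRUNCLOCK);
+ *	// ... paging that must not race a size change ...
+ *	hfs_unlock_truncate(cp, 1);
+ */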