+extern void hfs_chashinit_finish(struct hfsmount *hfsmp);
+extern void hfs_delete_chash(struct hfsmount *hfsmp);
+extern int hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp);
+extern void hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp);
+extern void hfs_chash_rehash(struct hfsmount *hfsmp, struct cnode *cp1, struct cnode *cp2);
+extern void hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int flags);
+extern void hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp);
+
+extern struct vnode * hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc,
+ int skiplock, int allow_deleted);
+extern struct cnode * hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp,
+ int wantrsrc, int skiplock, int *out_flags, int *hflags);
+extern int hfs_chash_snoop(struct hfsmount *, ino_t, int, int (*)(const cnode_t *, void *), void *);
+extern int hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp,
+ cnid_t cnid, struct cat_attr *cattr, int *error);
+
+extern int hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid);
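+
+/*
+ * Illustrative sketch only (not lifted from the implementation): a
+ * lookup through hfs_chash_getvnode based on the declarations above.
+ * The assumptions that skiplock == 0 returns the cnode locked and that
+ * the returned vnode carries an iocount requiring vnode_put should be
+ * verified against hfs_chash.c:
+ *
+ *	struct vnode *vp;
+ *
+ *	vp = hfs_chash_getvnode(hfsmp, inum, 0, 0, 0);
+ *	if (vp) {
+ *		// ... operate on the locked cnode ...
+ *		hfs_unlock(VTOC(vp));
+ *		vnode_put(vp);
+ *	}
+ */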
+
+/*
+ * HFS cnode lock functions.
+ *
+ * HFS Locking Order:
+ *
+ * 1. cnode truncate lock (if needed) -- see below for more on this
+ *
+ * + hfs_vnop_pagein/out handles recursive use of this lock (by
+ * using flag option HFS_LOCK_SKIP_IF_EXCLUSIVE) although there
+ * are issues with this (see #16620278).
+ *
+ * + If locking multiple cnodes then the truncate lock must be taken on
+ * all (in address order), before taking the cnode locks.
+ *
+ * 2. Hot Files stage mutex (grabbed before manipulating individual vnodes/cnodes)
+ *
+ * 3. cnode locks in address order (if needed)
+ *
+ * 4. journal (if needed)
+ *
+ * 5. Hot Files B-Tree lock (not treated as a system file)
+ *
+ * 6. system files (as needed)
+ *
+ * A. Catalog B-tree file
+ * B. Attributes B-tree file
+ * C. Startup file (if there is one)
+ * D. Allocation Bitmap file (always exclusive, supports recursion)
+ * E. Overflow Extents B-tree file (always exclusive, supports recursion)
+ *
+ * 7. hfs mount point (always last)
+ *
+ *
+ * I. HFS cnode hash lock (outside the numbered order above: no new locks may be acquired while holding it, so it is always taken last)
+ */
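+
+/*
+ * A hedged sketch of the ordering above for a simple metadata update;
+ * error handling is elided, and hfs_start_transaction /
+ * hfs_end_transaction (the usual journal entry points) and SFL_CATALOG
+ * are declared elsewhere in hfs. Treat this as an illustration of the
+ * lock hierarchy, not a canonical code path:
+ *
+ *	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);	// 1.
+ *	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);		// 3.
+ *	hfs_start_transaction(hfsmp);					// 4.
+ *	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG,
+ *					HFS_EXCLUSIVE_LOCK);		// 6A.
+ *	// ... update the catalog record ...
+ *	hfs_systemfile_unlock(hfsmp, lockflags);
+ *	hfs_end_transaction(hfsmp);
+ *	hfs_unlock(cp);
+ *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ */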
+
+/*
+ * -- The Truncate Lock --
+ *
+ * The truncate lock is used for a few purposes (more than its name
+ * might suggest). The first thing to note is that the cnode lock
+ * cannot be held whilst issuing any I/O other than metadata changes,
+ * so the truncate lock, in either shared or exclusive form, must
+ * usually be held in these cases. This includes calls to ubc_setsize
+ * where the new size is less than the current size known to the VM
+ * subsystem, for two reasons: (a) reaping pages can block (e.g. on
+ * pages that are busy or being cleaned), and (b) reaping pages might
+ * require a page-in for tasks that have that region mapped privately.
+ * The same applies to other calls into the VM subsystem.
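+ *
+ * For instance, a shrinking truncate must keep the truncate lock
+ * exclusive but drop the cnode lock around the ubc_setsize call. A
+ * heavily simplified, non-normative sketch (the real path in
+ * hfs_truncate does considerably more):
+ *
+ *	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ *	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ *	...
+ *	hfs_unlock(cp);			// cannot hold the cnode lock here
+ *	ubc_setsize(vp, new_size);	// may block reaping busy pages
+ *	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ *	// ... now safe to release the on-disk blocks ...
+ *	hfs_unlock(cp);
+ *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);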
+ *
+ * Here are some (but not necessarily all) cases that the truncate
+ * lock protects for:
+ *
+ *  + When reading and writing a file, we hold the truncate lock
+ *    shared to ensure that the underlying blocks cannot be deleted;
+ *    on systems that use content protection, this also ensures that
+ *    the encryption keys remain valid (the underlying layers might
+ *    still be using them).
+ *
+ * + We need to protect against the following sequence of events:
+ *
+ * A file is initially size X. A thread issues an append to that
+ * file. Another thread truncates the file and then extends it
+ *    to a new size Y. Now the append can be applied at offset X
+ * and then the data is lost when the file is truncated; or it
+ * could be applied after the truncate, i.e. at offset 0; or it
+ *    can be applied at offset Y. What we *cannot* do is apply the
+ *    append at offset X and still have that data visible after the
+ *    truncate and extend have completed. (Note that we are free to
+ *    choose when we apply the append operation.)
+ *
+ * To solve this, we keep things simple and take the truncate lock
+ * exclusively in order to sequence the append with other size
+ * changes. Therefore any size change must take the truncate lock
+ * exclusively.
+ *
+ * (N.B. we could do better and allow readers to run concurrently
+ * during the append and other size changes.)
+ *
+ * So here are the rules:
+ *
+ *  + If you plan to change ff_size, you must take the truncate lock
+ *    exclusively, *but* be careful what I/O you do whilst you hold
+ *    it exclusively, and avoid such I/O if you can: if the VM
+ *    subsystem is working on some pages from a different thread and
+ *    you try to do I/O against those same pages, we will deadlock.
+ *    (See #16620278.)
+ *
+ * + If you do anything that requires blocks to not be deleted or
+ * encryption keys to remain valid, you must take the truncate lock
+ * shared.
+ *
+ *  + It follows, therefore, that if you want to delete blocks or
+ *    delete keys, you must take the truncate lock exclusively. Note
+ *    that for asynchronous writes, the truncate lock will be dropped
+ *    after the I/O is issued but before it completes, which means
+ *    that before manipulating keys, you *must* call
+ *    vnode_wait_for_writes in addition to holding the truncate lock.
+ *
+ * N.B. ff_size is actually protected by the cnode lock and so you
+ * must hold the cnode lock exclusively to change it and shared to
+ * read it.
+ *
+ */
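+
+/*
+ * Putting the rules together, a read path looks roughly like the
+ * following. This is a sketch of the rules above, not the actual
+ * hfs_vnop_read; cluster_read is the standard cluster-layer KPI:
+ *
+ *	hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
+ *	if ((retval = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)) == 0) {
+ *		filesize = VTOF(vp)->ff_size;	// cnode lock shared to read ff_size
+ *		hfs_unlock(cp);			// drop before issuing I/O
+ *		retval = cluster_read(vp, uio, filesize, ioflag);
+ *	}
+ *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ */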
+
+enum hfs_locktype {
+ HFS_SHARED_LOCK = 1,
+ HFS_EXCLUSIVE_LOCK = 2
+};
+
+/* Option flags for cnode and truncate lock functions */
+enum hfs_lockflags {
+ HFS_LOCK_DEFAULT = 0x0, /* Default flag, no options provided */
+	HFS_LOCK_ALLOW_NOEXISTS = 0x1,	/* Allow locking of all cnodes, including cnodes marked deleted with no catalog entry */
+ HFS_LOCK_SKIP_IF_EXCLUSIVE = 0x2, /* Skip locking if the current thread already holds the lock exclusive */
+
+ // Used when you do not want to check return from hfs_lock
+ HFS_LOCK_ALWAYS = HFS_LOCK_ALLOW_NOEXISTS,
+};
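+
+/*
+ * HFS_LOCK_SKIP_IF_EXCLUSIVE exists for paths such as
+ * hfs_vnop_pagein/out that can be entered while the current thread
+ * already owns the truncate lock exclusively (see the locking-order
+ * comment above). A sketch; note that the flag must be passed
+ * symmetrically to the unlock:
+ *
+ *	hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);
+ *	// ... page in/out ...
+ *	hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
+ */
+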
+#define HFS_SHARED_OWNER (void *)0xffffffff
+
+void hfs_lock_always(cnode_t *cnode, enum hfs_locktype);
+int hfs_lock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
+bool hfs_lock_upgrade(cnode_t *cp);
+int hfs_lockpair(struct cnode *, struct cnode *, enum hfs_locktype);
+int hfs_lockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *,
+ enum hfs_locktype, struct cnode **);
+void hfs_unlock(struct cnode *);
+void hfs_unlockpair(struct cnode *, struct cnode *);
+void hfs_unlockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *);
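+
+/*
+ * When an operation involves two cnodes (e.g. a directory and a file
+ * within it), hfs_lockpair takes both locks in address order, per rule
+ * 3 of the locking-order comment above; a sketch:
+ *
+ *	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
+ *		return error;
+ *	// ... both cnodes are now locked ...
+ *	hfs_unlockpair(dcp, cp);
+ */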
+
+void hfs_lock_truncate(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
+bool hfs_truncate_lock_upgrade(struct cnode *cp);
+void hfs_truncate_lock_downgrade(struct cnode *cp);
+void hfs_unlock_truncate(struct cnode *, enum hfs_lockflags);
+int hfs_try_trunclock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
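+
+/*
+ * A sketch of the upgrade pattern, assuming the usual lck_rw semantics
+ * in which a failed shared-to-exclusive upgrade drops the lock
+ * entirely (verify against the implementation before relying on this):
+ *
+ *	if (!hfs_truncate_lock_upgrade(cp)) {
+ *		// Upgrade failed and the shared hold was dropped;
+ *		// retake exclusively and revalidate any cached state.
+ *		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ *	}
+ */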
+
+extern int hfs_systemfile_lock(struct hfsmount *, int, enum hfs_locktype);
+extern void hfs_systemfile_unlock(struct hfsmount *, int);
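+
+/*
+ * hfs_systemfile_lock takes a mask of SFL_* bits (defined in hfs.h,
+ * e.g. SFL_CATALOG, SFL_ATTRIBUTE) and returns the set of flags it
+ * actually took, which must be handed back to hfs_systemfile_unlock;
+ * a sketch:
+ *
+ *	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE,
+ *					HFS_EXCLUSIVE_LOCK);
+ *	// ... B-tree operations ...
+ *	hfs_systemfile_unlock(hfsmp, lockflags);
+ */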
+
+void hfs_clear_might_be_dirty_flag(cnode_t *cp);
+
+// cnode must be locked
+static inline __attribute__((pure))
+bool hfs_has_rsrc(const cnode_t *cp)
+{
+ if (cp->c_rsrcfork)
+ return cp->c_rsrcfork->ff_blocks > 0;
+	else
+		// c_blocks counts blocks for both forks together, so an
+		// excess over the data fork's blocks implies a non-empty
+		// resource fork on disk
+		return cp->c_datafork && cp->c_blocks > cp->c_datafork->ff_blocks;
+}