+ /*
+ * Verify the source file is not in use by anyone besides us.
+ *
+ * This function is typically invoked by a namespace handler
+ * process responding to a temporarily stalled system call.
+ * The FD that it is working off of is opened O_EVTONLY, so
+ * it really has no active usecounts (the kusecount from O_EVTONLY
+ * is subtracted from the total usecounts).
+ *
+ * As a result, we shouldn't have any active usecounts against
+ * this vnode when we go to check it below.
+ */
+ if (vnode_isinuse(from_cp->c_vp, 0))
+ return EBUSY;
+
+ if (include_rsrc && from_cp->c_rsrc_vp) {
+ if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
+ return EBUSY;
+
+ /*
+ * In the code below, if the destination file doesn't have a
+ * c_rsrcfork then we don't create it which means we cannot
+ * transfer the ff_invalidranges and cf_vblocks fields. These
+ * shouldn't be set because we flush the resource fork before
+ * calling this function but there is a tiny window when we
+ * did not have any locks...
+ */
+ if (!to_cp->c_rsrcfork
+ && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
+ || from_cp->c_rsrcfork->ff_unallocblocks)) {
+ /*
+ * The file isn't really busy now but something did slip
+ * in and tinker with the file while we didn't have any
+ * locks, so this is the most meaningful return code for
+ * the caller.
+ */
+ return EBUSY;
+ }
+ }
+
+ // Check the destination file is empty
+ if (to_cp->c_datafork->ff_blocks
+ || to_cp->c_datafork->ff_size
+ || (include_rsrc
+ && (to_cp->c_blocks
+ || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
+ return EFBIG;
+ }
+
+ if ((error = hfs_start_transaction (hfsmp)))
+ return error;
+
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
+ HFS_EXCLUSIVE_LOCK);
+
+ // filefork_t is 128 bytes which should be OK
+ filefork_t rfork_buf, *from_rfork = NULL;
+
+ if (include_rsrc) {
+ from_rfork = from_cp->c_rsrcfork;
+
+ /*
+ * Creating resource fork vnodes is expensive, so just get
+ * the fork data if we need it.
+ */
+ if (!from_rfork && hfs_has_rsrc(from_cp)) {
+ from_rfork = &rfork_buf;
+
+ from_rfork->ff_cp = from_cp;
+ TAILQ_INIT(&from_rfork->ff_invalidranges);
+
+ error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
+ &from_rfork->ff_data);
+
+ if (error)
+ goto exit;
+ }
+ }
+
+ /*
+ * From here on, any failures mean that we might be leaving things
+ * in a weird or inconsistent state. Ideally, we should back out
+ * all the changes, but to do that properly we need to fix
+ * MoveData. We'll save fixing that for another time. For now,
+ * just return EIO in all cases to the caller so that they know.
+ */
+ return_EIO_on_error = true;
+
+ // Sampled before hfs_move_fork wipes from_cp's data-fork state below
+ bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
+
+ // Move the data fork
+ if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
+ to_cp->c_datafork, to_cp))) {
+ goto exit;
+ }
+
+ SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
+ SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
+
+ // We move the resource fork later
+
+ /*
+ * Note that because all we're doing is moving the extents around,
+ * we can probably do this in a single transaction: Each extent
+ * record (group of 8) is 64 bytes. A extent overflow B-Tree node
+ * is typically 4k. This means each node can hold roughly ~60
+ * extent records == (480 extents).
+ *
+ * If a file was massively fragmented and had 20k extents, this
+ * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
+ * index nodes, for half of the operation. (inserting or
+ * deleting). So if we're manipulating 80-100 nodes, this is
+ * basically 320k of data to write to the journal in a bad case.
+ */
+ if (data_overflow_extents) {
+ if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
+ goto exit;
+ }
+
+ if (from_rfork && overflow_extents(from_rfork)) {
+ if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
+ goto exit;
+ }
+
+ // Touch times
+ from_cp->c_touch_acctime = TRUE;
+ from_cp->c_touch_chgtime = TRUE;
+ from_cp->c_touch_modtime = TRUE;
+ hfs_touchtimes(hfsmp, from_cp);
+
+ to_cp->c_touch_acctime = TRUE;
+ to_cp->c_touch_chgtime = TRUE;
+ to_cp->c_touch_modtime = TRUE;
+ hfs_touchtimes(hfsmp, to_cp);
+
+ struct cat_fork dfork_buf;
+ const struct cat_fork *dfork, *rfork;
+
+ /*
+ * NOTE: rfork is deliberately built from from_rfork here -- the
+ * resource fork's in-memory state has not been moved into to_cp
+ * yet (that happens below), but to_cp's catalog record must
+ * already describe the moved fork data.
+ */
+ dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, NULL,
+ &dfork_buf, hfsmp->blockSize);
+ rfork = hfs_prepare_fork_for_update(from_rfork, NULL,
+ &rfork_buf.ff_data, hfsmp->blockSize);
+
+ // Update the catalog nodes, to_cp first
+ if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
+ dfork, rfork))) {
+ goto exit;
+ }
+
+ CLR(to_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
+
+ // Update in-memory resource fork data here
+ if (from_rfork) {
+ // Update c_blocks
+ uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
+
+ from_cp->c_blocks -= moving;
+ to_cp->c_blocks += moving;
+
+ // Update to_cp's resource data if it has it
+ filefork_t *to_rfork = to_cp->c_rsrcfork;
+ if (to_rfork) {
+ to_rfork->ff_invalidranges = from_rfork->ff_invalidranges;
+ to_rfork->ff_data = from_rfork->ff_data;
+
+ // Deal with ubc_setsize
+ hfs_rsrc_setsize(to_cp);
+ }
+
+ // Wipe out the resource fork in from_cp
+ rl_init(&from_rfork->ff_invalidranges);
+ bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
+
+ // Deal with ubc_setsize
+ hfs_rsrc_setsize(from_cp);
+ }
+
+ // Currently unnecessary, but might be useful in future...
+ dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, NULL, &dfork_buf,
+ hfsmp->blockSize);
+ rfork = hfs_prepare_fork_for_update(from_rfork, NULL, &rfork_buf.ff_data,
+ hfsmp->blockSize);
+
+ // Update from_cp
+ if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
+ dfork, rfork))) {
+ goto exit;
+ }
+
+ CLR(from_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
+
+exit:
+ /*
+ * lockflags is only non-zero once the system files were locked,
+ * which only happens after the transaction was started; the early
+ * returns above bypass this cleanup intentionally.
+ */
+ if (lockflags) {
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ hfs_end_transaction(hfsmp);
+ }
+
+ if (error && error != EIO && return_EIO_on_error) {
+ printf("hfs_move_data: encountered error %d\n", error);
+ error = EIO;
+ }
+
+ return error;
+}
+
+/*
+ * Transfer the catalog and runtime state of srcfork into dstfork.
+ *
+ * Invalid ranges are handed across intact so that pending IO does not
+ * have to be forced out first, and every non overflow-extent extent
+ * moves to the destination as part of the fork-data copy.
+ *
+ * The caller must already have verified that the destination fork is
+ * empty.
+ */
+static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
+ filefork_t *dstfork, cnode_t *dst_cp)
+{
+ // Hand the invalid-range list across, leaving the source list empty
+ TAILQ_SWAP(&dstfork->ff_invalidranges, &srcfork->ff_invalidranges,
+ rl_entry, rl_link);
+ rl_remove_all(&srcfork->ff_invalidranges);
+
+ // Copy the entire fork-data structure across, then wipe the source
+ dstfork->ff_data = srcfork->ff_data;
+ bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
+
+ // Re-account the moved blocks (allocated + unallocated) on both cnodes
+ uint32_t moved = dstfork->ff_blocks + dstfork->ff_unallocblocks;
+ src_cp->c_blocks -= moved;
+ dst_cp->c_blocks += moved;
+
+ return 0;
+}
+
+
+#include <i386/panic_hooks.h>
+
+/*
+ * Context recorded for the fsync panic hook.  The panic_hook_t must
+ * remain the first member: hfs_fsync_panic_hook() downcasts the
+ * panic_hook_t * it receives back to this structure.
+ */
+struct hfs_fsync_panic_hook {
+ panic_hook_t hook; // must stay first; see cast in hfs_fsync_panic_hook()
+ struct cnode *cp; // cnode being fsync'd, dumped at panic time
+};
+
+/*
+ * Panic hook installed around fsync: logs the cnode address, the
+ * physical region just before it, and dumps the surrounding memory so
+ * a panic during fsync leaves useful forensic data.
+ */
+static void hfs_fsync_panic_hook(panic_hook_t *hook_)
+{
+ struct hfs_fsync_panic_hook *hook = (struct hfs_fsync_panic_hook *)hook_;
+ extern int kdb_log(const char *fmt, ...);
+
+ // Get the physical region just before cp
+ panic_phys_range_t range;
+ uint64_t phys = 0; // zero-init: logged below even when the lookup fails
+
+ if (panic_phys_range_before(hook->cp, &phys, &range)) {
+ /*
+ * phys and the range fields are integers, so print them with
+ * integer conversions -- using %p for them is a printf
+ * format-specifier mismatch (undefined behavior).
+ */
+ kdb_log("cp = %p, phys = 0x%llx, prev (%llu: 0x%llx-0x%llx)\n",
+ hook->cp, (unsigned long long)phys,
+ (unsigned long long)range.type,
+ (unsigned long long)range.phys_start,
+ (unsigned long long)(range.phys_start + range.len));
+ } else {
+ kdb_log("cp = %p, phys = 0x%llx, prev (!)\n",
+ hook->cp, (unsigned long long)phys);
+ }
+
+ // Dump from one page below cp: the preceding page plus two more (12 KiB)
+ panic_dump_mem((void *)(((vm_offset_t)hook->cp - 4096) & ~4095), 12288);
+
+ kdb_log("\n");
+}
+
+
+/*
+ * cnode must be locked
+ */
+int
+hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode, struct proc *p)