+
+/*
+ * Invalidate the physical block numbers associated with buffer cache blocks
+ * in the given extent of the given vnode.
+ */
+struct hfs_inval_blk_no {
+ daddr64_t sectorStart;
+ daddr64_t sectorCount;
+};
+static int
+hfs_invalidate_block_numbers_callback(buf_t bp, void *args_in)
+{
+ daddr64_t blkno;
+ struct hfs_inval_blk_no *args;
+
+ blkno = buf_blkno(bp);
+ args = args_in;
+
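+ /*
+ * Setting the physical block number back to the logical block number
+ * marks the mapping as invalid, so the next write of this buffer will
+ * go through VNOP_BLOCKMAP and pick up the extent's new location.
+ */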
+ if (blkno >= args->sectorStart && blkno < args->sectorStart+args->sectorCount)
+ buf_setblkno(bp, buf_lblkno(bp));
+
+ return BUF_RETURNED;
+}
+static void
+hfs_invalidate_sectors(struct vnode *vp, daddr64_t sectorStart, daddr64_t sectorCount)
+{
+ struct hfs_inval_blk_no args;
+ args.sectorStart = sectorStart;
+ args.sectorCount = sectorCount;
+
+ buf_iterate(vp, hfs_invalidate_block_numbers_callback, BUF_SCAN_DIRTY|BUF_SCAN_CLEAN, &args);
+}
+
+
+/*
+ * Copy the contents of an extent to a new location. Also invalidates the
+ * physical block number of any buffer cache block in the copied extent
+ * (so that if the block is written, it will go through VNOP_BLOCKMAP to
+ * determine the new physical block number).
+ */
+static int
+hfs_copy_extent(
+ struct hfsmount *hfsmp,
+ struct vnode *vp, /* The file whose extent is being copied. */
+ u_int32_t oldStart, /* The start of the source extent. */
+ u_int32_t newStart, /* The start of the destination extent. */
+ u_int32_t blockCount, /* The number of allocation blocks to copy. */
+ vfs_context_t context)
+{
+ int err = 0;
+ size_t bufferSize;
+ void *buffer = NULL;
+ struct vfsioattr ioattr;
+ buf_t bp = NULL;
+ off_t resid;
+ size_t ioSize;
+ u_int32_t ioSizeSectors; /* Device sectors in this I/O */
+ daddr64_t srcSector, destSector;
+ u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_logical_block_size;
+
+ /*
+ * Sanity check that we have locked the vnode of the file we're copying.
+ *
+ * But since hfs_systemfile_lock() doesn't actually take the lock on
+ * the allocation file if a journal is active, ignore the check if the
+ * file being copied is the allocation file.
+ */
+ struct cnode *cp = VTOC(vp);
+ if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread())
+ panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp);
+
+ /*
+ * Determine the I/O size to use
+ *
+ * NOTE: Many external drives will result in an ioSize of 128KB.
+ * TODO: Should we use a larger buffer, doing several consecutive
+ * reads, then several consecutive writes?
+ */
+ vfs_ioattr(hfsmp->hfs_mp, &ioattr);
+ bufferSize = MIN(ioattr.io_maxreadcnt, ioattr.io_maxwritecnt);
+ if (kmem_alloc(kernel_map, (vm_offset_t*) &buffer, bufferSize))
+ return ENOMEM;
+
+ /* Get a buffer for doing the I/O */
+ bp = buf_alloc(hfsmp->hfs_devvp);
+ buf_setdataptr(bp, (uintptr_t)buffer);
+
+ resid = (off_t) blockCount * (off_t) hfsmp->blockSize;
+ srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
+ destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
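+ /*
+ * Copy the extent in chunks of at most bufferSize bytes, reading from
+ * the old location and writing to the new one through the device vnode.
+ */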
+ while (resid > 0) {
+ ioSize = MIN(bufferSize, (size_t) resid);
+ ioSizeSectors = ioSize / hfsmp->hfs_logical_block_size;
+
+ /* Prepare the buffer for reading */
+ buf_reset(bp, B_READ);
+ buf_setsize(bp, ioSize);
+ buf_setcount(bp, ioSize);
+ buf_setblkno(bp, srcSector);
+ buf_setlblkno(bp, srcSector);
+
+ /* Do the read */
+ err = VNOP_STRATEGY(bp);
+ if (!err)
+ err = buf_biowait(bp);
+ if (err) {
+ printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err);
+ break;
+ }
+
+ /* Prepare the buffer for writing */
+ buf_reset(bp, B_WRITE);
+ buf_setsize(bp, ioSize);
+ buf_setcount(bp, ioSize);
+ buf_setblkno(bp, destSector);
+ buf_setlblkno(bp, destSector);
+ if (vnode_issystem(vp) && journal_uses_fua(hfsmp->jnl))
+ buf_markfua(bp);
+
+ /* Do the write */
+ vnode_startwrite(hfsmp->hfs_devvp);
+ err = VNOP_STRATEGY(bp);
+ if (!err)
+ err = buf_biowait(bp);
+ if (err) {
+ printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err);
+ break;
+ }
+
+ resid -= ioSize;
+ srcSector += ioSizeSectors;
+ destSector += ioSizeSectors;
+ }
+ if (bp)
+ buf_free(bp);
+ if (buffer)
+ kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize);
+
+ /* Make sure all writes have been flushed to disk. */
+ if (vnode_issystem(vp) && !journal_uses_fua(hfsmp->jnl)) {
+ err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
+ if (err) {
+ printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err);
+ err = 0; /* Don't fail the copy. */
+ }
+ }
+
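+ /*
+ * Now that the data is safely at its new location, invalidate any
+ * buffer cache blocks that still refer to the old sectors so future
+ * writes get remapped via VNOP_BLOCKMAP.
+ */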
+ if (!err)
+ hfs_invalidate_sectors(vp, (daddr64_t)oldStart*sectorsPerBlock, (daddr64_t)blockCount*sectorsPerBlock);
+
+ return err;
+}
+
+
+static int
+hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state)
+{
+ bcopy(state, record, sizeof(HFSPlusExtentRecord));
+ return 0;
+}
+
+/*
+ * Reclaim space at the end of a volume, used by a given file.
+ *
+ * This routine attempts to move any extent which contains allocation blocks
+ * at or after "startblk." A separate transaction is used to do the move.
+ * The contents of any moved extents are read and written via the volume's
+ * device vnode -- NOT via "vp." During the move, moved blocks which are part
+ * of a transaction have their physical block numbers invalidated so they will
+ * eventually be written to their new locations.
+ *
+ * Inputs:
+ * hfsmp The volume being resized.
+ * startblk Blocks >= this allocation block need to be moved.
+ * locks Which locks need to be taken for the given system file.
+ * vp The vnode for the system file.
+ *
+ * The caller, hfs_reclaimspace(), grabs the cnode lock on non-system
+ * files before calling this function.
+ *
+ * Outputs:
+ * blks_moved Total number of allocation blocks moved by this routine.
+ */
+static int
+hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, u_int32_t *blks_moved, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ int i;
+ u_long datablks;
+ u_long end_block;
+ u_int32_t oldStartBlock;
+ u_int32_t newStartBlock;
+ u_int32_t oldBlockCount;
+ u_int32_t newBlockCount;
+ struct filefork *fp;
+ struct cnode *cp;
+ int is_sysfile;
+ int took_truncate_lock = 0;
+ struct BTreeIterator *iterator = NULL;
+ u_int8_t forktype;
+ u_int32_t fileID;
+ u_int32_t alloc_flags;
+
+ /* If there is no vnode for this file, then there's nothing to do. */
+ if (vp == NULL)
+ return 0;
+
+ cp = VTOC(vp);
+ fileID = cp->c_cnid;
+ is_sysfile = vnode_issystem(vp);
+ forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0;
+
+ /* Flush all the buffer cache blocks and cluster pages associated with
+ * this vnode.
+ *
+ * If the current vnode is a system vnode, all the buffer cache blocks
+ * associated with it should already have been synced to disk as part of
+ * the journal flush in hfs_truncatefs(). Normally there should not be
+ * buffer cache blocks for regular files, but objects like symlinks can
+ * have buffer cache blocks associated with the vnode, so we always call
+ * buf_flushdirtyblks(). Resource fork data for directory hard links is
+ * written directly through the buffer cache of the device vnode, which
+ * should also have been synced as part of the journal flush in
+ * hfs_truncatefs().
+ *
+ * Flushing cluster pages is the normal case for regular files, and it
+ * really should not do anything for system files. But to be sure that
+ * all blocks associated with this vnode are synced to disk, we call both
+ * the buffer cache and cluster layer functions.
+ */
+ buf_flushdirtyblks(vp, MNT_NOWAIT, 0, "hfs_reclaim_file");
+
+ if (!is_sysfile) {
+ /* The caller grabs cnode lock for non-system files only, therefore
+ * we unlock only non-system files before calling cluster layer.
+ */
+ hfs_unlock(cp);
+ hfs_lock_truncate(cp, TRUE);
+ took_truncate_lock = 1;
+ }
+ (void) cluster_push(vp, 0);
+ if (!is_sysfile) {
+ error = hfs_lock(cp, HFS_FORCE_LOCK);
+ if (error) {
+ hfs_unlock_truncate(cp, TRUE);
+ return error;
+ }
+
+ /* If the file no longer exists, nothing left to do */
+ if (cp->c_flag & C_NOEXISTS) {
+ hfs_unlock_truncate(cp, TRUE);
+ return 0;
+ }
+ }
+
+ /* Wait for any in-progress writes to this vnode to complete, so that we'll
+ * be copying consistent bits. (Otherwise, it's possible that an async
+ * write will complete to the old extent after we read from it. That
+ * could lead to corruption.)
+ */
+ error = vnode_waitforwrites(vp, 0, 0, 0, "hfs_reclaim_file");
+ if (error) {
+ printf("hfs_reclaim_file: Error %d from vnode_waitforwrites\n", error);
+ if (took_truncate_lock) {
+ hfs_unlock_truncate(cp, TRUE);
+ }
+ return error;
+ }
+
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: Start relocating %sfork for fileid=%u name=%.*s\n", (forktype ? "rsrc" : "data"), fileID, cp->c_desc.cd_namelen, cp->c_desc.cd_nameptr);
+ }
+
+ /* We always need the allocation bitmap and extents B-tree */
+ locks |= SFL_BITMAP | SFL_EXTENTS;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_file: hfs_start_transaction returned %d\n", error);
+ if (took_truncate_lock) {
+ hfs_unlock_truncate(cp, TRUE);
+ }
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK);
+ fp = VTOF(vp);
+ datablks = 0;
+ *blks_moved = 0;
+
+ /* Relocate non-overflow extents */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+ if (fp->ff_extents[i].blockCount == 0)
+ break;
+ oldStartBlock = fp->ff_extents[i].startBlock;
+ oldBlockCount = fp->ff_extents[i].blockCount;
+ datablks += oldBlockCount;
+ end_block = oldStartBlock + oldBlockCount;
+ /* Check if the file overlaps the target space */
+ if (end_block > startblk) {
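+ /*
+ * Allocate a replacement extent of the same size. It must be
+ * contiguous, and system files are allocated from the metadata zone.
+ */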
+ alloc_flags = HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS;
+ if (is_sysfile) {
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ }
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ if (error) {
+ if (!is_sysfile && ((error == dskFulErr) || (error == ENOSPC))) {
+ /* Try allocating again using the metadata zone */
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ }
+ if (error) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
+ goto fail;
+ } else {
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) success for fileID=%u %u:(%u,%u)\n", fileID, i, newStartBlock, newBlockCount);
+ }
+ }
+ }
+
+ /* Copy data from old location to new location */
+ error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, newBlockCount, context);
+ if (error) {
+ printf("hfs_reclaim_file: hfs_copy_extent error=%d for fileID=%u %u:(%u,%u) to %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount, i, newStartBlock, newBlockCount);
+ if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS)) {
+ hfs_mark_volume_inconsistent(hfsmp);
+ }
+ goto fail;
+ }
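+ /*
+ * Record the new location in the in-memory fork. The on-disk copy is
+ * updated below: the volume header for system files, hfs_update() for
+ * everything else.
+ */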
+ fp->ff_extents[i].startBlock = newStartBlock;
+ cp->c_flag |= C_MODIFIED;
+ *blks_moved += newBlockCount;
+
+ /* Deallocate the old extent */
+ error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
+ if (error) {
+ printf("hfs_reclaim_file: BlockDeallocate returned %d\n", error);
+ hfs_mark_volume_inconsistent(hfsmp);
+ goto fail;
+ }
+
+ /* If this is a system file, sync the volume header on disk */
+ if (is_sysfile) {
+ error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
+ if (error) {
+ printf("hfs_reclaim_file: hfs_flushvolumeheader returned %d\n", error);
+ hfs_mark_volume_inconsistent(hfsmp);
+ goto fail;
+ }
+ }
+
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_file: Relocated %u:(%u,%u) to %u:(%u,%u)\n", i, oldStartBlock, oldBlockCount, i, newStartBlock, newBlockCount);
+ }
+ }
+ }
+
+ /* Relocate overflow extents (if any) */
+ if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) {
+ struct FSBufferDescriptor btdata;
+ HFSPlusExtentRecord record;
+ HFSPlusExtentKey *key;
+ FCB *fcb;
+ int overflow_count = 0;
+
+ if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) {
+ printf("hfs_reclaim_file: kmem_alloc failed!\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
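+ /*
+ * Set up a search key for this fork's first overflow extent record.
+ * datablks counts the blocks mapped by the eight catalog-resident
+ * extents, so the overflow records begin at file block datablks.
+ */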
+ bzero(iterator, sizeof(*iterator));
+ key = (HFSPlusExtentKey *) &iterator->key;
+ key->keyLength = kHFSPlusExtentKeyMaximumLength;
+ key->forkType = forktype;
+ key->fileID = fileID;
+ key->startBlock = datablks;
+
+ btdata.bufferAddress = &record;
+ btdata.itemSize = sizeof(record);
+ btdata.itemCount = 1;
+
+ fcb = VTOF(hfsmp->hfs_extents_vp);
+
+ error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
+ while (error == 0) {
+ /* Stop when we encounter a different file or fork. */
+ if ((key->fileID != fileID) ||
+ (key->forkType != forktype)) {
+ break;
+ }
+
+ /* Just track the overflow extent record number for debugging... */
+ if (hfs_resize_debug) {
+ overflow_count++;
+ }
+
+ /*
+ * Check if the file overlaps the target space.
+ */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
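+ /*
+ * A blockCount of zero marks the end of the valid extents in this
+ * record; error is still zero here, so the goto simply exits through
+ * the cleanup path below.
+ */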
+ if (record[i].blockCount == 0) {
+ goto fail;
+ }
+ oldStartBlock = record[i].startBlock;
+ oldBlockCount = record[i].blockCount;
+ end_block = oldStartBlock + oldBlockCount;
+ if (end_block > startblk) {
+ alloc_flags = HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS;
+ if (is_sysfile) {
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ }
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ if (error) {
+ if (!is_sysfile && ((error == dskFulErr) || (error == ENOSPC))) {
+ /* Try allocating again using the metadata zone */
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ }
+ if (error) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
+ goto fail;
+ } else {
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) success for fileID=%u %u:(%u,%u)\n", fileID, i, newStartBlock, newBlockCount);
+ }
+ }
+ }
+ error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, newBlockCount, context);
+ if (error) {
+ printf("hfs_reclaim_file: hfs_copy_extent error=%d for fileID=%u (%u,%u) to (%u,%u)\n", error, fileID, oldStartBlock, oldBlockCount, newStartBlock, newBlockCount);
+ if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS)) {
+ hfs_mark_volume_inconsistent(hfsmp);
+ }
+ goto fail;
+ }
+ record[i].startBlock = newStartBlock;
+ cp->c_flag |= C_MODIFIED;
+ *blks_moved += newBlockCount;
+
+ /*
+ * NOTE: To support relocating overflow extents of the
+ * allocation file, we must update the BTree record BEFORE
+ * deallocating the old extent so that BlockDeallocate will
+ * use the extent's new location to calculate physical block
+ * numbers. (This is for the case where the old extent's
+ * bitmap bits actually reside in the extent being moved.)
+ */
+ error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record);
+ if (error) {
+ printf("hfs_reclaim_file: BTUpdateRecord returned %d\n", error);
+ hfs_mark_volume_inconsistent(hfsmp);
+ goto fail;
+ }
+ error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
+ if (error) {
+ printf("hfs_reclaim_file: BlockDeallocate returned %d\n", error);
+ hfs_mark_volume_inconsistent(hfsmp);
+ goto fail;
+ }
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_file: Relocated overflow#%d %u:(%u,%u) to %u:(%u,%u)\n", overflow_count, i, oldStartBlock, oldBlockCount, i, newStartBlock, newBlockCount);
+ }
+ }
+ }
+ /* Look for more records. */
+ error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
+ if (error == btNotFound) {
+ error = 0;
+ break;
+ }
+ }
+ }
+
+fail:
+ if (iterator) {
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ }
+
+ (void) hfs_systemfile_unlock(hfsmp, lockflags);
+
+ if ((*blks_moved != 0) && (is_sysfile == false)) {
+ (void) hfs_update(vp, MNT_WAIT);
+ }
+
+ (void) hfs_end_transaction(hfsmp);
+
+ if (took_truncate_lock) {
+ hfs_unlock_truncate(cp, TRUE);
+ }
+
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: Finished relocating %sfork for fileid=%u (error=%d)\n", (forktype ? "rsrc" : "data"), fileID, error);
+ }
+
+ return error;
+}
+
+
+/*
+ * This journal_relocate callback updates the journal info block to point
+ * at the new journal location. This write must NOT be done using the
+ * transaction. We must write the block immediately. We must also force
+ * it to get to the media so that the new journal location will be seen by
+ * the replay code before we can safely let journaled blocks be written
+ * to their normal locations.
+ *
+ * The tests for journal_uses_fua below are mildly hacky. Since the journal
+ * and the file system are both on the same device, I'm leveraging what
+ * the journal has decided about FUA.
+ */
+struct hfs_journal_relocate_args {
+ struct hfsmount *hfsmp;
+ vfs_context_t context;
+ u_int32_t newStartBlock;
+};
+
+static errno_t
+hfs_journal_relocate_callback(void *_args)
+{
+ int error;
+ struct hfs_journal_relocate_args *args = _args;
+ struct hfsmount *hfsmp = args->hfsmp;
+ buf_t bp;
+ JournalInfoBlock *jibp;
+
+ error = buf_meta_bread(hfsmp->hfs_devvp,
+ hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
+ hfsmp->blockSize, vfs_context_ucred(args->context), &bp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error);
+ return error;
+ }
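+ /*
+ * Update the journal's byte offset and size; the on-disk fields in the
+ * journal info block are big-endian.
+ */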
+ jibp = (JournalInfoBlock*) buf_dataptr(bp);
+ jibp->offset = SWAP_BE64((u_int64_t)args->newStartBlock * hfsmp->blockSize);
+ jibp->size = SWAP_BE64(hfsmp->jnl_size);
+ if (journal_uses_fua(hfsmp->jnl))
+ buf_markfua(bp);
+ error = buf_bwrite(bp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: failed to write JIB (%d)\n", error);
+ return error;
+ }
+ if (!journal_uses_fua(hfsmp->jnl)) {
+ error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, args->context);
+ if (error) {
+ printf("hfs_reclaim_journal_file: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
+ error = 0; /* Don't fail the operation. */
+ }
+ }
+
+ return error;
+}
+
+
+static int
+hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ u_int32_t oldStartBlock;
+ u_int32_t newStartBlock;
+ u_int32_t oldBlockCount;
+ u_int32_t newBlockCount;
+ struct cat_desc journal_desc;
+ struct cat_attr journal_attr;
+ struct cat_fork journal_fork;
+ struct hfs_journal_relocate_args callback_args;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error);
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
+ oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize;
+
+ /* TODO: Allow the journal to change size based on the new volume size. */
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount,
+ HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS,
+ &newStartBlock, &newBlockCount);
+ if (error) {
+ printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error);
+ goto fail;
+ }
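+ /*
+ * With HFS_ALLOC_FORCECONTIG the allocator should return exactly the
+ * requested block count; anything else is treated as a failure.
+ */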
+ if (newBlockCount != oldBlockCount) {
+ printf("hfs_reclaim_journal_file: newBlockCount != oldBlockCount (%u, %u)\n", newBlockCount, oldBlockCount);
+ error = EIO;
+ goto free_fail;
+ }
+
+ error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
+ if (error) {
+ printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error);
+ goto free_fail;
+ }
+
+ /* Update the catalog record for .journal */
+ error = cat_idlookup(hfsmp, hfsmp->hfs_jnlfileid, 1, &journal_desc, &journal_attr, &journal_fork);
+ if (error) {
+ printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
+ goto free_fail;
+ }
+ oldStartBlock = journal_fork.cf_extents[0].startBlock;
+ journal_fork.cf_size = newBlockCount * hfsmp->blockSize;
+ journal_fork.cf_extents[0].startBlock = newStartBlock;
+ journal_fork.cf_extents[0].blockCount = newBlockCount;
+ journal_fork.cf_blocks = newBlockCount;
+ error = cat_update(hfsmp, &journal_desc, &journal_attr, &journal_fork, NULL);
+ cat_releasedesc(&journal_desc); /* all done with cat descriptor */
+ if (error) {
+ printf("hfs_reclaim_journal_file: cat_update returned %d\n", error);
+ goto free_fail;
+ }
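+ /*
+ * journal_relocate() moves the journal to its new location and invokes
+ * hfs_journal_relocate_callback() to rewrite the journal info block, so
+ * replay will find the journal before any journaled blocks are written
+ * to their normal locations.
+ */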
+ callback_args.hfsmp = hfsmp;
+ callback_args.context = context;
+ callback_args.newStartBlock = newStartBlock;
+
+ error = journal_relocate(hfsmp->jnl, (off_t)newStartBlock*hfsmp->blockSize,
+ (off_t)newBlockCount*hfsmp->blockSize, 0,
+ hfs_journal_relocate_callback, &callback_args);
+ if (error) {
+ /* NOTE: journal_relocate will mark the journal invalid. */
+ printf("hfs_reclaim_journal_file: journal_relocate returned %d\n", error);
+ goto fail;
+ }
+ hfsmp->jnl_start = newStartBlock;
+ hfsmp->jnl_size = (off_t)newBlockCount * hfsmp->blockSize;
+
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ error = hfs_end_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error);
+ }
+
+ if (!error && hfs_resize_debug) {
+ printf ("hfs_reclaim_journal_file: Successfully relocated journal from (%u,%u) to (%u,%u)\n", oldStartBlock, oldBlockCount, newStartBlock, newBlockCount);
+ }
+ return error;
+
+free_fail:
+ (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS);
+fail:
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_journal_file: Error relocating journal file (error=%d)\n", error);
+ }
+ return error;
+}
+
+
+/*
+ * Move the journal info block to a new location. We have to make sure the
+ * new copy of the journal info block gets to the media first, then change
+ * the field in the volume header and the catalog record.
+ */
+static int
+hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ u_int32_t oldBlock;
+ u_int32_t newBlock;
+ u_int32_t blockCount;
+ struct cat_desc jib_desc;
+ struct cat_attr jib_attr;
+ struct cat_fork jib_fork;
+ buf_t old_bp, new_bp;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_start_transaction returned %d\n", error);
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
+ error = BlockAllocate(hfsmp, 1, 1, 1,
+ HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS,
+ &newBlock, &blockCount);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error);
+ goto fail;
+ }
+ if (blockCount != 1) {
+ printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount);
+ error = EIO;
+ goto free_fail;
+ }
+ error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1, HFS_ALLOC_SKIPFREEBLKS);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error);
+ goto free_fail;
+ }
+
+ /* Copy the old journal info block content to the new location */
+ error = buf_meta_bread(hfsmp->hfs_devvp,
+ hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
+ hfsmp->blockSize, vfs_context_ucred(context), &old_bp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error);
+ goto free_fail;
+ }
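+ /*
+ * Write a copy of the journal info block at its new location, using
+ * FUA or a cache flush so it reaches the media before the volume
+ * header and catalog are updated to point at it.
+ */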
+ new_bp = buf_getblk(hfsmp->hfs_devvp,
+ newBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
+ hfsmp->blockSize, 0, 0, BLK_META);
+ bcopy((char*)buf_dataptr(old_bp), (char*)buf_dataptr(new_bp), hfsmp->blockSize);
+ buf_brelse(old_bp);
+ if (journal_uses_fua(hfsmp->jnl))
+ buf_markfua(new_bp);
+ error = buf_bwrite(new_bp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: failed to write new JIB (%d)\n", error);
+ goto free_fail;
+ }
+ if (!journal_uses_fua(hfsmp->jnl)) {
+ error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
+ /* Don't fail the operation. */
+ }
+ }
+
+ /* Update the catalog record for .journal_info_block */
+ error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: cat_idlookup returned %d\n", error);
+ goto fail;
+ }
+ oldBlock = jib_fork.cf_extents[0].startBlock;
+ jib_fork.cf_size = hfsmp->blockSize;
+ jib_fork.cf_extents[0].startBlock = newBlock;
+ jib_fork.cf_extents[0].blockCount = 1;
+ jib_fork.cf_blocks = 1;
+ error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL);
+ cat_releasedesc(&jib_desc); /* all done with cat descriptor */
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error);
+ goto fail;
+ }
+
+ /* Update the pointer to the journal info block in the volume header. */
+ hfsmp->vcbJinfoBlock = newBlock;
+ error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error);
+ goto fail;
+ }
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ error = hfs_end_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error);
+ }
+ error = hfs_journal_flush(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_journal_flush returned %d\n", error);
+ }
+
+ if (!error && hfs_resize_debug) {
+ printf ("hfs_reclaim_journal_info_block: Successfully relocated journal info block from (%u,%u) to (%u,%u)\n", oldBlock, blockCount, newBlock, blockCount);
+ }
+ return error;
+
+free_fail:
+ (void) BlockDeallocate(hfsmp, newBlock, blockCount, HFS_ALLOC_SKIPFREEBLKS);
+fail:
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_journal_info_block: Error relocating journal info block (error=%d)\n", error);
+ }
+ return error;
+}
+
+