+
+	/*
+	 * TODO: Adjust the size of the metadata zone based on new volume size?
+	 */
+
+	/*
+	 * Adjust the size of hfsmp->hfs_attrdata_vp so its fork bookkeeping
+	 * and the UBC both reflect the new volume block count.
+	 */
+	if (hfsmp->hfs_attrdata_vp) {
+		struct cnode *cp;
+		struct filefork *fp;
+
+		/* Take an iocount so the vnode can't be recycled while we update it. */
+		if (vnode_get(hfsmp->hfs_attrdata_vp) == 0) {
+			cp = VTOC(hfsmp->hfs_attrdata_vp);
+			fp = VTOF(hfsmp->hfs_attrdata_vp);
+
+			cp->c_blocks = newblkcnt;
+			fp->ff_blocks = newblkcnt;
+			fp->ff_extents[0].blockCount = newblkcnt;
+			fp->ff_size = (off_t) newblkcnt * hfsmp->blockSize;
+			/* Keep the unified buffer cache's file size in sync. */
+			ubc_setsize(hfsmp->hfs_attrdata_vp, fp->ff_size);
+			vnode_put(hfsmp->hfs_attrdata_vp);
+		}
+	}
+
+out:
+	/* On failure, return the blocks we had set aside to the free count. */
+	if (error)
+		hfsmp->freeBlocks += reclaimblks;
+
+	lck_mtx_lock(&hfsmp->hfs_mutex);
+	/* Re-open the allocator to the full (new) volume size. */
+	hfsmp->allocLimit = hfsmp->totalBlocks;
+	/* If the next-allocation pointer now lies past the limit, restart it
+	 * just beyond the metadata zone. */
+	if (hfsmp->nextAllocation >= hfsmp->allocLimit)
+		hfsmp->nextAllocation = hfsmp->hfs_metazone_end + 1;
+	hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS;
+	lck_mtx_unlock(&hfsmp->hfs_mutex);
+
+	if (lockflags) {
+		hfs_systemfile_unlock(hfsmp, lockflags);
+	}
+	if (transaction_begun) {
+		hfs_end_transaction(hfsmp);
+		/* Force the journal to disk so the resize is durable. */
+		journal_flush(hfsmp->jnl);
+	}
+
+	return (error);
+}
+
+
+/*
+ * Invalidate the physical block numbers associated with buffer cache blocks
+ * in the given extent of the given vnode.
+ */
+struct hfs_inval_blk_no {
+	daddr64_t sectorStart;	/* First device sector of the range to invalidate */
+	daddr64_t sectorCount;	/* Number of device sectors in the range */
+};
+static int
+hfs_invalidate_block_numbers_callback(buf_t bp, void *args_in)
+{
+	struct hfs_inval_blk_no *range = args_in;
+	daddr64_t first = range->sectorStart;
+	daddr64_t limit = first + range->sectorCount;
+	daddr64_t blkno = buf_blkno(bp);
+
+	/*
+	 * If this buffer's physical block number falls inside the given
+	 * range, reset it to the logical block number; any later write of
+	 * the buffer must then go through VNOP_BLOCKMAP to discover the
+	 * block's new physical location.
+	 */
+	if (blkno >= first && blkno < limit) {
+		buf_setblkno(bp, buf_lblkno(bp));
+	}
+
+	return BUF_RETURNED;
+}
+static void
+hfs_invalidate_sectors(struct vnode *vp, daddr64_t sectorStart, daddr64_t sectorCount)
+{
+	/*
+	 * Visit every buffer attached to vp — clean and dirty alike — and
+	 * let the callback strip the cached physical block number from
+	 * those that lie within the given sector range.
+	 */
+	struct hfs_inval_blk_no args = {
+		.sectorStart = sectorStart,
+		.sectorCount = sectorCount
+	};
+
+	buf_iterate(vp, hfs_invalidate_block_numbers_callback, BUF_SCAN_DIRTY|BUF_SCAN_CLEAN, &args);
+}
+
+
+/*
+ * Copy the contents of an extent to a new location. Also invalidates the
+ * physical block number of any buffer cache block in the copied extent
+ * (so that if the block is written, it will go through VNOP_BLOCKMAP to
+ * determine the new physical block number).
+ */
+static int
+hfs_copy_extent(
+	struct hfsmount *hfsmp,
+	struct vnode *vp,		/* The file whose extent is being copied. */
+	u_int32_t oldStart,		/* The start of the source extent. */
+	u_int32_t newStart,		/* The start of the destination extent. */
+	u_int32_t blockCount,		/* The number of allocation blocks to copy. */
+	vfs_context_t context)
+{
+	int err = 0;
+	size_t bufferSize;		/* Size of the copy buffer (== max I/O size) */
+	void *buffer = NULL;
+	struct vfsioattr ioattr;
+	buf_t bp = NULL;
+	off_t resid;			/* Bytes remaining to be copied */
+	size_t ioSize;			/* Bytes in the current I/O */
+	u_int32_t ioSizeSectors;	/* Device sectors in this I/O */
+	daddr64_t srcSector, destSector;
+	u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_phys_block_size;
+
+	/*
+	 * Sanity check that we have locked the vnode of the file we're copying.
+	 *
+	 * But since hfs_systemfile_lock() doesn't actually take the lock on
+	 * the allocation file if a journal is active, ignore the check if the
+	 * file being copied is the allocation file.
+	 */
+	struct cnode *cp = VTOC(vp);
+	if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread())
+		panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp);
+
+	/*
+	 * Wait for any in-progress writes to this vnode to complete, so that we'll
+	 * be copying consistent bits.  (Otherwise, it's possible that an async
+	 * write will complete to the old extent after we read from it.  That
+	 * could lead to corruption.)
+	 */
+	err = vnode_waitforwrites(vp, 0, 0, 0, "hfs_copy_extent");
+	if (err) {
+		printf("hfs_copy_extent: Error %d from vnode_waitforwrites\n", err);
+		return err;
+	}
+
+	/*
+	 * Determine the I/O size to use: bounded by the smaller of the
+	 * device's maximum read and maximum write transfer sizes.
+	 *
+	 * NOTE: Many external drives will result in an ioSize of 128KB.
+	 * TODO: Should we use a larger buffer, doing several consecutive
+	 * reads, then several consecutive writes?
+	 */
+	vfs_ioattr(hfsmp->hfs_mp, &ioattr);
+	bufferSize = MIN(ioattr.io_maxreadcnt, ioattr.io_maxwritecnt);
+	if (kmem_alloc(kernel_map, (vm_offset_t*) &buffer, bufferSize))
+		return ENOMEM;
+
+	/* Get a buffer for doing the I/O, pointed at our copy buffer. */
+	bp = buf_alloc(hfsmp->hfs_devvp);
+	buf_setdataptr(bp, (uintptr_t)buffer);
+
+	/* Convert allocation-block quantities to device sectors. */
+	resid = (off_t) blockCount * (off_t) hfsmp->blockSize;
+	srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size;
+	destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size;
+	while (resid > 0) {
+		ioSize = MIN(bufferSize, resid);
+		ioSizeSectors = ioSize / hfsmp->hfs_phys_block_size;
+
+		/* Prepare the buffer for reading from the source extent. */
+		buf_reset(bp, B_READ);
+		buf_setsize(bp, ioSize);
+		buf_setcount(bp, ioSize);
+		buf_setblkno(bp, srcSector);
+		buf_setlblkno(bp, srcSector);
+
+		/* Do the read (synchronously, via biowait). */
+		err = VNOP_STRATEGY(bp);
+		if (!err)
+			err = buf_biowait(bp);
+		if (err) {
+			printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err);
+			break;
+		}
+
+		/* Prepare the same buffer for writing to the destination extent. */
+		buf_reset(bp, B_WRITE);
+		buf_setsize(bp, ioSize);
+		buf_setcount(bp, ioSize);
+		buf_setblkno(bp, destSector);
+		buf_setlblkno(bp, destSector);
+		/* If the journal uses FUA, force-unit-access this write too. */
+		if (journal_uses_fua(hfsmp->jnl))
+			buf_markfua(bp);
+
+		/* Do the write */
+		vnode_startwrite(hfsmp->hfs_devvp);
+		err = VNOP_STRATEGY(bp);
+		if (!err)
+			err = buf_biowait(bp);
+		if (err) {
+			printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err);
+			break;
+		}
+
+		resid -= ioSize;
+		srcSector += ioSizeSectors;
+		destSector += ioSizeSectors;
+	}
+	if (bp)
+		buf_free(bp);
+	if (buffer)
+		kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize);
+
+	/* Make sure all writes have been flushed to disk. */
+	if (!journal_uses_fua(hfsmp->jnl)) {
+		err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
+		if (err) {
+			printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err);
+			err = 0;	/* Don't fail the copy. */
+		}
+	}
+
+	/*
+	 * On success, strip the stale physical block numbers for the OLD
+	 * location from any cached buffers, so future writes remap via
+	 * VNOP_BLOCKMAP to the new location.
+	 */
+	if (!err)
+		hfs_invalidate_sectors(vp, (daddr64_t)oldStart*sectorsPerBlock, (daddr64_t)blockCount*sectorsPerBlock);
+
+	return err;
+}
+
+
+/*
+ * Reclaim space at the end of a volume, used by a given system file.
+ *
+ * This routine attempts to move any extent which contains allocation blocks
+ * at or after "startblk." A separate transaction is used to do the move.
+ * The contents of any moved extents are read and written via the volume's
+ * device vnode -- NOT via "vp." During the move, moved blocks which are part
+ * of a transaction have their physical block numbers invalidated so they will
+ * eventually be written to their new locations.
+ *
+ * This routine can be used to move overflow extents for the allocation file.
+ *
+ * Inputs:
+ * hfsmp The volume being resized.
+ * startblk Blocks >= this allocation block need to be moved.
+ * locks Which locks need to be taken for the given system file.
+ * vp The vnode for the system file.
+ *
+ * Outputs:
+ * moved Set to true if any extents were moved.
+ */
+static int
+hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state)
+{
+	/* Overwrite the B-tree's extent record with the caller-supplied copy. */
+	bcopy(state, record, sizeof(*record));
+	return 0;
+}
+static int
+hfs_reclaim_sys_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, Boolean *moved, vfs_context_t context)
+{
+	int error;
+	int lockflags;
+	int i;
+	u_long datablks;	/* Blocks covered by the in-fork (non-overflow) extents */
+	u_long block;		/* End block (exclusive) of the current extent */
+	u_int32_t oldStartBlock;
+	u_int32_t newStartBlock;
+	u_int32_t blockCount;
+	struct filefork *fp;
+
+	/* If there is no vnode for this file, then there's nothing to do. */
+	if (vp == NULL)
+		return 0;
+
+	/* printf("hfs_reclaim_sys_file: %.*s\n", VTOC(vp)->c_desc.cd_namelen, VTOC(vp)->c_desc.cd_nameptr); */
+
+	/* We always need the allocation bitmap and extents B-tree */
+	locks |= SFL_BITMAP | SFL_EXTENTS;
+
+	error = hfs_start_transaction(hfsmp);
+	if (error) {
+		printf("hfs_reclaim_sys_file: hfs_start_transaction returned %d\n", error);
+		return error;
+	}
+	lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK);
+	fp = VTOF(vp);
+	datablks = 0;
+
+	/* Relocate non-overflow extents */
+	for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+		if (fp->ff_extents[i].blockCount == 0)
+			break;
+		oldStartBlock = fp->ff_extents[i].startBlock;
+		blockCount = fp->ff_extents[i].blockCount;
+		datablks += blockCount;
+		block = oldStartBlock + blockCount;
+		/* Only move extents that reach into the area being reclaimed. */
+		if (block > startblk) {
+			/* Allocate a same-size replacement extent (all-or-nothing). */
+			error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
+			if (error) {
+				printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
+				goto fail;
+			}
+			if (blockCount != fp->ff_extents[i].blockCount) {
+				/* NOTE(review): format string is missing a trailing '\n',
+				 * and error is still 0 here, so this failure path returns
+				 * success to the caller — confirm intended. */
+				printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
+				goto free_fail;
+			}
+			error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
+			if (error) {
+				printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
+				goto free_fail;
+			}
+			/* Point the in-fork extent at its new location. */
+			fp->ff_extents[i].startBlock = newStartBlock;
+			VTOC(vp)->c_flag |= C_MODIFIED;
+			*moved = true;
+			error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
+			if (error) {
+				/* TODO: Mark volume inconsistent? */
+				printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
+				goto fail;
+			}
+			error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
+			if (error) {
+				/* TODO: Mark volume inconsistent? */
+				printf("hfs_reclaim_sys_file: hfs_flushvolumeheader returned %d\n", error);
+				goto fail;
+			}
+		}
+	}
+
+	/*
+	 * Relocate overflow extents (if any).  We only get here with
+	 * i == kHFSPlusExtentDensity when all eight in-fork extents are in
+	 * use; ff_blocks > datablks then implies overflow records exist.
+	 */
+	if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) {
+		struct BTreeIterator *iterator = NULL;
+		struct FSBufferDescriptor btdata;
+		HFSPlusExtentRecord record;
+		HFSPlusExtentKey *key;
+		FCB *fcb;
+		u_int32_t fileID;
+		u_int8_t forktype;
+
+		forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0;
+		fileID = VTOC(vp)->c_cnid;
+		if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) {
+			printf("hfs_reclaim_sys_file: kmem_alloc failed!\n");
+			error = ENOMEM;
+			goto fail;
+		}
+
+		/* Seed the search key at the first file block past the in-fork extents. */
+		bzero(iterator, sizeof(*iterator));
+		key = (HFSPlusExtentKey *) &iterator->key;
+		key->keyLength = kHFSPlusExtentKeyMaximumLength;
+		key->forkType = forktype;
+		key->fileID = fileID;
+		key->startBlock = datablks;
+
+		btdata.bufferAddress = &record;
+		btdata.itemSize = sizeof(record);
+		btdata.itemCount = 1;
+
+		fcb = VTOF(hfsmp->hfs_extents_vp);
+
+		error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
+		while (error == 0) {
+			/* Stop when we encounter a different file or fork. */
+			if ((key->fileID != fileID) ||
+			    (key->forkType != forktype)) {
+				break;
+			}
+			/*
+			 * Check if the file overlaps target space.
+			 */
+			for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+				if (record[i].blockCount == 0) {
+					goto overflow_done;
+				}
+				oldStartBlock = record[i].startBlock;
+				blockCount = record[i].blockCount;
+				block = oldStartBlock + blockCount;
+				if (block > startblk) {
+					error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
+					if (error) {
+						printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
+						goto overflow_done;
+					}
+					if (blockCount != record[i].blockCount) {
+						/* NOTE(review): this prints fp->ff_extents[i].blockCount,
+						 * but the comparison above used record[i].blockCount —
+						 * looks like a copy/paste slip.  Also missing '\n', and
+						 * error is still 0 on this path, so the caller sees
+						 * success — confirm intended. */
+						printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
+						kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+						goto free_fail;
+					}
+					error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
+					if (error) {
+						printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
+						kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+						goto free_fail;
+					}
+					record[i].startBlock = newStartBlock;
+					VTOC(vp)->c_flag |= C_MODIFIED;
+					*moved = true;
+					/*
+					 * NOTE: To support relocating overflow extents of the
+					 * allocation file, we must update the BTree record BEFORE
+					 * deallocating the old extent so that BlockDeallocate will
+					 * use the extent's new location to calculate physical block
+					 * numbers.  (This is for the case where the old extent's
+					 * bitmap bits actually reside in the extent being moved.)
+					 */
+					error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record);
+					if (error) {
+						/* TODO: Mark volume inconsistent? */
+						printf("hfs_reclaim_sys_file: BTUpdateRecord returned %d\n", error);
+						goto overflow_done;
+					}
+					error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
+					if (error) {
+						/* TODO: Mark volume inconsistent? */
+						printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
+						goto overflow_done;
+					}
+				}
+			}
+			/* Look for more records. */
+			error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
+			if (error == btNotFound) {
+				/* Ran off the end of this file's records: not an error. */
+				error = 0;
+				break;
+			}
+		}
+overflow_done:
+		kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+		if (error) {
+			goto fail;
+		}
+	}
+
+	hfs_systemfile_unlock(hfsmp, lockflags);
+	error = hfs_end_transaction(hfsmp);
+	if (error) {
+		printf("hfs_reclaim_sys_file: hfs_end_transaction returned %d\n", error);
+	}
+
+	return error;
+
+free_fail:
+	/* Give back the replacement extent we allocated but could not use. */
+	(void) BlockDeallocate(hfsmp, newStartBlock, blockCount);
+fail:
+	(void) hfs_systemfile_unlock(hfsmp, lockflags);
+	(void) hfs_end_transaction(hfsmp);
+	return error;