+struct hfs_inval_blk_no {
+ daddr64_t sectorStart;
+ daddr64_t sectorCount;
+};
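+/*
+ * buf_iterate callback: for any buffer whose physical block number falls in
+ * the range of device sectors being moved, reset the physical block number
+ * to the buffer's logical block number so that a later write will go back
+ * through VNOP_BLOCKMAP to find the extent's new location.
+ */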
+static int
+hfs_invalidate_block_numbers_callback(buf_t bp, void *args_in)
+{
+ daddr64_t blkno;
+ struct hfs_inval_blk_no *args;
+
+ blkno = buf_blkno(bp);
+ args = args_in;
+
+ if (blkno >= args->sectorStart && blkno < args->sectorStart+args->sectorCount)
+ buf_setblkno(bp, buf_lblkno(bp));
+
+ return BUF_RETURNED;
+}
+static void
+hfs_invalidate_sectors(struct vnode *vp, daddr64_t sectorStart, daddr64_t sectorCount)
+{
+ struct hfs_inval_blk_no args;
+ args.sectorStart = sectorStart;
+ args.sectorCount = sectorCount;
+
+ buf_iterate(vp, hfs_invalidate_block_numbers_callback, BUF_SCAN_DIRTY|BUF_SCAN_CLEAN, &args);
+}
+
+
+/*
+ * Copy the contents of an extent to a new location. Also invalidates the
+ * physical block number of any buffer cache block in the copied extent
+ * (so that if the block is written, it will go through VNOP_BLOCKMAP to
+ * determine the new physical block number).
+ */
+static int
+hfs_copy_extent(
+ struct hfsmount *hfsmp,
+ struct vnode *vp, /* The file whose extent is being copied. */
+ u_int32_t oldStart, /* The start of the source extent. */
+ u_int32_t newStart, /* The start of the destination extent. */
+ u_int32_t blockCount, /* The number of allocation blocks to copy. */
+ vfs_context_t context)
+{
+ int err = 0;
+ size_t bufferSize;
+ void *buffer = NULL;
+ struct vfsioattr ioattr;
+ buf_t bp = NULL;
+ off_t resid;
+ size_t ioSize;
+ u_int32_t ioSizeSectors; /* Device sectors in this I/O */
+ daddr64_t srcSector, destSector;
+ u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_phys_block_size;
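+ /*
+ * Note: the allocation block size is assumed to be a multiple of the
+ * device's physical block size, so this conversion is exact.
+ */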
+
+ /*
+ * Sanity check that we have locked the cnode of the file we're copying.
+ *
+ * But since hfs_systemfile_lock() doesn't actually take the lock on
+ * the allocation file if a journal is active, ignore the check if the
+ * file being copied is the allocation file.
+ */
+ struct cnode *cp = VTOC(vp);
+ if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread())
+ panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp);
+
+ /*
+ * Wait for any in-progress writes to this vnode to complete, so that we'll
+ * be copying consistent bits. (Otherwise, it's possible that an async
+ * write will complete to the old extent after we read from it. That
+ * could lead to corruption.)
+ */
+ err = vnode_waitforwrites(vp, 0, 0, 0, "hfs_copy_extent");
+ if (err) {
+ printf("hfs_copy_extent: Error %d from vnode_waitforwrites\n", err);
+ return err;
+ }
+
+ /*
+ * Determine the I/O size to use
+ *
+ * NOTE: Many external drives will result in an ioSize of 128KB.
+ * TODO: Should we use a larger buffer, doing several consecutive
+ * reads, then several consecutive writes?
+ */
+ vfs_ioattr(hfsmp->hfs_mp, &ioattr);
+ bufferSize = MIN(ioattr.io_maxreadcnt, ioattr.io_maxwritecnt);
+ if (kmem_alloc(kernel_map, (vm_offset_t*) &buffer, bufferSize))
+ return ENOMEM;
+
+ /* Get a buffer for doing the I/O */
+ bp = buf_alloc(hfsmp->hfs_devvp);
+ buf_setdataptr(bp, (uintptr_t)buffer);
+
+ resid = (off_t) blockCount * (off_t) hfsmp->blockSize;
+ srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size;
+ destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size;
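+ /*
+ * For example, with 4 KB allocation blocks on a device with 512-byte
+ * sectors, allocation block N begins at device sector N * 8.
+ */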
+ while (resid > 0) {
+ ioSize = MIN(bufferSize, resid);
+ ioSizeSectors = ioSize / hfsmp->hfs_phys_block_size;
+
+ /* Prepare the buffer for reading */
+ buf_reset(bp, B_READ);
+ buf_setsize(bp, ioSize);
+ buf_setcount(bp, ioSize);
+ buf_setblkno(bp, srcSector);
+ buf_setlblkno(bp, srcSector);
+
+ /* Do the read */
+ err = VNOP_STRATEGY(bp);
+ if (!err)
+ err = buf_biowait(bp);
+ if (err) {
+ printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err);
+ break;
+ }
+
+ /* Prepare the buffer for writing */
+ buf_reset(bp, B_WRITE);
+ buf_setsize(bp, ioSize);
+ buf_setcount(bp, ioSize);
+ buf_setblkno(bp, destSector);
+ buf_setlblkno(bp, destSector);
+ if (journal_uses_fua(hfsmp->jnl))
+ buf_markfua(bp);
+
+ /* Do the write */
+ vnode_startwrite(hfsmp->hfs_devvp);
+ err = VNOP_STRATEGY(bp);
+ if (!err)
+ err = buf_biowait(bp);
+ if (err) {
+ printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err);
+ break;
+ }
+
+ resid -= ioSize;
+ srcSector += ioSizeSectors;
+ destSector += ioSizeSectors;
+ }
+ if (bp)
+ buf_free(bp);
+ if (buffer)
+ kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize);
+
+ /* Make sure all writes have been flushed to disk. */
+ if (!journal_uses_fua(hfsmp->jnl)) {
+ err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
+ if (err) {
+ printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err);
+ err = 0; /* Don't fail the copy. */
+ }
+ }
+
+ if (!err)
+ hfs_invalidate_sectors(vp, (daddr64_t)oldStart*sectorsPerBlock, (daddr64_t)blockCount*sectorsPerBlock);
+
+ return err;
+}
+
+
+/*
+ * Reclaim space at the end of a volume that is used by a given system file.
+ *
+ * This routine attempts to move any extent which contains allocation blocks
+ * at or after "startblk." A separate transaction is used to do the move.
+ * The contents of any moved extents are read and written via the volume's
+ * device vnode -- NOT via "vp." During the move, moved blocks which are part
+ * of a transaction have their physical block numbers invalidated so they will
+ * eventually be written to their new locations.
+ *
+ * This routine can be used to move overflow extents for the allocation file.
+ *
+ * Inputs:
+ * hfsmp The volume being resized.
+ * startblk Blocks >= this allocation block need to be moved.
+ * locks Which locks need to be taken for the given system file.
+ * vp The vnode for the system file.
+ *
+ * Outputs:
+ * moved Set to true if any extents were moved.
+ */
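+/*
+ * Callback for BTUpdateRecord: copies the updated extent record (passed in
+ * as 'state') over the record stored in the extents B-tree.
+ */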
+static int
+hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state)
+{
+ bcopy(state, record, sizeof(HFSPlusExtentRecord));
+ return 0;
+}
+static int
+hfs_reclaim_sys_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, Boolean *moved, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ int i;
+ u_long datablks;
+ u_long block;
+ u_int32_t oldStartBlock;
+ u_int32_t newStartBlock;
+ u_int32_t blockCount;
+ struct filefork *fp;
+
+ /* If there is no vnode for this file, then there's nothing to do. */
+ if (vp == NULL)
+ return 0;
+
+ /* printf("hfs_reclaim_sys_file: %.*s\n", VTOC(vp)->c_desc.cd_namelen, VTOC(vp)->c_desc.cd_nameptr); */
+
+ /* We always need the allocation bitmap and extents B-tree */
+ locks |= SFL_BITMAP | SFL_EXTENTS;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_sys_file: hfs_start_transaction returned %d\n", error);
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK);
+ fp = VTOF(vp);
+ datablks = 0;
+
+ /* Relocate non-overflow extents */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+ if (fp->ff_extents[i].blockCount == 0)
+ break;
+ oldStartBlock = fp->ff_extents[i].startBlock;
+ blockCount = fp->ff_extents[i].blockCount;
+ datablks += blockCount;
+ block = oldStartBlock + blockCount;
+ if (block > startblk) {
+ error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
+ if (error) {
+ printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
+ goto fail;
+ }
+ if (blockCount != fp->ff_extents[i].blockCount) {
+ printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
+ error = ENOSPC;
+ goto free_fail;
+ }
+ error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
+ if (error) {
+ printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
+ goto free_fail;
+ }
+ fp->ff_extents[i].startBlock = newStartBlock;
+ VTOC(vp)->c_flag |= C_MODIFIED;
+ *moved = true;
+ error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
+ if (error) {
+ /* TODO: Mark volume inconsistent? */
+ printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
+ goto fail;
+ }
+ error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
+ if (error) {
+ /* TODO: Mark volume inconsistent? */
+ printf("hfs_reclaim_sys_file: hfs_flushvolumeheader returned %d\n", error);
+ goto fail;
+ }
+ }
+ }
+
+ /* Relocate overflow extents (if any) */
+ if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) {
+ struct BTreeIterator *iterator = NULL;
+ struct FSBufferDescriptor btdata;
+ HFSPlusExtentRecord record;
+ HFSPlusExtentKey *key;
+ FCB *fcb;
+ u_int32_t fileID;
+ u_int8_t forktype;
+
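+ /* 0xFF identifies the resource fork in extent keys; 0 identifies the data fork. */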
+ forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0;
+ fileID = VTOC(vp)->c_cnid;
+ if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) {
+ printf("hfs_reclaim_sys_file: kmem_alloc failed!\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ bzero(iterator, sizeof(*iterator));
+ key = (HFSPlusExtentKey *) &iterator->key;
+ key->keyLength = kHFSPlusExtentKeyMaximumLength;
+ key->forkType = forktype;
+ key->fileID = fileID;
+ key->startBlock = datablks;
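+ /* datablks = blocks covered by the in-line extents, so the search starts at the first overflow extent record. */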
+
+ btdata.bufferAddress = &record;
+ btdata.itemSize = sizeof(record);
+ btdata.itemCount = 1;
+
+ fcb = VTOF(hfsmp->hfs_extents_vp);
+
+ error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
+ while (error == 0) {
+ /* Stop when we encounter a different file or fork. */
+ if ((key->fileID != fileID) ||
+ (key->forkType != forktype)) {
+ break;
+ }
+ /*
+ * Check if the file overlaps target space.
+ */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+ if (record[i].blockCount == 0) {
+ goto overflow_done;
+ }
+ oldStartBlock = record[i].startBlock;
+ blockCount = record[i].blockCount;
+ block = oldStartBlock + blockCount;
+ if (block > startblk) {
+ error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
+ if (error) {
+ printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
+ goto overflow_done;
+ }
+ if (blockCount != record[i].blockCount) {
+ printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ error = ENOSPC;
+ goto free_fail;
+ }
+ error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
+ if (error) {
+ printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ goto free_fail;
+ }
+ record[i].startBlock = newStartBlock;
+ VTOC(vp)->c_flag |= C_MODIFIED;
+ *moved = true;
+ /*
+ * NOTE: To support relocating overflow extents of the
+ * allocation file, we must update the BTree record BEFORE
+ * deallocating the old extent so that BlockDeallocate will
+ * use the extent's new location to calculate physical block
+ * numbers. (This is for the case where the old extent's
+ * bitmap bits actually reside in the extent being moved.)
+ */
+ error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record);
+ if (error) {
+ /* TODO: Mark volume inconsistent? */
+ printf("hfs_reclaim_sys_file: BTUpdateRecord returned %d\n", error);
+ goto overflow_done;
+ }
+ error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
+ if (error) {
+ /* TODO: Mark volume inconsistent? */
+ printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
+ goto overflow_done;
+ }
+ }
+ }
+ /* Look for more records. */
+ error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
+ if (error == btNotFound) {
+ error = 0;
+ break;
+ }
+ }
+overflow_done:
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ if (error) {
+ goto fail;
+ }
+ }
+
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ error = hfs_end_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_sys_file: hfs_end_transaction returned %d\n", error);
+ }
+
+ return error;
+
+free_fail:
+ (void) BlockDeallocate(hfsmp, newStartBlock, blockCount);
+fail:
+ (void) hfs_systemfile_unlock(hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ return error;
+}
+
+
+/*
+ * This journal_relocate callback updates the journal info block to point
+ * at the new journal location. This write must NOT be done using the
+ * transaction. We must write the block immediately. We must also force
+ * it to get to the media so that the new journal location will be seen by
+ * the replay code before we can safely let journaled blocks be written
+ * to their normal locations.
+ *
+ * The tests for journal_uses_fua below are mildly hacky. Since the journal
+ * and the file system are both on the same device, I'm leveraging what
+ * the journal has decided about FUA.
+ */
+struct hfs_journal_relocate_args {
+ struct hfsmount *hfsmp;
+ vfs_context_t context;
+ u_int32_t newStartBlock;
+};
+
+static errno_t
+hfs_journal_relocate_callback(void *_args)
+{
+ int error;
+ struct hfs_journal_relocate_args *args = _args;
+ struct hfsmount *hfsmp = args->hfsmp;
+ buf_t bp;
+ JournalInfoBlock *jibp;
+
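+ /*
+ * Read the journal info block. vcbJinfoBlock is an allocation block
+ * number, so it is converted to device blocks for buf_meta_bread. The
+ * JIB fields are stored big-endian on disk, hence the SWAP_BE64 below.
+ */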
+ error = buf_meta_bread(hfsmp->hfs_devvp,
+ hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size),
+ hfsmp->blockSize, vfs_context_ucred(args->context), &bp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error);
+ return error;
+ }
+ jibp = (JournalInfoBlock*) buf_dataptr(bp);
+ jibp->offset = SWAP_BE64((u_int64_t)args->newStartBlock * hfsmp->blockSize);
+ jibp->size = SWAP_BE64(hfsmp->jnl_size);
+ if (journal_uses_fua(hfsmp->jnl))
+ buf_markfua(bp);
+ error = buf_bwrite(bp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: failed to write JIB (%d)\n", error);
+ return error;
+ }
+ if (!journal_uses_fua(hfsmp->jnl)) {
+ error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, args->context);
+ if (error) {
+ printf("hfs_reclaim_journal_file: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
+ error = 0; /* Don't fail the operation. */
+ }
+ }
+
+ return error;
+}
+
+
+static int
+hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ u_int32_t newStartBlock;
+ u_int32_t oldBlockCount;
+ u_int32_t newBlockCount;
+ struct cat_desc journal_desc;
+ struct cat_attr journal_attr;
+ struct cat_fork journal_fork;
+ struct hfs_journal_relocate_args callback_args;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error);
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
+ oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize;
+
+ /* TODO: Allow the journal to change size based on the new volume size. */
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, true, &newStartBlock, &newBlockCount);
+ if (error) {
+ printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error);
+ goto fail;
+ }
+ if (newBlockCount != oldBlockCount) {
+ printf("hfs_reclaim_journal_file: newBlockCount != oldBlockCount (%u, %u)\n", newBlockCount, oldBlockCount);
+ goto free_fail;
+ }
+
+ error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount);
+ if (error) {
+ printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error);
+ goto free_fail;
+ }
+
+ /* Update the catalog record for .journal */
+ error = cat_idlookup(hfsmp, hfsmp->hfs_jnlfileid, 1, &journal_desc, &journal_attr, &journal_fork);
+ if (error) {
+ printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
+ goto free_fail;
+ }
+ journal_fork.cf_size = newBlockCount * hfsmp->blockSize;
+ journal_fork.cf_extents[0].startBlock = newStartBlock;
+ journal_fork.cf_extents[0].blockCount = newBlockCount;
+ journal_fork.cf_blocks = newBlockCount;
+ error = cat_update(hfsmp, &journal_desc, &journal_attr, &journal_fork, NULL);
+ cat_releasedesc(&journal_desc); /* all done with cat descriptor */
+ if (error) {
+ printf("hfs_reclaim_journal_file: cat_update returned %d\n", error);
+ goto free_fail;
+ }
+ callback_args.hfsmp = hfsmp;
+ callback_args.context = context;
+ callback_args.newStartBlock = newStartBlock;
+
+ error = journal_relocate(hfsmp->jnl, (off_t)newStartBlock*hfsmp->blockSize,
+ (off_t)newBlockCount*hfsmp->blockSize, 0,
+ hfs_journal_relocate_callback, &callback_args);
+ if (error) {
+ /* NOTE: journal_relocate will mark the journal invalid. */
+ printf("hfs_reclaim_journal_file: journal_relocate returned %d\n", error);
+ goto fail;
+ }
+ hfsmp->jnl_start = newStartBlock;
+ hfsmp->jnl_size = (off_t)newBlockCount * hfsmp->blockSize;
+
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ error = hfs_end_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error);
+ }
+
+ return error;
+
+free_fail:
+ (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount);
+fail:
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ return error;
+}
+
+
+/*
+ * Move the journal info block to a new location. We have to make sure the
+ * new copy of the journal info block gets to the media first, then change
+ * the field in the volume header and the catalog record.
+ */
+static int
+hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
+{
+ int error;
+ int lockflags;
+ u_int32_t newBlock;
+ u_int32_t blockCount;
+ struct cat_desc jib_desc;
+ struct cat_attr jib_attr;
+ struct cat_fork jib_fork;
+ buf_t old_bp, new_bp;
+
+ error = hfs_start_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_start_transaction returned %d\n", error);
+ return error;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
+ error = BlockAllocate(hfsmp, 1, 1, 1, true, true, &newBlock, &blockCount);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error);
+ goto fail;
+ }
+ if (blockCount != 1) {
+ printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount);
+ goto free_fail;
+ }
+ error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error);
+ goto free_fail;
+ }
+
+ /* Copy the old journal info block content to the new location */
+ error = buf_meta_bread(hfsmp->hfs_devvp,
+ hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size),
+ hfsmp->blockSize, vfs_context_ucred(context), &old_bp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error);
+ goto free_fail;
+ }
+ new_bp = buf_getblk(hfsmp->hfs_devvp,
+ newBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size),
+ hfsmp->blockSize, 0, 0, BLK_META);
+ bcopy((char*)buf_dataptr(old_bp), (char*)buf_dataptr(new_bp), hfsmp->blockSize);
+ buf_brelse(old_bp);
+ if (journal_uses_fua(hfsmp->jnl))
+ buf_markfua(new_bp);
+ error = buf_bwrite(new_bp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: failed to write new JIB (%d)\n", error);
+ goto free_fail;
+ }
+ if (!journal_uses_fua(hfsmp->jnl)) {
+ error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
+ /* Don't fail the operation. */
+ }
+ }
+
+ /* Update the catalog record for .journal_info_block */
+ error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork);
+ if (error) {
+ printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
+ goto fail;
+ }
+ jib_fork.cf_size = hfsmp->blockSize;
+ jib_fork.cf_extents[0].startBlock = newBlock;
+ jib_fork.cf_extents[0].blockCount = 1;
+ jib_fork.cf_blocks = 1;
+ error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL);
+ cat_releasedesc(&jib_desc); /* all done with cat descriptor */
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error);
+ goto fail;
+ }
+
+ /* Update the pointer to the journal info block in the volume header. */
+ hfsmp->vcbJinfoBlock = newBlock;
+ error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error);
+ goto fail;
+ }
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ error = hfs_end_transaction(hfsmp);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error);
+ }
+ error = journal_flush(hfsmp->jnl);
+ if (error) {
+ printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error);
+ }
+ return error;
+
+free_fail:
+ (void) BlockDeallocate(hfsmp, newBlock, blockCount);
+fail:
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ return error;
+}
+
+
+/*
+ * Reclaim space at the end of a file system.
+ */
+static int
+hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context)
+{
+ struct vnode *vp = NULL;
+ FCB *fcb;
+ struct BTreeIterator * iterator = NULL;
+ struct FSBufferDescriptor btdata;
+ struct HFSPlusCatalogFile filerec;
+ u_int32_t saved_next_allocation;
+ cnid_t * cnidbufp;
+ size_t cnidbufsize;
+ int filecnt = 0;
+ int maxfilecnt;
+ u_long block;
+ u_long datablks;
+ u_long rsrcblks;
+ u_long blkstomove = 0;
+ int lockflags;
+ int i;
+ int error;
+ int lastprogress = 0;
+ Boolean system_file_moved = false;
+
+ /* Relocate extents of the Allocation file if they're in the way. */
+ error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_allocation_vp, startblk, SFL_BITMAP, &system_file_moved, context);
+ if (error) {
+ printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error);
+ return error;
+ }
+ /* Relocate extents of the Extents B-tree if they're in the way. */
+ error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_extents_vp, startblk, SFL_EXTENTS, &system_file_moved, context);
+ if (error) {
+ printf("hfs_reclaimspace: reclaim extents b-tree returned %d\n", error);
+ return error;
+ }
+ /* Relocate extents of the Catalog B-tree if they're in the way. */
+ error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_catalog_vp, startblk, SFL_CATALOG, &system_file_moved, context);
+ if (error) {
+ printf("hfs_reclaimspace: reclaim catalog b-tree returned %d\n", error);
+ return error;
+ }
+ /* Relocate extents of the Attributes B-tree if they're in the way. */
+ error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_attribute_vp, startblk, SFL_ATTRIBUTE, &system_file_moved, context);
+ if (error) {
+ printf("hfs_reclaimspace: reclaim attribute b-tree returned %d\n", error);
+ return error;
+ }
+ /* Relocate extents of the Startup File if there is one and they're in the way. */
+ error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_startup_vp, startblk, SFL_STARTUP, &system_file_moved, context);
+ if (error) {
+ printf("hfs_reclaimspace: reclaim startup file returned %d\n", error);
+ return error;
+ }
+
+ /*
+ * We need to make sure the alternate volume header gets flushed if we moved
+ * any extents recorded in the volume header. But we need to do that before
+ * shrinking the size of the volume, or else the journal code will panic
+ * with an invalid (too large) block number.
+ *
+ * Note that system_file_moved will be set if ANY extent was moved, even
+ * if it was just an overflow extent. In this case, the journal_flush isn't
+ * strictly required, but shouldn't hurt.
+ */
+ if (system_file_moved)
+ journal_flush(hfsmp->jnl);
+
+ if (hfsmp->jnl_start + (hfsmp->jnl_size / hfsmp->blockSize) > startblk) {
+ error = hfs_reclaim_journal_file(hfsmp, context);
+ if (error) {
+ printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error);
+ return error;
+ }
+ }
+
+ if (hfsmp->vcbJinfoBlock >= startblk) {
+ error = hfs_reclaim_journal_info_block(hfsmp, context);
+ if (error) {
+ printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error);
+ return error;
+ }
+ }
+
+ /* For now move a maximum of 250,000 files. */
+ maxfilecnt = MIN(hfsmp->hfs_filecount, 250000);
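+ /*
+ * Every file counted below occupies at least one allocation block in or
+ * adjacent to the reclaim area, so 'reclaimblks' also bounds the number
+ * of files we may need to track.
+ */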
+ maxfilecnt = MIN((u_long)maxfilecnt, reclaimblks);
+ cnidbufsize = maxfilecnt * sizeof(cnid_t);
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&cnidbufp, cnidbufsize)) {
+ return (ENOMEM);
+ }
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
+ kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
+ return (ENOMEM);
+ }
+
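+ /*
+ * Temporarily point the roving allocator at the start of the metadata
+ * zone so that replacement blocks tend to be allocated toward the front
+ * of the volume, away from the area being reclaimed. The saved value is
+ * restored on error below if no files were moved.
+ */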
+ saved_next_allocation = hfsmp->nextAllocation;
+ HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_start);
+
+ fcb = VTOF(hfsmp->hfs_catalog_vp);
+ bzero(iterator, sizeof(*iterator));
+
+ btdata.bufferAddress = &filerec;
+ btdata.itemSize = sizeof(filerec);
+ btdata.itemCount = 1;
+
+ /* Keep the Catalog and extents files locked during iteration. */
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_SHARED_LOCK);
+
+ error = BTIterateRecord(fcb, kBTreeFirstRecord, iterator, NULL, NULL);
+ if (error) {
+ goto end_iteration;
+ }
+ /*
+ * Iterate over all the catalog records looking for files
+ * that overlap into the space we're trying to free up.
+ */
+ for (filecnt = 0; filecnt < maxfilecnt; ) {
+ error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
+ if (error) {
+ if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
+ error = 0;
+ }
+ break;
+ }
+ if (filerec.recordType != kHFSPlusFileRecord) {
+ continue;
+ }
+ datablks = rsrcblks = 0;
+ /*
+ * Check if either fork overlaps target space.
+ */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+ if (filerec.dataFork.extents[i].blockCount != 0) {
+ datablks += filerec.dataFork.extents[i].blockCount;
+ block = filerec.dataFork.extents[i].startBlock +
+ filerec.dataFork.extents[i].blockCount;
+ if (block >= startblk) {
+ if ((filerec.fileID == hfsmp->hfs_jnlfileid) ||
+ (filerec.fileID == hfsmp->hfs_jnlinfoblkid)) {
+ printf("hfs_reclaimspace: cannot move active journal\n");
+ error = EPERM;
+ goto end_iteration;
+ }
+ cnidbufp[filecnt++] = filerec.fileID;
+ blkstomove += filerec.dataFork.totalBlocks;
+ break;
+ }
+ }
+ if (filerec.resourceFork.extents[i].blockCount != 0) {
+ rsrcblks += filerec.resourceFork.extents[i].blockCount;
+ block = filerec.resourceFork.extents[i].startBlock +
+ filerec.resourceFork.extents[i].blockCount;
+ if (block >= startblk) {
+ cnidbufp[filecnt++] = filerec.fileID;
+ blkstomove += filerec.resourceFork.totalBlocks;
+ break;
+ }
+ }
+ }
+ /*
+ * Check for any overflow extents that overlap.
+ */
+ if (i == kHFSPlusExtentDensity) {
+ if (filerec.dataFork.totalBlocks > datablks) {
+ if (hfs_overlapped_overflow_extents(hfsmp, startblk, datablks, filerec.fileID, 0)) {
+ cnidbufp[filecnt++] = filerec.fileID;
+ blkstomove += filerec.dataFork.totalBlocks;
+ }
+ } else if (filerec.resourceFork.totalBlocks > rsrcblks) {
+ if (hfs_overlapped_overflow_extents(hfsmp, startblk, rsrcblks, filerec.fileID, 1)) {
+ cnidbufp[filecnt++] = filerec.fileID;
+ blkstomove += filerec.resourceFork.totalBlocks;
+ }
+ }
+ }
+ }
+
+end_iteration:
+ if (filecnt == 0 && !system_file_moved) {
+ printf("hfs_reclaimspace: no files moved\n");
+ error = ENOSPC;
+ }
+ /* All done with catalog. */
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ if (error || filecnt == 0)
+ goto out;
+
+ /*
+ * Double check space requirements to make sure
+ * there is enough space to relocate any files
+ * that reside in the reclaim area.
+ *
+ * Blocks To Move --------------
+ * | | |
+ * V V V
+ * ------------------------------------------------------------------------
+ * | | / /// // |
+ * | | / /// // |
+ * | | / /// // |
+ * ------------------------------------------------------------------------
+ *
+ * <------------------- New Total Blocks ------------------><-- Reclaim -->
+ *
+ * <------------------------ Original Total Blocks ----------------------->
+ *
+ */
+ if (blkstomove >= hfs_freeblks(hfsmp, 1)) {
+ printf("hfs_truncatefs: insufficient space (need %lu blocks; have %u blocks)\n", blkstomove, hfs_freeblks(hfsmp, 1));
+ error = ENOSPC;
+ goto out;
+ }
+ hfsmp->hfs_resize_filesmoved = 0;
+ hfsmp->hfs_resize_totalfiles = filecnt;
+
+ /* Now move any files that are in the way. */
+ for (i = 0; i < filecnt; ++i) {
+ struct vnode * rvp;
+
+ if (hfs_vget(hfsmp, cnidbufp[i], &vp, 0) != 0)
+ continue;
+
+ /* Relocate any data fork blocks. */
+ if (VTOF(vp)->ff_blocks > 0) {
+ error = hfs_relocate(vp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
+ }
+ if (error)
+ break;
+
+ /* Relocate any resource fork blocks. */
+ if ((VTOC((vp))->c_blocks - VTOF((vp))->ff_blocks) > 0) {
+ error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
+ if (error)
+ break;
+ error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
+ VTOC(rvp)->c_flag |= C_NEED_RVNODE_PUT;
+ if (error)
+ break;
+ }
+ hfs_unlock(VTOC(vp));
+ vnode_put(vp);
+ vp = NULL;
+
+ ++hfsmp->hfs_resize_filesmoved;
+
+ /* Report intermediate progress. */
+ if (filecnt > 100) {
+ int progress;
+
+ progress = (i * 100) / filecnt;
+ if (progress > (lastprogress + 9)) {
+ printf("hfs_reclaimspace: %d%% done...\n", progress);
+ lastprogress = progress;
+ }
+ }
+ }
+ if (vp) {
+ hfs_unlock(VTOC(vp));
+ vnode_put(vp);
+ vp = NULL;
+ }
+ if (hfsmp->hfs_resize_filesmoved != 0) {
+ printf("hfs_reclaimspace: relocated %d files on \"%s\"\n",
+ (int)hfsmp->hfs_resize_filesmoved, hfsmp->vcbVN);
+ }
+out:
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
+
+ /*
+ * Restore the roving allocation pointer on errors.
+ * (but only if we didn't move any files)
+ */
+ if (error && hfsmp->hfs_resize_filesmoved == 0) {
+ HFS_UPDATE_NEXT_ALLOCATION(hfsmp, saved_next_allocation);
+ }
+ return (error);
+}
+
+
+/*
+ * Check if any of the file's overflow extents overlap the target space.
+ */
+static int
+hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, u_int32_t catblks, u_int32_t fileID, int rsrcfork)
+{
+ struct BTreeIterator * iterator = NULL;
+ struct FSBufferDescriptor btdata;
+ HFSPlusExtentRecord extrec;
+ HFSPlusExtentKey *extkeyptr;
+ FCB *fcb;
+ u_int32_t block;
+ u_int8_t forktype;
+ int overlapped = 0;
+ int i;
+ int error;
+
+ forktype = rsrcfork ? 0xFF : 0;
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
+ return (0);
+ }
+ bzero(iterator, sizeof(*iterator));
+ extkeyptr = (HFSPlusExtentKey *)&iterator->key;
+ extkeyptr->keyLength = kHFSPlusExtentKeyMaximumLength;
+ extkeyptr->forkType = forktype;
+ extkeyptr->fileID = fileID;
+ extkeyptr->startBlock = catblks;
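+ /* catblks = blocks covered by the catalog-resident extents, so the search starts at the first overflow extent record. */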
+
+ btdata.bufferAddress = &extrec;
+ btdata.itemSize = sizeof(extrec);
+ btdata.itemCount = 1;
+
+ fcb = VTOF(hfsmp->hfs_extents_vp);
+
+ error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
+ while (error == 0) {
+ /* Stop when we encounter a different file. */
+ if ((extkeyptr->fileID != fileID) ||
+ (extkeyptr->forkType != forktype)) {
+ break;
+ }
+ /*
+ * Check if the file overlaps target space.
+ */
+ for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+ if (extrec[i].blockCount == 0) {
+ break;
+ }
+ block = extrec[i].startBlock + extrec[i].blockCount;
+ if (block >= startblk) {
+ overlapped = 1;
+ break;
+ }
+ }
+ /* Look for more records. */
+ error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
+ }
+
+ kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
+ return (overlapped);
+}
+
+
+/*
+ * Calculate the progress of a file system resize operation.
+ */
+__private_extern__
+int
+hfs_resize_progress(struct hfsmount *hfsmp, u_int32_t *progress)
+{
+ if ((hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) == 0) {
+ return (ENXIO);
+ }
+
+ if (hfsmp->hfs_resize_totalfiles > 0)
+ *progress = (hfsmp->hfs_resize_filesmoved * 100) / hfsmp->hfs_resize_totalfiles;
+ else
+ *progress = 0;
+
+ return (0);
+}
+
+
+/*
+ * Get file system attributes.
+ */
+static int
+hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
+{
+#define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST))
+#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
+
+ ExtendedVCB *vcb = VFSTOVCB(mp);
+ struct hfsmount *hfsmp = VFSTOHFS(mp);
+ u_long freeCNIDs;
+
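+ /* Approximate number of catalog node IDs still available (CNIDs are 32-bit). */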
+ freeCNIDs = (u_long)0xFFFFFFFF - (u_long)hfsmp->vcbNxtCNID;
+
+ VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
+ VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
+ VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
+ VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
+ VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
+ VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
+ VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
+ VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
+ VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
+ /* XXX needs clarification */
+ VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
+ /* Maximum files is constrained by total blocks. */
+ VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2));
+ VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1)));
+
+ fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
+ fsap->f_fsid.val[1] = vfs_typenum(mp);
+ VFSATTR_SET_SUPPORTED(fsap, f_fsid);
+
+ VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
+ VFSATTR_RETURN(fsap, f_carbon_fsid, 0);
+
+ if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
+ vol_capabilities_attr_t *cap;
+
+ cap = &fsap->f_capabilities;
+
+ if (hfsmp->hfs_flags & HFS_STANDARD) {
+ cap->capabilities[VOL_CAPABILITIES_FORMAT] =
+ VOL_CAP_FMT_PERSISTENTOBJECTIDS |
+ VOL_CAP_FMT_CASE_PRESERVING |
+ VOL_CAP_FMT_FAST_STATFS |
+ VOL_CAP_FMT_HIDDEN_FILES |
+ VOL_CAP_FMT_PATH_FROM_ID;
+ } else {
+ cap->capabilities[VOL_CAPABILITIES_FORMAT] =
+ VOL_CAP_FMT_PERSISTENTOBJECTIDS |
+ VOL_CAP_FMT_SYMBOLICLINKS |
+ VOL_CAP_FMT_HARDLINKS |
+ VOL_CAP_FMT_JOURNAL |
+ VOL_CAP_FMT_ZERO_RUNS |
+ (hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
+ (hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
+ VOL_CAP_FMT_CASE_PRESERVING |
+ VOL_CAP_FMT_FAST_STATFS |
+ VOL_CAP_FMT_2TB_FILESIZE |
+ VOL_CAP_FMT_HIDDEN_FILES |
+ VOL_CAP_FMT_PATH_FROM_ID;
+ }
+ cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
+ VOL_CAP_INT_SEARCHFS |
+ VOL_CAP_INT_ATTRLIST |
+ VOL_CAP_INT_NFSEXPORT |
+ VOL_CAP_INT_READDIRATTR |
+ VOL_CAP_INT_EXCHANGEDATA |
+ VOL_CAP_INT_ALLOCATE |
+ VOL_CAP_INT_VOL_RENAME |
+ VOL_CAP_INT_ADVLOCK |
+ VOL_CAP_INT_FLOCK |
+#if NAMEDSTREAMS
+ VOL_CAP_INT_EXTENDED_ATTR |
+ VOL_CAP_INT_NAMEDSTREAMS;
+#else
+ VOL_CAP_INT_EXTENDED_ATTR;
+#endif
+ cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
+ cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
+
+ cap->valid[VOL_CAPABILITIES_FORMAT] =
+ VOL_CAP_FMT_PERSISTENTOBJECTIDS |
+ VOL_CAP_FMT_SYMBOLICLINKS |
+ VOL_CAP_FMT_HARDLINKS |
+ VOL_CAP_FMT_JOURNAL |
+ VOL_CAP_FMT_JOURNAL_ACTIVE |
+ VOL_CAP_FMT_NO_ROOT_TIMES |
+ VOL_CAP_FMT_SPARSE_FILES |
+ VOL_CAP_FMT_ZERO_RUNS |
+ VOL_CAP_FMT_CASE_SENSITIVE |
+ VOL_CAP_FMT_CASE_PRESERVING |
+ VOL_CAP_FMT_FAST_STATFS |
+ VOL_CAP_FMT_2TB_FILESIZE |
+ VOL_CAP_FMT_OPENDENYMODES |
+ VOL_CAP_FMT_HIDDEN_FILES |
+ VOL_CAP_FMT_PATH_FROM_ID;
+ cap->valid[VOL_CAPABILITIES_INTERFACES] =
+ VOL_CAP_INT_SEARCHFS |
+ VOL_CAP_INT_ATTRLIST |
+ VOL_CAP_INT_NFSEXPORT |
+ VOL_CAP_INT_READDIRATTR |
+ VOL_CAP_INT_EXCHANGEDATA |
+ VOL_CAP_INT_COPYFILE |
+ VOL_CAP_INT_ALLOCATE |
+ VOL_CAP_INT_VOL_RENAME |
+ VOL_CAP_INT_ADVLOCK |
+ VOL_CAP_INT_FLOCK |
+ VOL_CAP_INT_MANLOCK |
+#if NAMEDSTREAMS
+ VOL_CAP_INT_EXTENDED_ATTR |
+ VOL_CAP_INT_NAMEDSTREAMS;
+#else
+ VOL_CAP_INT_EXTENDED_ATTR;
+#endif
+ cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
+ cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
+ VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
+ }
+ if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
+ vol_attributes_attr_t *attrp = &fsap->f_attributes;
+
+ attrp->validattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
+ attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
+ attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
+ attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
+ attrp->validattr.forkattr = 0;
+
+ attrp->nativeattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
+ attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
+ attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
+ attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
+ attrp->nativeattr.forkattr = 0;
+ VFSATTR_SET_SUPPORTED(fsap, f_attributes);
+ }
+ fsap->f_create_time.tv_sec = hfsmp->vcbCrDate;
+ fsap->f_create_time.tv_nsec = 0;
+ VFSATTR_SET_SUPPORTED(fsap, f_create_time);
+ fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
+ fsap->f_modify_time.tv_nsec = 0;
+ VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
+
+ fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
+ fsap->f_backup_time.tv_nsec = 0;
+ VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
+ if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
+ u_int16_t subtype = 0;
+
+ /*
+ * Subtypes (flavors) for HFS
+ * 0: Mac OS Extended
+ * 1: Mac OS Extended (Journaled)
+ * 2: Mac OS Extended (Case Sensitive)
+ * 3: Mac OS Extended (Case Sensitive, Journaled)
+ * 4 - 127: Reserved
+ * 128: Mac OS Standard
+ *
+ */
+ if (hfsmp->hfs_flags & HFS_STANDARD) {
+ subtype = HFS_SUBTYPE_STANDARDHFS;
+ } else /* HFS Plus */ {
+ if (hfsmp->jnl)
+ subtype |= HFS_SUBTYPE_JOURNALED;
+ if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)
+ subtype |= HFS_SUBTYPE_CASESENSITIVE;
+ }
+ fsap->f_fssubtype = subtype;
+ VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
+ }
+
+ if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
+ strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
+ VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
+ }
+ return (0);
+}
+
+/*
+ * Perform a volume rename. Requires the FS' root vp.
+ */
+static int
+hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
+{
+ ExtendedVCB *vcb = VTOVCB(vp);
+ struct cnode *cp = VTOC(vp);
+ struct hfsmount *hfsmp = VTOHFS(vp);
+ struct cat_desc to_desc;
+ struct cat_desc todir_desc;
+ struct cat_desc new_desc;
+ cat_cookie_t cookie;
+ int lockflags;
+ int error = 0;
+
+ /*
+ * Ignore attempts to rename a volume to a zero-length name.
+ */
+ if (name[0] == 0)
+ return(0);
+
+ bzero(&to_desc, sizeof(to_desc));
+ bzero(&todir_desc, sizeof(todir_desc));
+ bzero(&new_desc, sizeof(new_desc));
+ bzero(&cookie, sizeof(cookie));
+
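+ /*
+ * The volume name is stored as the name of the root folder's catalog
+ * record, so renaming the volume is done as a catalog rename of that
+ * record.
+ */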
+ todir_desc.cd_parentcnid = kHFSRootParentID;
+ todir_desc.cd_cnid = kHFSRootFolderID;
+ todir_desc.cd_flags = CD_ISDIR;
+
+ to_desc.cd_nameptr = (const u_int8_t *)name;
+ to_desc.cd_namelen = strlen(name);
+ to_desc.cd_parentcnid = kHFSRootParentID;
+ to_desc.cd_cnid = cp->c_cnid;
+ to_desc.cd_flags = CD_ISDIR;
+
+ if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) == 0) {
+ if ((error = hfs_start_transaction(hfsmp)) == 0) {
+ if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+ error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);
+
+ /*
+ * If successful, update the name in the VCB; strlcpy ensures it is NUL-terminated.
+ */
+ if (!error) {
+ strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));
+ }
+
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ cat_postflight(hfsmp, &cookie, p);
+
+ if (error)
+ MarkVCBDirty(vcb);
+ (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
+ }
+ hfs_end_transaction(hfsmp);
+ }
+ if (!error) {
+ /* Release old allocated name buffer */
+ if (cp->c_desc.cd_flags & CD_HASBUF) {
+ const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;
+
+ cp->c_desc.cd_nameptr = 0;
+ cp->c_desc.cd_namelen = 0;
+ cp->c_desc.cd_flags &= ~CD_HASBUF;
+ vfs_removename(tmp_name);
+ }
+ /* Update cnode's catalog descriptor */
+ replace_desc(cp, &new_desc);
+ vcb->volumeNameEncodingHint = new_desc.cd_encoding;
+ cp->c_touch_chgtime = TRUE;
+ }
+
+ hfs_unlock(cp);
+ }
+
+ return(error);
+}
+
+/*
+ * Set file system attributes.
+ */
+static int
+hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
+{
+ kauth_cred_t cred = vfs_context_ucred(context);
+ int error = 0;
+
+ /*
+ * Must be superuser or owner of filesystem to change volume attributes
+ */
+ if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
+ return(EACCES);
+
+ if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
+ vnode_t root_vp;
+
+ error = hfs_vfs_root(mp, &root_vp, context);
+ if (error)
+ goto out;
+
+ error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
+ (void) vnode_put(root_vp);
+ if (error)
+ goto out;
+
+ VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
+ }
+
+out:
+ return error;
+}
+
+/* If a runtime corruption is detected, set the volume inconsistent
+ * bit in the volume attributes. The volume inconsistent bit is a persistent
+ * bit which represents that the volume is corrupt and needs repair.
+ * The volume inconsistent bit can be set from the kernel when it detects
+ * runtime corruption or from file system repair utilities like fsck_hfs when
+ * a repair operation fails. The bit should be cleared only from file system
+ * verify/repair utility like fsck_hfs when a verify/repair succeeds.
+ */
+void hfs_mark_volume_inconsistent(struct hfsmount *hfsmp)
+{
+ HFS_MOUNT_LOCK(hfsmp, TRUE);
+ if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
+ hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
+ MarkVCBDirty(hfsmp);
+ }
+ /* Log information to ASL log */
+ fslog_fs_corrupt(hfsmp->hfs_mp);
+ printf("HFS: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp->vcbVN);
+ HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+}
+
+/* Replay the journal on the device node provided. Returns zero if
+ * journal replay succeeded or no journal was supposed to be replayed.
+ */
+static int hfs_journal_replay(const char *devnode, vfs_context_t context)
+{
+ int retval = 0;
+ struct vnode *devvp = NULL;
+ struct mount *mp = NULL;
+ struct hfs_mount_args *args = NULL;
+
+ /* Lookup vnode for given raw device path */
+ retval = vnode_open(devnode, FREAD|FWRITE, 0, 0, &devvp, NULL);
+ if (retval) {
+ goto out;
+ }
+
+ /* Replay allowed only on raw devices */
+ if (!vnode_ischr(devvp)) {
+ retval = EINVAL;
+ goto out;
+ }
+
+ /* Create dummy mount structures */
+ MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK);
+ bzero(mp, sizeof(struct mount));
+ mount_lock_init(mp);
+
+ MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK);
+ bzero(args, sizeof(struct hfs_mount_args));
+
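+ /*
+ * hfs_mountfs is invoked here in journal-replay-only mode (presumably
+ * what the fourth argument selects); it replays the journal against the
+ * raw device without leaving the volume mounted.
+ */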
+ retval = hfs_mountfs(devvp, mp, args, 1, context);
+ buf_flushdirtyblks(devvp, MNT_WAIT, 0, "hfs_journal_replay");
+
+out:
+ if (mp) {
+ mount_lock_destroy(mp);
+ FREE(mp, M_TEMP);
+ }
+ if (args) {
+ FREE(args, M_TEMP);
+ }
+ if (devvp) {
+ vnode_close(devvp, FREAD|FWRITE, NULL);
+ }
+ return retval;
+}