diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c index c0dc7253d..4e5b76b14 100644 --- a/bsd/hfs/hfs_vfsops.c +++ b/bsd/hfs/hfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2008 Apple Inc. All rights reserved. + * Copyright (c) 1999-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * @@ -75,6 +75,7 @@ #include #include +#include #include #include #include @@ -86,6 +87,7 @@ #include #include #include +#include #include @@ -94,6 +96,9 @@ #include #include +#include +#include + #include "hfs.h" #include "hfs_catalog.h" #include "hfs_cnode.h" @@ -101,22 +106,40 @@ #include "hfs_endian.h" #include "hfs_hotfiles.h" #include "hfs_quota.h" +#include "hfs_btreeio.h" #include "hfscommon/headers/FileMgrInternal.h" #include "hfscommon/headers/BTreesInternal.h" +#if CONFIG_PROTECT +#include +#endif + +#if CONFIG_HFS_ALLOC_RBTREE +#include "hfscommon/headers/HybridAllocator.h" +#endif + +#define HFS_MOUNT_DEBUG 1 + #if HFS_DIAGNOSTIC int hfs_dbg_all = 0; int hfs_dbg_err = 0; #endif +/* Enable/disable debugging code for live volume resizing */ int hfs_resize_debug = 0; lck_grp_attr_t * hfs_group_attr; lck_attr_t * hfs_lock_attr; lck_grp_t * hfs_mutex_group; lck_grp_t * hfs_rwlock_group; +lck_grp_t * hfs_spinlock_group; extern struct vnodeopv_desc hfs_vnodeop_opv_desc; +extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc; + +/* not static so we can re-use in hfs_readwrite.c for build_path calls */ +int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context); static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args); static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context); @@ -124,31 +147,30 @@ static int hfs_flushfiles(struct mount *, int, struct proc *); static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush); static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp); static int hfs_init(struct vfsconf *vfsp); -static int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context); -static int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context); -static int hfs_reload(struct mount *mp); static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context); static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context); static int hfs_start(struct mount *mp, int flags, vfs_context_t context); -static int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context); -static int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context); -static int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen, vfs_context_t context); -static int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context); -static int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context); static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context); - -static int hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context); -static int hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, - u_int32_t catblks, u_int32_t fileID, int rsrcfork);
-static int hfs_journal_replay(const char *devnode, vfs_context_t context); - +static int hfs_file_extent_overlaps(struct hfsmount *hfsmp, u_int32_t allocLimit, struct HFSPlusCatalogFile *filerec); +static int hfs_journal_replay(vnode_t devvp, vfs_context_t context); +static int hfs_reclaimspace(struct hfsmount *hfsmp, u_int32_t allocLimit, u_int32_t reclaimblks, vfs_context_t context); + +void hfs_initialize_allocator (struct hfsmount *hfsmp); +int hfs_teardown_allocator (struct hfsmount *hfsmp); + +int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context); +int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context); +int hfs_reload(struct mount *mp); +int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context); +int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context); +int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, + user_addr_t newp, size_t newlen, vfs_context_t context); +int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context); /* * Called by vfs_mountroot when mounting HFS Plus as root. */ -__private_extern__ int hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context) { @@ -157,10 +179,13 @@ hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context) struct vfsstatfs *vfsp; int error; - hfs_chashinit_finish(); - - if ((error = hfs_mountfs(rvp, mp, NULL, 0, context))) + if ((error = hfs_mountfs(rvp, mp, NULL, 0, context))) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountroot: hfs_mountfs returned %d, rvp (%p) name (%s) \n", + error, rvp, (rvp->v_name ? rvp->v_name : "unknown device")); + } return (error); + } /* Init hfsmp */ hfsmp = VFSTOHFS(mp); @@ -188,7 +213,7 @@ hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context) * mount system call */ -static int +int hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context) { struct proc *p = vfs_context_proc(context); @@ -198,6 +223,9 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte u_int32_t cmdflags; if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: copyin returned %d for fs\n", retval); + } return (retval); } cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS; @@ -206,10 +234,19 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte /* Reload incore data after an fsck. */ if (cmdflags & MNT_RELOAD) { - if (vfs_isrdonly(mp)) - return hfs_reload(mp); - else + if (vfs_isrdonly(mp)) { + int error = hfs_reload(mp); + if (error && HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN); + } + return error; + } + else { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN); + } return (EINVAL); + } } /* Change to a read-only file system. */ @@ -217,19 +254,43 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte vfs_isrdonly(mp)) { int flags; + /* Set flag to indicate that a downgrade to read-only + * is in progress and therefore block any further + * modifications to the file system. 
+ */ + hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); + hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE; + hfsmp->hfs_downgrading_proc = current_thread(); + hfs_unlock_global (hfsmp); + /* use VFS_SYNC to push out System (btree) files */ retval = VFS_SYNC(mp, MNT_WAIT, context); - if (retval && ((cmdflags & MNT_FORCE) == 0)) + if (retval && ((cmdflags & MNT_FORCE) == 0)) { + hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE; + hfsmp->hfs_downgrading_proc = NULL; + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN); + } goto out; + } flags = WRITECLOSE; if (cmdflags & MNT_FORCE) flags |= FORCECLOSE; - if ((retval = hfs_flushfiles(mp, flags, p))) + if ((retval = hfs_flushfiles(mp, flags, p))) { + hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE; + hfsmp->hfs_downgrading_proc = NULL; + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN); + } goto out; - hfsmp->hfs_flags |= HFS_READ_ONLY; + } + + /* mark the volume cleanly unmounted */ + hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask; retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); + hfsmp->hfs_flags |= HFS_READ_ONLY; /* also get the volume bitmap blocks */ if (!retval) { @@ -242,11 +303,16 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte } } if (retval) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN); + } + hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE; + hfsmp->hfs_downgrading_proc = NULL; hfsmp->hfs_flags &= ~HFS_READ_ONLY; goto out; } if (hfsmp->jnl) { - hfs_global_exclusive_lock_acquire(hfsmp); + hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); journal_close(hfsmp->jnl); hfsmp->jnl = NULL; @@ -255,12 +321,20 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte // access to the jvp because we may need // it later if we go back to being read-write. - hfs_global_exclusive_lock_release(hfsmp); + hfs_unlock_global (hfsmp); } + +#if CONFIG_HFS_ALLOC_RBTREE + (void) hfs_teardown_allocator(hfsmp); +#endif + hfsmp->hfs_downgrading_proc = NULL; } /* Change to a writable file system. 
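Before moving on to the write-upgrade path, note the ordering the downgrade code above enforces: HFS_RDONLY_DOWNGRADE is raised under the global lock before anything is flushed, the volume is marked cleanly unmounted, the header goes out, and only then is HFS_READ_ONLY set, with the flags rolled back on every failure path. A compilable toy model of that ordering; the two flag names are borrowed from the diff, everything else is invented:

#include <stdio.h>

enum { RDONLY_DOWNGRADE = 1 << 0, READ_ONLY = 1 << 1 };

struct toy_mount { unsigned flags; };

/* stand-ins for VFS_SYNC, hfs_flushfiles and hfs_flushvolumeheader */
static int sync_btrees(struct toy_mount *m)        { (void)m; return 0; }
static int flush_files(struct toy_mount *m)        { (void)m; return 0; }
static int write_header_clean(struct toy_mount *m) { (void)m; return 0; }

static int downgrade_to_readonly(struct toy_mount *m)
{
    int err;

    m->flags |= RDONLY_DOWNGRADE;          /* block new writers first */
    if ((err = sync_btrees(m)) != 0 ||
        (err = flush_files(m)) != 0 ||
        (err = write_header_clean(m)) != 0) {
        m->flags &= ~RDONLY_DOWNGRADE;     /* failed: let writers resume */
        return err;
    }
    m->flags |= READ_ONLY;                 /* only now refuse writes */
    return 0;
}

int main(void)
{
    struct toy_mount m = { 0 };
    printf("err=%d flags=0x%x\n", downgrade_to_readonly(&m), m.flags);
    return 0;
}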
*/ if (vfs_iswriteupgrade(mp)) { +#if CONFIG_HFS_ALLOC_RBTREE + thread_t allocator_thread; +#endif /* * On inconsistent disks, do not allow read-write mount @@ -268,15 +342,13 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte */ if (!(vfs_flags(mp) & MNT_ROOTFS) && (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n", (hfsmp->vcbVN)); + } retval = EINVAL; goto out; } - - retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); - if (retval != E_NONE) - goto out; - // If the journal was shut-down previously because we were // asked to be read-only, let's start it back up again now @@ -287,36 +359,76 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) { jflags = JOURNAL_RESET; - } else { + } else { jflags = 0; - } - - hfs_global_exclusive_lock_acquire(hfsmp); - - hfsmp->jnl = journal_open(hfsmp->jvp, - (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset, - hfsmp->jnl_size, - hfsmp->hfs_devvp, - hfsmp->hfs_phys_block_size, - jflags, - 0, - hfs_sync_metadata, hfsmp->hfs_mp); - - hfs_global_exclusive_lock_release(hfsmp); - - if (hfsmp->jnl == NULL) { - retval = EINVAL; - goto out; - } else { - hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET; - } + } + + hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); + + hfsmp->jnl = journal_open(hfsmp->jvp, + (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset, + hfsmp->jnl_size, + hfsmp->hfs_devvp, + hfsmp->hfs_logical_block_size, + jflags, + 0, + hfs_sync_metadata, hfsmp->hfs_mp); + + /* + * Set up the trim callback function so that we can add + * recently freed extents to the free extent cache once + * the transaction that freed them is written to the + * journal on disk. + */ + if (hfsmp->jnl) + journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp); + + hfs_unlock_global (hfsmp); + + if (hfsmp->jnl == NULL) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN)); + } + retval = EINVAL; + goto out; + } else { + hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET; + } + + } + /* See if we need to erase unused Catalog nodes due to . */ + retval = hfs_erase_unused_nodes(hfsmp); + if (retval != E_NONE) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN); + } + goto out; } - /* Only clear HFS_READ_ONLY after a successfull write */ + /* If this mount point was downgraded from read-write + * to read-only, clear that information as we are now + * moving back to read-write. + */ + hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE; + hfsmp->hfs_downgrading_proc = NULL; + + /* mark the volume dirty (clear clean unmount bit) */ + hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask; + + retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); + if (retval != E_NONE) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN); + } + goto out; + } + + /* Only clear HFS_READ_ONLY after a successful write */ hfsmp->hfs_flags &= ~HFS_READ_ONLY; - if (!(hfsmp->hfs_flags & (HFS_READ_ONLY & HFS_STANDARD))) { + + if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) { /* Setup private/hidden directories for hardlinks. 
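A note on the journal_open() call above: the byte offset handed to it is plain arithmetic, the journal's starting allocation block scaled to bytes plus hfsPlusIOPosOffset for a volume embedded in an HFS wrapper, and the block-size argument now uses the renamed hfs_logical_block_size field. A standalone sketch of that computation with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* toy_vcb is not the real VCB; it carries just the three inputs */
struct toy_vcb {
    uint64_t jnl_start;           /* journal start, in allocation blocks */
    uint32_t blockSize;           /* allocation block size, in bytes */
    uint64_t hfsPlusIOPosOffset;  /* byte offset of an embedded HFS+ volume */
};

static uint64_t journal_byte_offset(const struct toy_vcb *v)
{
    /* same shape as the first argument built for journal_open() */
    return v->jnl_start * v->blockSize + v->hfsPlusIOPosOffset;
}

int main(void)
{
    struct toy_vcb plain    = { 16, 4096, 0 };
    struct toy_vcb embedded = { 16, 4096, 1161216 }; /* wrapped volume */

    printf("plain: %llu\n",
           (unsigned long long)journal_byte_offset(&plain));    /* 65536 */
    printf("embedded: %llu\n",
           (unsigned long long)journal_byte_offset(&embedded)); /* 1226752 */
    return 0;
}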
*/ hfs_privatedir_init(hfsmp, FILE_HARDLINKS); hfs_privatedir_init(hfsmp, DIR_HARDLINKS); @@ -326,7 +438,8 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte /* * Allow hot file clustering if conditions allow. */ - if (hfsmp->hfs_flags & HFS_METADATA_ZONE) { + if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) && + ((hfsmp->hfs_flags & HFS_SSD) == 0)) { (void) hfs_recording_init(hfsmp); } /* Force ACLs on HFS+ file systems. */ @@ -334,19 +447,90 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte vfs_setextendedsecurity(HFSTOVFS(hfsmp)); } } + +#if CONFIG_HFS_ALLOC_RBTREE + /* + * Like the normal mount case, we need to handle creation of the allocation red-black tree + * if we're upgrading from read-only to read-write. + * + * We spawn a thread to create the pair of red-black trees for this volume. + * However, in so doing, we must be careful to ensure that if this thread is still + * running after mount has finished, it doesn't interfere with an unmount. Specifically, + * we'll need to set a bit that indicates we're in progress building the trees here. + * Unmount will check for this bit, and then if it's set, mark a corresponding bit that + * notifies the tree generation code that an unmount is waiting. Also, mark the extent + * tree flags that the allocator is enabled for use before we spawn the thread that will start + * scanning the RB tree. + * + * Only do this if we're operating on a read-write mount (we wouldn't care for read-only), + * which has not previously encountered a bad error on the red-black tree code. Also, don't + * try to re-build a tree that already exists. + */ + + if (hfsmp->extent_tree_flags == 0) { + hfsmp->extent_tree_flags |= (HFS_ALLOC_TREEBUILD_INFLIGHT | HFS_ALLOC_RB_ENABLED); + /* Initialize EOF counter so that the thread can assume it started at initial values */ + hfsmp->offset_block_end = 0; + + InitTree(hfsmp); + + kernel_thread_start ((thread_continue_t) hfs_initialize_allocator , hfsmp, &allocator_thread); + thread_deallocate(allocator_thread); + } + +#endif } /* Update file system parameters. */ retval = hfs_changefs(mp, &args); + if (retval && HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN); + } } else /* not an update request */ { /* Set the mount flag to indicate that we support volfs */ vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS)); - hfs_chashinit_finish(); - retval = hfs_mountfs(devvp, mp, &args, 0, context); + if (retval && HFS_MOUNT_DEBUG) { + printf("hfs_mount: hfs_mountfs returned %d\n", retval); + } +#if CONFIG_PROTECT + /* + * If above mount call was successful, and this mount is content protection + * enabled, then verify the on-disk EA on the root to ensure that the filesystem + * is of a suitable vintage to allow the mount to proceed. + */ + if ((retval == 0) && (cp_fs_protected (mp))) { + int err = 0; + struct cp_root_xattr xattr; + bzero (&xattr, sizeof(struct cp_root_xattr)); + hfsmp = vfs_fsprivate(mp); + + /* go get the EA to get the version information */ + err = cp_getrootxattr (hfsmp, &xattr); + /* If there was no EA there, then write one out. 
*/ + if (err == ENOATTR) { + bzero(&xattr, sizeof(struct cp_root_xattr)); + xattr.major_version = CP_CURRENT_MAJOR_VERS; + xattr.minor_version = CP_CURRENT_MINOR_VERS; + xattr.flags = 0; + + err = cp_setrootxattr (hfsmp, &xattr); + } + /* + * For any other error, including having an out of date CP version in the + * EA, or for an error out of cp_setrootxattr, deny the mount + * and do not proceed further. + */ + if (err || xattr.major_version != CP_CURRENT_MAJOR_VERS) { + /* Deny the mount and tear down. */ + retval = EPERM; + (void) hfs_unmount (mp, MNT_FORCE, context); + } + } +#endif } out: if (retval == 0) { @@ -371,13 +555,18 @@ hfs_changefs_callback(struct vnode *vp, void *cargs) struct cat_desc cndesc; struct cat_attr cnattr; struct hfs_changefs_cargs *args; + int lockflags; + int error; args = (struct hfs_changefs_cargs *)cargs; cp = VTOC(vp); vcb = HFSTOVCB(args->hfsmp); - if (cat_lookup(args->hfsmp, &cp->c_desc, 0, &cndesc, &cnattr, NULL, NULL)) { + lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_lookup(args->hfsmp, &cp->c_desc, 0, &cndesc, &cnattr, NULL, NULL); + hfs_systemfile_unlock(args->hfsmp, lockflags); + if (error) { /* * If we couldn't find this guy skip to the next one */ @@ -423,7 +612,7 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) ExtendedVCB *vcb; hfs_to_unicode_func_t get_unicode_func; unicode_to_hfs_func_t get_hfsname_func; - u_long old_encoding = 0; + u_int32_t old_encoding = 0; struct hfs_changefs_cargs cargs; u_int32_t mount_flags; @@ -431,6 +620,8 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) vcb = HFSTOVCB(hfsmp); mount_flags = (unsigned int)vfs_flags(mp); + hfsmp->hfs_flags |= HFS_IN_CHANGEFS; + permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) && ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) || (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) && @@ -439,7 +630,8 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) /* The root filesystem must operate with actual permissions: */ if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) { vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS)); /* Just say "No". 
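The other recurring change in these hunks is taking the catalog system-file lock shared around cat_lookup(), as hfs_changefs_callback now does (hfs_reload_callback below gets the same treatment). Reduced to its shape, with a POSIX rwlock standing in for hfs_systemfile_lock and every name invented:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t catalog_lock = PTHREAD_RWLOCK_INITIALIZER;
static int catalog[8] = { [3] = 42 };   /* toy catalog: cnid -> payload */

static int cat_lookup_toy(int cnid, int *out)
{
    if (cnid < 0 || cnid >= 8 || catalog[cnid] == 0)
        return -1;
    *out = catalog[cnid];
    return 0;
}

static int locked_lookup(int cnid, int *out)
{
    int err;

    /* Readers share the lock; b-tree writers take it exclusive. The
     * whole lookup must sit inside the critical section so the b-tree
     * cannot change under the iterator. */
    pthread_rwlock_rdlock(&catalog_lock);
    err = cat_lookup_toy(cnid, out);
    pthread_rwlock_unlock(&catalog_lock);
    return err;
}

int main(void)
{
    int val = 0;
    printf("err=%d val=%d\n", locked_lookup(3, &val), val);
    return 0;
}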
*/ - return EINVAL; + retval = EINVAL; + goto exit; } if (mount_flags & MNT_UNKNOWNPERMISSIONS) hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS; @@ -485,7 +677,7 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) /* Change the hfs encoding value (hfs only) */ if ((vcb->vcbSigWord == kHFSSigWord) && - (args->hfs_encoding != (u_long)VNOVAL) && + (args->hfs_encoding != (u_int32_t)VNOVAL) && (hfsmp->hfs_encoding != args->hfs_encoding)) { retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func); @@ -525,8 +717,9 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) * * hfs_changefs_callback will be called for each vnode * hung off of this mount point - * the vnode will be - * properly referenced and unreferenced around the callback + * + * The vnode will be properly referenced and unreferenced + * around the callback */ cargs.hfsmp = hfsmp; cargs.namefix = namefix; @@ -546,6 +739,7 @@ hfs_changefs(struct mount *mp, struct hfs_mount_args *args) (void) hfs_relconverter(old_encoding); } exit: + hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS; return (retval); } @@ -560,6 +754,7 @@ hfs_reload_callback(struct vnode *vp, void *cargs) { struct cnode *cp; struct hfs_reload_cargs *args; + int lockflags; args = (struct hfs_reload_cargs *)cargs; /* @@ -577,15 +772,19 @@ hfs_reload_callback(struct vnode *vp, void *cargs) /* * Re-read cnode data for all active vnodes (non-metadata files). */ - if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp)) { + if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) { struct cat_fork *datafork; struct cat_desc desc; datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL; /* lookup by fileID since name could have changed */ - if ((args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, &desc, &cp->c_attr, datafork))) + lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, &desc, &cp->c_attr, datafork); + hfs_systemfile_unlock(args->hfsmp, lockflags); + if (args->error) { return (VNODE_RETURNED_DONE); + } /* update cnode's catalog descriptor */ (void) replace_desc(cp, &desc); @@ -607,12 +806,11 @@ hfs_reload_callback(struct vnode *vp, void *cargs) * re-load B-tree header data. * re-read cnode data for all active vnodes. */ -static int +int hfs_reload(struct mount *mountp) { register struct vnode *devvp; struct buf *bp; - int sectorsize; int error, i; struct hfsmount *hfsmp; struct HFSPlusVolumeHeader *vhp; @@ -620,6 +818,7 @@ hfs_reload(struct mount *mountp) struct filefork *forkp; struct cat_desc cndesc; struct hfs_reload_cargs args; + daddr64_t priIDSector; hfsmp = VFSTOHFS(mountp); vcb = HFSTOVCB(hfsmp); @@ -651,18 +850,19 @@ hfs_reload(struct mount *mountp) /* * Re-read VolumeHeader from disk. 
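The read that follows no longer assumes 512-byte device sectors: it computes the header's logical sector, rounds down to a physical-block boundary, reads one whole physical block, and indexes back in for the header. A worked example, assuming the volume header lives at byte offset 1024 from the volume start, which is what the HFS_PRI_* macros encode (the macro bodies below are my reading of them, not copies):

#include <stdint.h>
#include <stdio.h>

#define PRI_SECTOR(log_bs)        (1024 / (log_bs))
#define PRI_OFFSET(phys_bs)       (1024 % (phys_bs))
#define PHYSBLK_ROUNDDOWN(sec, n) (((sec) / (n)) * (n))

int main(void)
{
    uint32_t log_bs  = 512;   /* logical block size the device reports */
    uint32_t phys_bs = 4096;  /* physical block size (a 4K-sector disk) */
    uint64_t pos_off = 0;     /* hfsPlusIOPosOffset: 0 unless embedded */

    uint32_t log_per_phys = phys_bs / log_bs;                      /* 8 */
    uint64_t pri_sector   = pos_off / log_bs + PRI_SECTOR(log_bs); /* 2 */
    uint64_t read_blk     = PHYSBLK_ROUNDDOWN(pri_sector, log_per_phys);

    printf("read %u bytes at sector %llu, header at offset %u\n",
           phys_bs, (unsigned long long)read_blk, PRI_OFFSET(phys_bs));
    return 0;
}

On a 4K disk this reads sectors 0 through 7 as one physical block and finds the header 1024 bytes in; with 512-byte physical sectors it degenerates to the old read of sector 2.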
*/ - sectorsize = hfsmp->hfs_phys_block_size; + priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + + HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size)); error = (int)buf_meta_bread(hfsmp->hfs_devvp, - (daddr64_t)((vcb->hfsPlusIOPosOffset / sectorsize) + HFS_PRI_SECTOR(sectorsize)), - sectorsize, NOCRED, &bp); + HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp); if (error) { if (bp != NULL) buf_brelse(bp); return (error); } - vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(sectorsize)); + vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); /* Do a quick sanity check */ if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord && @@ -778,19 +978,252 @@ hfs_reload(struct mount *mountp) } + +static void +hfs_syncer(void *arg0, void *unused) +{ +#pragma unused(unused) + + struct hfsmount *hfsmp = arg0; + clock_sec_t secs; + clock_usec_t usecs; + uint32_t delay = HFS_META_DELAY; + uint64_t now; + static int no_max=1; + + clock_get_calendar_microtime(&secs, &usecs); + now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs; + + // + // If the amount of pending writes is more than our limit, wait + // for 2/3 of it to drain and then flush the journal. + // + if (hfsmp->hfs_mp->mnt_pending_write_size > hfsmp->hfs_max_pending_io) { + int counter=0; + uint64_t pending_io, start, rate = 0; + + no_max = 0; + + hfs_start_transaction(hfsmp); // so we hold off any new i/o's + + pending_io = hfsmp->hfs_mp->mnt_pending_write_size; + + clock_get_calendar_microtime(&secs, &usecs); + start = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs; + + while(hfsmp->hfs_mp->mnt_pending_write_size > (pending_io/3) && counter++ < 500) { + tsleep((caddr_t)hfsmp, PRIBIO, "hfs-wait-for-io-to-drain", 10); + } + + if (counter >= 500) { + printf("hfs: timed out waiting for io to drain (%lld)\n", (int64_t)hfsmp->hfs_mp->mnt_pending_write_size); + } + + if (hfsmp->jnl) { + journal_flush(hfsmp->jnl, FALSE); + } else { + hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel()); + } + + clock_get_calendar_microtime(&secs, &usecs); + now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs; + hfsmp->hfs_last_sync_time = now; + if (now != start) { + rate = ((pending_io * 1000000ULL) / (now - start)); // yields bytes per second + } + + hfs_end_transaction(hfsmp); + + // + // If a reasonable amount of time elapsed then check the + // i/o rate. If it's taking less than 1 second or more + // than 2 seconds, adjust hfs_max_pending_io so that we + // will allow about 1.5 seconds of i/o to queue up. + // + if (((now - start) >= 300000) && (rate != 0)) { + uint64_t scale = (pending_io * 100) / rate; + + if (scale < 100 || scale > 200) { + // set it so that it should take about 1.5 seconds to drain + hfsmp->hfs_max_pending_io = (rate * 150ULL) / 100ULL; + } + } + + } else if ( ((now - hfsmp->hfs_last_sync_time) >= 5000000ULL) + || (((now - hfsmp->hfs_last_sync_time) >= 100000LL) + && ((now - hfsmp->hfs_last_sync_request_time) >= 100000LL) + && (hfsmp->hfs_active_threads == 0) + && (hfsmp->hfs_global_lock_nesting == 0))) { + + // + // Flush the journal if more than 5 seconds elapsed since + // the last sync OR we have not sync'ed recently and the + // last sync request time was more than 100 milliseconds + // ago and no one is in the middle of a transaction right + // now. Else we defer the sync and reschedule it. 
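To make the adaptive throttle above concrete: after the drain loop, the syncer computes the observed write rate, and whenever the backlog it just waited out represents less than one second or more than two seconds of i/o, it re-targets hfs_max_pending_io at roughly 1.5 seconds' worth. The same integer arithmetic with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pending_io = 48ULL * 1024 * 1024;  /* bytes queued at start */
    uint64_t elapsed_us = 600000;               /* drain took 0.6 s */

    /* bytes per second, as computed after the drain loop */
    uint64_t rate = pending_io * 1000000ULL / elapsed_us;

    /* scale == 100 would mean the backlog was exactly 1 s of i/o */
    uint64_t scale = pending_io * 100 / rate;

    uint64_t new_cap = 0;
    if (scale < 100 || scale > 200)
        new_cap = rate * 150ULL / 100ULL;  /* ~1.5 s of queued i/o */

    printf("rate=%llu B/s scale=%llu new cap=%llu bytes\n",
           (unsigned long long)rate, (unsigned long long)scale,
           (unsigned long long)new_cap);   /* 83886080, 60, 125829120 */
    return 0;
}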
+ // + if (hfsmp->jnl) { + hfs_lock_global (hfsmp, HFS_SHARED_LOCK); + + journal_flush(hfsmp->jnl, FALSE); + + hfs_unlock_global (hfsmp); + } else { + hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel()); + } + + clock_get_calendar_microtime(&secs, &usecs); + now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs; + hfsmp->hfs_last_sync_time = now; + + } else if (hfsmp->hfs_active_threads == 0) { + uint64_t deadline; + + clock_interval_to_deadline(delay, HFS_MILLISEC_SCALE, &deadline); + thread_call_enter_delayed(hfsmp->hfs_syncer, deadline); + + // note: we intentionally return early here and do not + // decrement the sync_scheduled and sync_incomplete + // variables because we rescheduled the timer. + + return; + } + + // + // NOTE: we decrement these *after* we're done the journal_flush() since + // it can take a significant amount of time and so we don't want more + // callbacks scheduled until we're done this one. + // + OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled); + OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete); + wakeup((caddr_t)&hfsmp->hfs_sync_incomplete); +} + + +extern int IOBSDIsMediaEjectable( const char *cdev_name ); + +/* + * Initialization code for Red-Black Tree Allocator + * + * This function will build the two red-black trees necessary for allocating space + * from the metadata zone as well as normal allocations. Currently, we use + * an advisory read to get most of the data into the buffer cache. + * This function is intended to be run in a separate thread so as not to slow down mount. + * + */ + +void +hfs_initialize_allocator (struct hfsmount *hfsmp) { + +#if CONFIG_HFS_ALLOC_RBTREE + u_int32_t err; + + /* + * Take the allocation file lock. Journal transactions will block until + * we're done here. + */ + int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + /* + * GenerateTree assumes that the bitmap lock is held when you call the function. + * It will drop and re-acquire the lock periodically as needed to let other allocations + * through. It returns with the bitmap lock held. Since we only maintain one tree, + * we don't need to specify a start block (always starts at 0). + */ + err = GenerateTree(hfsmp, hfsmp->totalBlocks, &flags, 1); + if (err) { + goto bailout; + } + /* Mark offset tree as built */ + hfsmp->extent_tree_flags |= HFS_ALLOC_RB_ACTIVE; + +bailout: + /* + * GenerateTree may drop the bitmap lock during operation in order to give other + * threads a chance to allocate blocks, but it will always return with the lock held, so + * we don't need to re-grab the lock in order to update the TREEBUILD_INFLIGHT bit. + */ + hfsmp->extent_tree_flags &= ~HFS_ALLOC_TREEBUILD_INFLIGHT; + if (err != 0) { + /* Wakeup any waiters on the allocation bitmap lock */ + wakeup((caddr_t)&hfsmp->extent_tree_flags); + } + + hfs_systemfile_unlock(hfsmp, flags); +#else +#pragma unused (hfsmp) +#endif +} + + +/* + * Teardown code for the Red-Black Tree allocator. + * This function consolidates the code which serializes with respect + * to a thread that may be potentially still building the tree when we need to begin + * tearing it down. Since the red-black tree may not be live when we enter this function + * we return: + * 1 -> Tree was live. + * 0 -> Tree was not active at time of call. + */ + +int +hfs_teardown_allocator (struct hfsmount *hfsmp) { + int rb_used = 0; + +#if CONFIG_HFS_ALLOC_RBTREE + + int flags = 0; + + /* + * Check to see if the tree-generation is still on-going. 
+ * If it is, then block until it's done. + */ + + flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + + while (hfsmp->extent_tree_flags & HFS_ALLOC_TREEBUILD_INFLIGHT) { + hfsmp->extent_tree_flags |= HFS_ALLOC_TEARDOWN_INFLIGHT; + + lck_rw_sleep(&(VTOC(hfsmp->hfs_allocation_vp))->c_rwlock, LCK_SLEEP_EXCLUSIVE, + &hfsmp->extent_tree_flags, THREAD_UNINT); + } + + if (hfs_isrbtree_active (hfsmp)) { + rb_used = 1; + + /* Tear down the RB Trees while we have the bitmap locked */ + DestroyTrees(hfsmp); + + } + + hfs_systemfile_unlock(hfsmp, flags); +#else + #pragma unused (hfsmp) +#endif + return rb_used; + +} + + +static int hfs_root_unmounted_cleanly = 0; + +SYSCTL_DECL(_vfs_generic); +SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &hfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly"); + /* * Common code for mount and mountroot */ -static int +int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context) { struct proc *p = vfs_context_proc(context); int retval = E_NONE; - struct hfsmount *hfsmp; + struct hfsmount *hfsmp = NULL; struct buf *bp; dev_t dev; - HFSMasterDirectoryBlock *mdbp; + HFSMasterDirectoryBlock *mdbp = NULL; int ronly; #if QUOTA int i; @@ -798,12 +1231,23 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int mntwrapper; kauth_cred_t cred; u_int64_t disksize; - daddr64_t blkcnt; - u_int32_t blksize; + daddr64_t log_blkcnt; + u_int32_t log_blksize; + u_int32_t phys_blksize; u_int32_t minblksize; u_int32_t iswritable; daddr64_t mdb_offset; int isvirtual = 0; + int isroot = 0; + int isssd; +#if CONFIG_HFS_ALLOC_RBTREE + thread_t allocator_thread; +#endif + + if (args == NULL) { + /* only hfs_mountroot passes us NULL as the 'args' argument */ + isroot = 1; + } ronly = vfs_isrdonly(mp); dev = vnode_specrdev(devvp); @@ -818,49 +1262,106 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, /* Advisory locking should be handled at the VFS layer */ vfs_setlocklocal(mp); - /* Get the real physical block size. */ - if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, context)) { + /* Get the logical block size (treated as physical block size everywhere) */ + if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n"); + } + retval = ENXIO; + goto error_exit; + } + if (log_blksize == 0 || log_blksize > 1024*1024*1024) { + printf("hfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize); + retval = ENXIO; + goto error_exit; + } + + /* Get the physical block size. */ + retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context); + if (retval) { + if ((retval != ENOTSUP) && (retval != ENOTTY)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n"); + } + retval = ENXIO; + goto error_exit; + } + /* If device does not support this ioctl, assume that physical + * block size is same as logical block size + */ + phys_blksize = log_blksize; + } + if (phys_blksize == 0 || phys_blksize > 1024*1024*1024) { + printf("hfs: physical block size 0x%x looks bad. 
Not mounting.\n", phys_blksize); retval = ENXIO; goto error_exit; } + /* Switch to 512 byte sectors (temporarily) */ - if (blksize > 512) { + if (log_blksize > 512) { u_int32_t size512 = 512; if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n"); + } retval = ENXIO; goto error_exit; } } /* Get the number of 512 byte physical blocks. */ - if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, context)) { + if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { /* resetting block size may fail if getting block count did */ - (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, context); - + (void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context); + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n"); + } retval = ENXIO; goto error_exit; } /* Compute an accurate disk size (i.e. within 512 bytes) */ - disksize = (u_int64_t)blkcnt * (u_int64_t)512; + disksize = (u_int64_t)log_blkcnt * (u_int64_t)512; /* * On Tiger it is not necessary to switch the device * block size to be 4k if there are more than 31-bits * worth of blocks but to insure compatibility with * pre-Tiger systems we have to do it. + * + * If the device size is not a multiple of 4K (8 * 512), then + * switching the logical block size isn't going to help because + * we will be unable to write the alternate volume header. + * In this case, just leave the logical block size unchanged. */ - if (blkcnt > 0x000000007fffffff) { - minblksize = blksize = 4096; + if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) { + minblksize = log_blksize = 4096; + if (phys_blksize < log_blksize) + phys_blksize = log_blksize; } + /* + * The cluster layer is not currently prepared to deal with a logical + * block size larger than the system's page size. (It can handle + * blocks per page, but not multiple pages per block.) So limit the + * logical block size to the page size. + */ + if (log_blksize > PAGE_SIZE) + log_blksize = PAGE_SIZE; + /* Now switch to our preferred physical block size. */ - if (blksize > 512) { - if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, context)) { + if (log_blksize > 512) { + if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n"); + } retval = ENXIO; goto error_exit; } /* Get the count of physical blocks. 
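Summarizing the sizing policy above before the device is re-queried: the logical block size is widened to 4K only when the 512-byte block count both overflows 31 bits and remains a multiple of 8 (otherwise the alternate volume header could not be written on a block boundary), and it is always capped at the page size for the benefit of the cluster layer. A compilable restatement; TOY_PAGE_SIZE stands in for the kernel's PAGE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

static uint32_t choose_logical_blksize(uint64_t blkcnt512, uint32_t log_bs)
{
    /* widen only if > 2^31 - 1 blocks AND the count divides evenly by 8 */
    if (blkcnt512 > 0x7fffffffULL && (blkcnt512 & 7) == 0)
        log_bs = 4096;
    /* the cluster layer cannot handle multiple pages per block */
    if (log_bs > TOY_PAGE_SIZE)
        log_bs = TOY_PAGE_SIZE;
    return log_bs;
}

int main(void)
{
    /* 1.5 TiB disk: 3221225472 blocks of 512 bytes, > 2^31, multiple of 8 */
    printf("%u\n", choose_logical_blksize(3221225472ULL, 512)); /* 4096 */
    /* same range but an odd count: must stay at 512 */
    printf("%u\n", choose_logical_blksize(3221225479ULL, 512)); /* 512 */
    return 0;
}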
*/ - if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, context)) { + if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n"); + } retval = ENXIO; goto error_exit; } @@ -868,22 +1369,54 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, /* * At this point: * minblksize is the minimum physical block size - * blksize has our preferred physical block size - * blkcnt has the total number of physical blocks + * log_blksize has our preferred physical block size + * log_blkcnt has the total number of physical blocks */ - mdb_offset = (daddr64_t)HFS_PRI_SECTOR(blksize); - if ((retval = (int)buf_meta_bread(devvp, mdb_offset, blksize, cred, &bp))) { + mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize); + if ((retval = (int)buf_meta_bread(devvp, + HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)), + phys_blksize, cred, &bp))) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval); + } goto error_exit; } MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK); - bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(blksize), mdbp, kMDBSize); + if (mdbp == NULL) { + retval = ENOMEM; + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: MALLOC failed\n"); + } + goto error_exit; + } + bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize); buf_brelse(bp); bp = NULL; MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK); + if (hfsmp == NULL) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: MALLOC (2) failed\n"); + } + retval = ENOMEM; + goto error_exit; + } bzero(hfsmp, sizeof(struct hfsmount)); + hfs_chashinit_finish(hfsmp); + + /* + * See if the disk is a solid state device. We need this to decide what to do about + * hotfiles. + */ + if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) { + if (isssd) { + hfsmp->hfs_flags |= HFS_SSD; + } + } + + /* * Init the volume information structure */ @@ -892,14 +1425,17 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr); lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr); lck_rw_init(&hfsmp->hfs_insync, hfs_rwlock_group, hfs_lock_attr); - + lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr); + vfs_setfsprivate(mp, hfsmp); hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */ hfsmp->hfs_raw_dev = vnode_specrdev(devvp); hfsmp->hfs_devvp = devvp; vnode_ref(devvp); /* Hold a ref on the device, dropped when hfsmp is freed. 
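The DKIOCISSOLIDSTATE probe above is what later disables hotfile clustering on SSDs; the same query can be made from user space. A minimal sketch, assuming your build exposes the ioctl in <sys/disk.h> and you run it against a raw disk node you can open:

#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *dev = (argc > 1) ? argv[1] : "/dev/disk0";
    uint32_t isssd = 0;
    int fd = open(dev, O_RDONLY);

    if (fd < 0) { perror(dev); return 1; }
    if (ioctl(fd, DKIOCISSOLIDSTATE, &isssd) == -1)
        perror("DKIOCISSOLIDSTATE");  /* not all media answer this */
    else
        printf("%s: solid state = %u\n", dev, isssd);
    close(fd);
    return 0;
}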
*/ - hfsmp->hfs_phys_block_size = blksize; - hfsmp->hfs_phys_block_count = blkcnt; + hfsmp->hfs_logical_block_size = log_blksize; + hfsmp->hfs_logical_block_count = log_blkcnt; + hfsmp->hfs_physical_block_size = phys_blksize; + hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize); hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA; if (ronly) hfsmp->hfs_flags |= HFS_READ_ONLY; @@ -958,7 +1494,19 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) && (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) { - /* If only journal replay is requested, exit immediately */ + /* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */ + if (vfs_isrdwr(mp)) { + retval = EROFS; + goto error_exit; + } + + printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n"); + + /* Treat it as if it's read-only and not writeable */ + hfsmp->hfs_flags |= HFS_READ_ONLY; + hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA; + + /* If only journal replay is requested, exit immediately */ if (journal_replay_only) { retval = 0; goto error_exit; @@ -969,18 +1517,20 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, goto error_exit; } /* HFS disks can only use 512 byte physical blocks */ - if (blksize > kHFSBlockSize) { - blksize = kHFSBlockSize; - if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, context)) { + if (log_blksize > kHFSBlockSize) { + log_blksize = kHFSBlockSize; + if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { retval = ENXIO; goto error_exit; } - if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, context)) { + if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { retval = ENXIO; goto error_exit; } - hfsmp->hfs_phys_block_size = blksize; - hfsmp->hfs_phys_block_count = blkcnt; + hfsmp->hfs_logical_block_size = log_blksize; + hfsmp->hfs_logical_block_count = log_blkcnt; + hfsmp->hfs_physical_block_size = log_blksize; + hfsmp->hfs_log_per_phys = 1; } if (args) { hfsmp->hfs_encoding = args->hfs_encoding; @@ -1016,37 +1566,54 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, * block size so everything will line up on a block * boundary. 
*/ - if ((embeddedOffset % blksize) != 0) { - printf("HFS Mount: embedded volume offset not" + if ((embeddedOffset % log_blksize) != 0) { + printf("hfs_mountfs: embedded volume offset not" " a multiple of physical block size (%d);" - " switching to 512\n", blksize); - blksize = 512; + " switching to 512\n", log_blksize); + log_blksize = 512; if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, - (caddr_t)&blksize, FWRITE, context)) { + (caddr_t)&log_blksize, FWRITE, context)) { + + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n"); + } retval = ENXIO; goto error_exit; } if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, - (caddr_t)&blkcnt, 0, context)) { + (caddr_t)&log_blkcnt, 0, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n"); + } retval = ENXIO; goto error_exit; } /* Note: relative block count adjustment */ - hfsmp->hfs_phys_block_count *= - hfsmp->hfs_phys_block_size / blksize; - hfsmp->hfs_phys_block_size = blksize; + hfsmp->hfs_logical_block_count *= + hfsmp->hfs_logical_block_size / log_blksize; + + /* Update logical /physical block size */ + hfsmp->hfs_logical_block_size = log_blksize; + hfsmp->hfs_physical_block_size = log_blksize; + phys_blksize = log_blksize; + hfsmp->hfs_log_per_phys = 1; } disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) * (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz); - hfsmp->hfs_phys_block_count = disksize / blksize; + hfsmp->hfs_logical_block_count = disksize / log_blksize; - mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize)); - retval = (int)buf_meta_bread(devvp, mdb_offset, blksize, cred, &bp); - if (retval) + mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); + retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), + phys_blksize, cred, &bp); + if (retval) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval); + } goto error_exit; - bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(blksize), mdbp, 512); + } + bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512); buf_brelse(bp); bp = NULL; vhp = (HFSPlusVolumeHeader*) mdbp; @@ -1056,13 +1623,26 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, vhp = (HFSPlusVolumeHeader*) mdbp; } + if (isroot) { + hfs_root_unmounted_cleanly = (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0; + } + /* * On inconsistent disks, do not allow read-write mount - * unless it is the boot volume being mounted. + * unless it is the boot volume being mounted. We also + * always want to replay the journal if the journal_replay_only + * flag is set because that will (most likely) get the + * disk into a consistent state before fsck_hfs starts + * looking at it. */ - if (!(vfs_flags(mp) & MNT_ROOTFS) && - (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask) && - !(hfsmp->hfs_flags & HFS_READ_ONLY)) { + if ( !(vfs_flags(mp) & MNT_ROOTFS) + && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask) + && !journal_replay_only + && !(hfsmp->hfs_flags & HFS_READ_ONLY)) { + + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: failed to mount non-root inconsistent disk\n"); + } retval = EINVAL; goto error_exit; } @@ -1093,25 +1673,42 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, // if we're able to init the journal, mark the mount // point as journaled. 
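One piece of the embedded-volume fallback above is worth a worked example: when the logical block size shrinks, hfs_logical_block_count is multiplied by the old/new ratio so the byte size of the device is preserved, and the HFS+ volume's size comes from the wrapper's embed extent. All numbers below are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t blk_count = 262144;            /* count in 4096-byte blocks */
    uint32_t old_bs = 4096, new_bs = 512;

    /* "relative block count adjustment": same bytes, smaller unit */
    blk_count *= old_bs / new_bs;           /* 262144 * 8 = 2097152 */

    /* volume size from the wrapper's embed extent, as in the diff:
     * disksize = drEmbedExtent.blockCount * drAlBlkSiz */
    uint64_t embed_blocks = 60000, al_blk_siz = 8192;
    uint64_t disksize = embed_blocks * al_blk_siz;

    printf("count=%llu disksize=%llu bytes\n",
           (unsigned long long)blk_count, (unsigned long long)disksize);
    return 0;
}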
// - if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) { + if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) { vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); } else { + if (retval == EROFS) { + // EROFS is a special error code that means the volume has an external + // journal which we couldn't find. in that case we do not want to + // rewrite the volume header - we'll just refuse to mount the volume. + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n"); + } + retval = EINVAL; + goto error_exit; + } + // if the journal failed to open, then set the lastMountedVersion // to be "FSK!" which fsck_hfs will see and force the fsck instead // of just bailing out because the volume is journaled. if (!ronly) { - HFSPlusVolumeHeader *jvhp; + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n"); + } + + HFSPlusVolumeHeader *jvhp; hfsmp->hfs_flags |= HFS_NEED_JNL_RESET; if (mdb_offset == 0) { - mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize)); + mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); } bp = NULL; - retval = (int)buf_meta_bread(devvp, mdb_offset, blksize, cred, &bp); + retval = (int)buf_meta_bread(devvp, + HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), + phys_blksize, cred, &bp); if (retval == 0) { - jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(blksize)); + jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize)); if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) { printf ("hfs(1): Journal replay fail. Writing lastMountVersion as FSK!\n"); @@ -1133,6 +1730,9 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, // in the hopes that fsck_hfs will be able to // fix any damage that exists on the volume. if ( !(vfs_flags(mp) & MNT_ROOTFS)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n"); + } retval = EINVAL; goto error_exit; } @@ -1156,24 +1756,31 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, * If the backend didn't like our physical blocksize * then retry with physical blocksize of 512. */ - if ((retval == ENXIO) && (blksize > 512) && (blksize != minblksize)) { - printf("HFS Mount: could not use physical block size " - "(%d) switching to 512\n", blksize); - blksize = 512; - if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, context)) { + if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) { + printf("hfs_mountfs: could not use physical block size " + "(%d) switching to 512\n", log_blksize); + log_blksize = 512; + if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n"); + } retval = ENXIO; goto error_exit; } - if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, context)) { + if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n"); + } retval = ENXIO; goto error_exit; } - devvp->v_specsize = blksize; + devvp->v_specsize = log_blksize; /* Note: relative block count adjustment (in case this is an embedded volume). 
*/ - hfsmp->hfs_phys_block_count *= hfsmp->hfs_phys_block_size / blksize; - hfsmp->hfs_phys_block_size = blksize; + hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize; + hfsmp->hfs_logical_block_size = log_blksize; + hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize; - if (hfsmp->jnl) { + if (hfsmp->jnl && hfsmp->jvp == devvp) { // close and re-open this with the new block size journal_close(hfsmp->jnl); hfsmp->jnl = NULL; @@ -1184,18 +1791,22 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, // to be "FSK!" which fsck_hfs will see and force the fsck instead // of just bailing out because the volume is journaled. if (!ronly) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n"); + } HFSPlusVolumeHeader *jvhp; hfsmp->hfs_flags |= HFS_NEED_JNL_RESET; if (mdb_offset == 0) { - mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize)); + mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); } bp = NULL; - retval = (int)buf_meta_bread(devvp, mdb_offset, blksize, cred, &bp); + retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), + phys_blksize, cred, &bp); if (retval == 0) { - jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(blksize)); + jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize)); if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) { printf ("hfs(2): Journal replay fail. Writing lastMountVersion as FSK!\n"); @@ -1217,6 +1828,9 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, // in the hopes that fsck_hfs will be able to // fix any damage that exists on the volume. if ( !(vfs_flags(mp) & MNT_ROOTFS)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: hfs_early_journal_init (2) failed \n"); + } retval = EINVAL; goto error_exit; } @@ -1225,6 +1839,9 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, /* Try again with a smaller block size... */ retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred); + if (retval && HFS_MOUNT_DEBUG) { + printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval); + } } if (retval) (void) hfs_relconverter(0); @@ -1235,13 +1852,16 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime; if ( retval ) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_mountfs: encountered failure %d \n", retval); + } goto error_exit; } mp->mnt_vfsstat.f_fsid.val[0] = (long)dev; mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp); vfs_setmaxsymlen(mp, 0); - mp->mnt_vtable->vfc_threadsafe = TRUE; + mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR; #if NAMEDSTREAMS mp->mnt_kern_flag |= MNTK_NAMED_STREAMS; @@ -1251,19 +1871,21 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS; } else { /* HFS standard doesn't support extended readdir! */ - mp->mnt_vtable->vfc_vfsflags &= ~VFC_VFSREADDIR_EXTENDED; + mount_set_noreaddirext (mp); } if (args) { /* * Set the free space warning levels for a non-root volume: * - * Set the lower freespace limit (the level that will trigger a warning) - * to 5% of the volume size or 250MB, whichever is less, and the desired - * level (which will cancel the alert request) to 1/2 above that limit. 
- * Start looking for free space to drop below this level and generate a - * warning immediately if needed: + * Set the "danger" limit to 1% of the volume size or 100MB, whichever + * is less. Set the "warning" limit to 2% of the volume size or 150MB, + * whichever is less. And last, set the "desired" freespace level to + * to 3% of the volume size or 200MB, whichever is less. */ + hfsmp->hfs_freespace_notify_dangerlimit = + MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION); hfsmp->hfs_freespace_notify_warninglimit = MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION); @@ -1274,10 +1896,14 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, /* * Set the free space warning levels for the root volume: * - * Set the lower freespace limit (the level that will trigger a warning) - * to 1% of the volume size or 50MB, whichever is less, and the desired - * level (which will cancel the alert request) to 2% or 75MB, whichever is less. + * Set the "danger" limit to 5% of the volume size or 512MB, whichever + * is less. Set the "warning" limit to 10% of the volume size or 1GB, + * whichever is less. And last, set the "desired" freespace level to + * to 11% of the volume size or 1.25GB, whichever is less. */ + hfsmp->hfs_freespace_notify_dangerlimit = + MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION); hfsmp->hfs_freespace_notify_warninglimit = MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION); @@ -1293,7 +1919,46 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, } } - /* + /* do not allow ejectability checks on the root device */ + if (isroot == 0) { + if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 && + IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) { + hfsmp->hfs_max_pending_io = 4096*1024; // a reasonable value to start with. + hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp); + if (hfsmp->hfs_syncer == NULL) { + printf("hfs: failed to allocate syncer thread callback for %s (%s)\n", + mp->mnt_vfsstat.f_mntfromname, mp->mnt_vfsstat.f_mntonname); + } + } + } + +#if CONFIG_HFS_ALLOC_RBTREE + /* + * We spawn a thread to create the pair of red-black trees for this volume. + * However, in so doing, we must be careful to ensure that if this thread is still + * running after mount has finished, it doesn't interfere with an unmount. Specifically, + * we'll need to set a bit that indicates we're in progress building the trees here. + * Unmount will check for this bit, and then if it's set, mark a corresponding bit that + * notifies the tree generation code that an unmount is waiting. Also mark the bit that + * indicates the tree is live and operating. + * + * Only do this if we're operating on a read-write mount (we wouldn't care for read-only). 
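Returning to the free-space notification levels set just before this: each limit is the smaller of a fixed byte budget and a percentage of the volume, so the byte cap dominates on large volumes. A sketch of the non-root case, using the 100/150/200 MB and 1/2/3 percent figures the new comment names (the real HFS_*TRIGGER* constants may differ):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t total_blocks = 244190646ULL;  /* ~931 GiB at 4K blocks */
    uint32_t block_size   = 4096;

    uint64_t danger  = MIN(100ULL * 1024 * 1024 / block_size,
                           total_blocks / 100 * 1);
    uint64_t warning = MIN(150ULL * 1024 * 1024 / block_size,
                           total_blocks / 100 * 2);
    uint64_t desired = MIN(200ULL * 1024 * 1024 / block_size,
                           total_blocks / 100 * 3);

    /* on a volume this large, the byte budgets win: 25600/38400/51200 */
    printf("danger=%llu warning=%llu desired=%llu blocks\n",
           (unsigned long long)danger, (unsigned long long)warning,
           (unsigned long long)desired);
    return 0;
}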
+ */ + + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) { + hfsmp->extent_tree_flags |= (HFS_ALLOC_TREEBUILD_INFLIGHT | HFS_ALLOC_RB_ENABLED); + + /* Initialize EOF counter so that the thread can assume it started at initial values */ + hfsmp->offset_block_end = 0; + InitTree(hfsmp); + + kernel_thread_start ((thread_continue_t) hfs_initialize_allocator , hfsmp, &allocator_thread); + thread_deallocate(allocator_thread); + } + +#endif + + /* * Start looking for free space to drop below this level and generate a * warning immediately if needed: */ @@ -1313,13 +1978,16 @@ error_exit: FREE(mdbp, M_TEMP); if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { - (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, context); + vnode_clearmountedon(hfsmp->jvp); + (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, vfs_context_kernel()); hfsmp->jvp = NULL; } if (hfsmp) { if (hfsmp->hfs_devvp) { vnode_rele(hfsmp->hfs_devvp); } + hfs_delete_chash(hfsmp); + FREE(hfsmp, M_HFSMNT); vfs_setfsprivate(mp, NULL); } @@ -1342,7 +2010,7 @@ hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t /* * unmount system call */ -static int +int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) { struct proc *p = vfs_context_proc(context); @@ -1351,6 +2019,7 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) int flags; int force; int started_tr = 0; + int rb_used = 0; flags = 0; force = 0; @@ -1365,6 +2034,42 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) if (hfsmp->hfs_flags & HFS_METADATA_ZONE) (void) hfs_recording_suspend(hfsmp); + /* + * Cancel any pending timers for this volume. Then wait for any timers + * which have fired, but whose callbacks have not yet completed. + */ + if (hfsmp->hfs_syncer) + { + struct timespec ts = {0, 100000000}; /* 0.1 seconds */ + + /* + * Cancel any timers that have been scheduled, but have not + * fired yet. NOTE: The kernel considers a timer complete as + * soon as it starts your callback, so the kernel does not + * keep track of the number of callbacks in progress. + */ + if (thread_call_cancel(hfsmp->hfs_syncer)) + OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete); + thread_call_free(hfsmp->hfs_syncer); + hfsmp->hfs_syncer = NULL; + + /* + * This waits for all of the callbacks that were entered before + * we did thread_call_cancel above, but have not completed yet. 
+ */ + while(hfsmp->hfs_sync_incomplete > 0) + { + msleep((caddr_t)&hfsmp->hfs_sync_incomplete, NULL, PWAIT, "hfs_unmount", &ts); + } + + if (hfsmp->hfs_sync_incomplete < 0) + panic("hfs_unmount: pm_sync_incomplete underflow!\n"); + } + +#if CONFIG_HFS_ALLOC_RBTREE + rb_used = hfs_teardown_allocator(hfsmp); +#endif + /* * Flush out the b-trees, volume bitmap and Volume Header */ @@ -1427,6 +2132,32 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; } + + if (rb_used) { + /* If the rb-tree was live, just set min_start to 0 */ + hfsmp->nextAllocation = 0; + } + else { + if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) { + int i; + u_int32_t min_start = hfsmp->totalBlocks; + + // set the nextAllocation pointer to the smallest free block number + // we've seen so on the next mount we won't rescan unnecessarily + lck_spin_lock(&hfsmp->vcbFreeExtLock); + for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) { + if (hfsmp->vcbFreeExt[i].startBlock < min_start) { + min_start = hfsmp->vcbFreeExt[i].startBlock; + } + } + lck_spin_unlock(&hfsmp->vcbFreeExtLock); + if (min_start < hfsmp->nextAllocation) { + hfsmp->nextAllocation = min_start; + } + } + } + + retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); if (retval) { HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; @@ -1441,7 +2172,7 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) } if (hfsmp->jnl) { - journal_flush(hfsmp->jnl); + hfs_journal_flush(hfsmp, FALSE); } /* @@ -1449,11 +2180,6 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) */ (void) hfsUnmount(hfsmp, p); - /* - * Last chance to dump unreferenced system files. - */ - (void) vflush(mp, NULLVP, FORCECLOSE); - if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) (void) hfs_relconverter(hfsmp->hfs_encoding); @@ -1466,15 +2192,21 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context); if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { + vnode_clearmountedon(hfsmp->jvp); retval = VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, - context); + vfs_context_kernel()); vnode_put(hfsmp->jvp); hfsmp->jvp = NULL; } // XXXdbg -#ifdef HFS_SPARSE_DEV + /* + * Last chance to dump unreferenced system files. + */ + (void) vflush(mp, NULLVP, FORCECLOSE); + +#if HFS_SPARSE_DEV /* Drop our reference on the backing fs (if any). 
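Looking back at the syncer teardown at the top of hfs_unmount: thread_call_cancel() reports whether a pending call was stopped before it fired, so the outstanding-callback count is decremented once for the cancelled timer, and the code then sleeps until callbacks already running have drained. The shape of that protocol, with C11 atomics and a spin standing in for the kernel's msleep/wakeup pair (all names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sync_incomplete;

/* pretend one timer was still pending and we cancelled it in time */
static int cancel_pending_timer(void) { return 1; }

static void teardown_syncer(void)
{
    /* a cancelled timer never runs its callback, so account for it here;
     * callbacks that already started decrement the count themselves */
    if (cancel_pending_timer())
        atomic_fetch_sub(&sync_incomplete, 1);

    while (atomic_load(&sync_incomplete) > 0)
        ;  /* the kernel msleeps on &hfs_sync_incomplete instead */
}

int main(void)
{
    atomic_store(&sync_incomplete, 1);
    teardown_syncer();
    printf("outstanding=%d\n", atomic_load(&sync_incomplete));
    return 0;
}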
*/ if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) { struct vnode * tmpvp; @@ -1486,7 +2218,10 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) } #endif /* HFS_SPARSE_DEV */ lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group); + lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group); vnode_rele(hfsmp->hfs_devvp); + + hfs_delete_chash(hfsmp); FREE(hfsmp, M_HFSMNT); return (0); @@ -1505,7 +2240,7 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context) static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context) { - return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1); + return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0); } @@ -1526,7 +2261,7 @@ hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t int cmd, type, error; if (uid == ~0U) - uid = vfs_context_ucred(context)->cr_ruid; + uid = kauth_cred_getuid(vfs_context_ucred(context)); cmd = cmds >> SUBCMDSHIFT; switch (cmd) { @@ -1534,7 +2269,7 @@ hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t case Q_QUOTASTAT: break; case Q_GETQUOTA: - if (uid == vfs_context_ucred(context)->cr_ruid) + if (uid == kauth_cred_getuid(vfs_context_ucred(context))) break; /* fall through */ default: @@ -1597,23 +2332,23 @@ hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t /* * Get file system statistics. */ -static int +int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context) { ExtendedVCB *vcb = VFSTOVCB(mp); struct hfsmount *hfsmp = VFSTOHFS(mp); - u_long freeCNIDs; + u_int32_t freeCNIDs; u_int16_t subtype = 0; - freeCNIDs = (u_long)0xFFFFFFFF - (u_long)vcb->vcbNxtCNID; + freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)vcb->vcbNxtCNID; sbp->f_bsize = (u_int32_t)vcb->blockSize; - sbp->f_iosize = (size_t)(MAX_UPL_TRANSFER * PAGE_SIZE); - sbp->f_blocks = (u_int64_t)((unsigned long)vcb->totalBlocks); - sbp->f_bfree = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 0)); - sbp->f_bavail = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 1)); - sbp->f_files = (u_int64_t)((unsigned long )(vcb->totalBlocks - 2)); /* max files is constrained by total blocks */ - sbp->f_ffree = (u_int64_t)((unsigned long )(MIN(freeCNIDs, sbp->f_bavail))); + sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0); + sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks); + sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0)); + sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1)); + sbp->f_files = (u_int64_t)((u_int32_t )(vcb->totalBlocks - 2)); /* max files is constrained by total blocks */ + sbp->f_ffree = (u_int64_t)((u_int32_t )(MIN(freeCNIDs, sbp->f_bavail))); /* * Subtypes (flavors) for HFS @@ -1655,16 +2390,18 @@ hfs_sync_metadata(void *arg) struct hfsmount *hfsmp; ExtendedVCB *vcb; buf_t bp; - int sectorsize, retval; + int retval; daddr64_t priIDSector; hfsmp = VFSTOHFS(mp); vcb = HFSTOVCB(hfsmp); // now make sure the super block is flushed - sectorsize = hfsmp->hfs_phys_block_size; - priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / sectorsize) + - HFS_PRI_SECTOR(sectorsize)); - retval = (int)buf_meta_bread(hfsmp->hfs_devvp, priIDSector, sectorsize, NOCRED, &bp); + priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + + HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size)); + + retval = (int)buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(priIDSector, 
hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp); if ((retval != 0 ) && (retval != ENXIO)) { printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n", (int)priIDSector, retval); @@ -1681,7 +2418,9 @@ hfs_sync_metadata(void *arg) // hfs_btreeio.c:FlushAlternate() should flag when it was // written... if (hfsmp->hfs_alt_id_sector) { - retval = (int)buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_alt_id_sector, sectorsize, NOCRED, &bp); + retval = (int)buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp); if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) { buf_bwrite(bp); } else if (bp) { @@ -1734,7 +2473,7 @@ hfs_sync_callback(struct vnode *vp, void *cargs) * * Note: we are always called with the filesystem marked `MPBUSY'. */ -static int +int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context) { struct proc *p = vfs_context_proc(context); @@ -1746,14 +2485,14 @@ hfs_sync(struct mount *mp, int waitfor, vfs_context_t context) int error, allerror = 0; struct hfs_sync_cargs args; + hfsmp = VFSTOHFS(mp); + /* - * During MNT_UPDATE hfs_changefs might be manipulating - * vnodes so back off + * hfs_changefs might be manipulating vnodes so back off */ - if (((u_int32_t)vfs_flags(mp)) & MNT_UPDATE) /* XXX MNT_UPDATE may not be visible here */ + if (hfsmp->hfs_flags & HFS_IN_CHANGEFS) return (0); - hfsmp = VFSTOHFS(mp); if (hfsmp->hfs_flags & HFS_READ_ONLY) return (EROFS); @@ -1838,7 +2577,17 @@ hfs_sync(struct mount *mp, int waitfor, vfs_context_t context) } if (hfsmp->jnl) { - journal_flush(hfsmp->jnl); + hfs_journal_flush(hfsmp, FALSE); + } + + { + clock_sec_t secs; + clock_usec_t usecs; + uint64_t now; + + clock_get_calendar_microtime(&secs, &usecs); + now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs; + hfsmp->hfs_last_sync_time = now; } lck_rw_unlock_shared(&hfsmp->hfs_insync); @@ -1869,29 +2618,26 @@ hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, if (fhlen < (int)sizeof(struct hfsfid)) return (EINVAL); - result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0); + result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0); if (result) { if (result == ENOENT) result = ESTALE; return result; } - - /* The createtime can be changed by hfs_setattr or hfs_setattrlist. - * For NFS, we are assuming that only if the createtime was moved - * forward would it mean the fileID got reused in that session by - * wrapping. We don't have a volume ID or other unique identifier to - * to use here for a generation ID across reboots, crashes where - * metadata noting lastFileID didn't make it to disk but client has - * it, or volume erasures where fileIDs start over again. Lastly, - * with HFS allowing "wraps" of fileIDs now, this becomes more - * error prone. Future, would be change the "wrap bit" to a unique - * wrap number and use that for generation number. For now do this. - */ - if (((time_t)(ntohl(hfsfhp->hfsfid_gen)) < VTOC(nvp)->c_itime)) { - hfs_unlock(VTOC(nvp)); - vnode_put(nvp); - return (ESTALE); - } + + /* + * We used to use the create time as the gen id of the file handle, + * but it is not static enough because it can change at any point + * via system calls. We still don't have another volume ID or other + * unique identifier to use for a generation ID across reboots that + * persists until the file is removed. 
Using only the CNID exposes
+	 * us to the potential wrap-around case, but as of 2/2008, it would take
+	 * over 2 months to wrap around if the machine did nothing but allocate
+	 * CNIDs.  Using some kind of wrap counter would only be effective if
+	 * each file had the wrap counter associated with it.  For now,
+	 * we use only the CNID to identify the file as it's good enough.
+	 */
+
 	*vpp = nvp;
 
 	hfs_unlock(VTOC(nvp));
@@ -1917,8 +2663,9 @@ hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_conte
 	cp = VTOC(vp);
 	hfsfhp = (struct hfsfid *)fhp;
+	/* only the CNID is used to identify the file now */
 	hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
-	hfsfhp->hfsfid_gen = htonl(cp->c_itime);
+	hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
 	*fhlenp = sizeof(struct hfsfid);
 
 	return (0);
@@ -1946,7 +2693,11 @@ hfs_init(__unused struct vfsconf *vfsp)
 	hfs_group_attr = lck_grp_attr_alloc_init();
 	hfs_mutex_group  = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
 	hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
+	hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);
 
+#if HFS_COMPRESSION
+	decmpfs_init();
+#endif
 
 	return (0);
 }
@@ -1983,7 +2734,7 @@ hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
 /*
  * HFS filesystem related variables.
  */
-static int
+int
 hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 			user_addr_t newp, size_t newlen, vfs_context_t context)
 {
@@ -2021,15 +2772,23 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 		size_t bufsize;
 		size_t bytes;
 		u_int32_t hint;
-		u_int16_t *unicode_name;
-		char *filename;
+		u_int16_t *unicode_name = NULL;
+		char *filename = NULL;
 
 		if ((newlen <= 0) || (newlen > MAXPATHLEN)) 
 			return (EINVAL);
 
 		bufsize = MAX(newlen * 3, MAXPATHLEN);
 		MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK);
+		if (filename == NULL) {
+			error = ENOMEM;
+			goto encodinghint_exit;
+		}
 		MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK);
+		if (unicode_name == NULL) {
+			error = ENOMEM;
+			goto encodinghint_exit;
+		}
 
 		error = copyin(newp, (caddr_t)filename, newlen);
 		if (error == 0) {
@@ -2040,8 +2799,12 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 				error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint);
 			}
 		}
-		FREE(unicode_name, M_TEMP);
-		FREE(filename, M_TEMP);
+
+encodinghint_exit:
+		if (unicode_name)
+			FREE(unicode_name, M_TEMP);
+		if (filename)
+			FREE(filename, M_TEMP);
 
 		return (error);
 
 	} else if (name[0] == HFS_ENABLE_JOURNALING) {
@@ -2098,38 +2861,56 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 		printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
 			   (off_t)name[2], (off_t)name[3]);
 
+		//
+		// XXXdbg - note that currently (Sept, 08) hfs_util does not support
+		//          enabling the journal on a separate device so it is safe
+		//          to just copy hfs_devvp here.  If hfs_util gets the ability
+		//          to dynamically enable the journal on a separate device then
+		//          we will have to do the same thing as hfs_early_journal_init()
+		//          to locate and open the journal device.
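+		//
+		// As the journal_create() call below shows, name[2] is the
+		// journal offset in allocation blocks (scaled by the volume
+		// block size plus the embedded-volume offset) and name[3] is
+		// the journal size in bytes, matching the joffset/sz values
+		// printed above.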
+ // jvp = hfsmp->hfs_devvp; jnl = journal_create(jvp, (off_t)name[2] * (off_t)HFSTOVCB(hfsmp)->blockSize + HFSTOVCB(hfsmp)->hfsPlusIOPosOffset, (off_t)((unsigned)name[3]), hfsmp->hfs_devvp, - hfsmp->hfs_phys_block_size, + hfsmp->hfs_logical_block_size, 0, 0, hfs_sync_metadata, hfsmp->hfs_mp); + /* + * Set up the trim callback function so that we can add + * recently freed extents to the free extent cache once + * the transaction that freed them is written to the + * journal on disk. + */ + if (jnl) + journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp); + if (jnl == NULL) { printf("hfs: FAILED to create the journal!\n"); if (jvp && jvp != hfsmp->hfs_devvp) { - VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context); + vnode_clearmountedon(jvp); + VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel()); } jvp = NULL; return EINVAL; } - hfs_global_exclusive_lock_acquire(hfsmp); - + hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); + /* * Flush all dirty metadata buffers. */ - buf_flushdirtyblks(hfsmp->hfs_devvp, MNT_WAIT, 0, "hfs_sysctl"); - buf_flushdirtyblks(hfsmp->hfs_extents_vp, MNT_WAIT, 0, "hfs_sysctl"); - buf_flushdirtyblks(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, "hfs_sysctl"); - buf_flushdirtyblks(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, "hfs_sysctl"); + buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl"); + buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl"); + buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl"); + buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl"); if (hfsmp->hfs_attribute_vp) - buf_flushdirtyblks(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, "hfs_sysctl"); + buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl"); HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1]; HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask; @@ -2144,9 +2925,16 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp, vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED)); - hfs_global_exclusive_lock_release(hfsmp); + hfs_unlock_global (hfsmp); hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); + { + fsid_t fsid; + + fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev; + fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp)); + vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL); + } return 0; } else if (name[0] == HFS_DISABLE_JOURNALING) { // clear the journaling bit @@ -2172,14 +2960,16 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp, printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp)); - hfs_global_exclusive_lock_acquire(hfsmp); + hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK); // Lights out for you buddy! journal_close(hfsmp->jnl); hfsmp->jnl = NULL; if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { - VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context); + vnode_clearmountedon(hfsmp->jvp); + VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? 
FREAD : FREAD|FWRITE, vfs_context_kernel());
+			vnode_put(hfsmp->jvp);
 		}
 		hfsmp->jvp = NULL;
 		vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
@@ -2189,9 +2979,17 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 
 		HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
 
-		hfs_global_exclusive_lock_release(hfsmp);
+		hfs_unlock_global (hfsmp);
+
 		hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
 
+		{
+			fsid_t fsid;
+
+			fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
+			fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
+			vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
+		}
 		return 0;
 	} else if (name[0] == HFS_GET_JOURNAL_INFO) {
 		vnode_t vp = vfs_context_cwd(context);
@@ -2200,6 +2998,10 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 		if (vp == NULLVP)
 			return EINVAL;
 
+		/* 64-bit processes won't work with this sysctl -- can't fit a pointer into an int! */
+		if (proc_is64bit(current_proc()))
+			return EINVAL;
+
 		hfsmp = VTOHFS(vp);
 		if (hfsmp->jnl == NULL) {
 			jnl_start = 0;
@@ -2219,31 +3021,20 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 		return 0;
 	} else if (name[0] == HFS_SET_PKG_EXTENSIONS) {
 
-		return set_package_extensions_table((void *)name[1], name[2], name[3]);
+		return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
 
 	} else if (name[0] == VFS_CTL_QUERY) {
 		struct sysctl_req *req;
-		struct vfsidctl vc;
-		struct user_vfsidctl user_vc;
+		union union_vfsidctl vc;
 		struct mount *mp;
 		struct vfsquery vq;
-		boolean_t is_64_bit;
 
-		is_64_bit = proc_is64bit(p);
 		req = CAST_DOWN(struct sysctl_req *, oldp);	/* we're new style vfs sysctl. */
 
-		if (is_64_bit) {
-			error = SYSCTL_IN(req, &user_vc, sizeof(user_vc));
-			if (error) return (error);
-
-			mp = vfs_getvfs(&user_vc.vc_fsid);
-		}
-		else {
-			error = SYSCTL_IN(req, &vc, sizeof(vc));
-			if (error) return (error);
-
-			mp = vfs_getvfs(&vc.vc_fsid);
-		}
+		error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32));
+		if (error) return (error);
+
+		mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
 		if (mp == NULL) return (ENOENT);
 
 		hfsmp = VFSTOHFS(mp);
@@ -2251,57 +3042,76 @@ hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
 		vq.vq_flags = hfsmp->hfs_notification_conditions;
 		return SYSCTL_OUT(req, &vq, sizeof(vq));
 	} else if (name[0] == HFS_REPLAY_JOURNAL) {
-		char *devnode = NULL;
-		size_t devnode_len;
-
-		devnode_len = *oldlenp;
-		MALLOC(devnode, char *, devnode_len + 1, M_TEMP, M_WAITOK);
-		if (devnode == NULL) {
-			return ENOMEM;
+		vnode_t devvp = NULL;
+		int device_fd;
+		if (namelen != 2) {
+			return (EINVAL);
 		}
-
-		error = copyin(oldp, (caddr_t)devnode, devnode_len);
+		device_fd = name[1];
+		error = file_vnode(device_fd, &devvp);
 		if (error) {
-			FREE(devnode, M_TEMP);
 			return error;
 		}
-		devnode[devnode_len] = 0;
-
-		error = hfs_journal_replay(devnode, context);
-		FREE(devnode, M_TEMP);
+		error = vnode_getwithref(devvp);
+		if (error) {
+			file_drop(device_fd);
+			return error;
+		}
+		error = hfs_journal_replay(devvp, context);
+		file_drop(device_fd);
+		vnode_put(devvp);
 		return error;
+	} else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) {
+		hfs_resize_debug = 1;
+		printf ("hfs_sysctl: Enabled volume resize debugging.\n");
+		return 0;
 	}
 
 	return (ENOTSUP);
 }
 
+/*
+ * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
+ * the build_path ioctl.
We use it to leverage the code below that updates + * the origin list cache if necessary + */ -static int +int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context) { int error; + int lockflags; + struct hfsmount *hfsmp; + + hfsmp = VFSTOHFS(mp); - error = hfs_vget(VFSTOHFS(mp), (cnid_t)ino, vpp, 1); + error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0); if (error) return (error); /* * ADLs may need to have their origin state updated - * since build_path needs a valid parent. + * since build_path needs a valid parent. The same is true + * for hardlinked files as well. There isn't a race window here + * in re-acquiring the cnode lock since we aren't pulling any data + * out of the cnode; instead, we're going to the catalog. */ - if (vnode_isdir(*vpp) && - (VTOC(*vpp)->c_flag & C_HARDLINK) && + if ((VTOC(*vpp)->c_flag & C_HARDLINK) && (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK) == 0)) { cnode_t *cp = VTOC(*vpp); struct cat_desc cdesc; - if (!hfs_haslinkorigin(cp) && - (cat_findname(VFSTOHFS(mp), (cnid_t)ino, &cdesc) == 0)) { - if (cdesc.cd_parentcnid != - VFSTOHFS(mp)->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { - hfs_savelinkorigin(cp, cdesc.cd_parentcnid); + if (!hfs_haslinkorigin(cp)) { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_findname(hfsmp, (cnid_t)ino, &cdesc); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error == 0) { + if ((cdesc.cd_parentcnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) && + (cdesc.cd_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) { + hfs_savelinkorigin(cp, cdesc.cd_parentcnid); + } + cat_releasedesc(&cdesc); } - cat_releasedesc(&cdesc); } hfs_unlock(cp); } @@ -2316,9 +3126,8 @@ hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_con * * If the object is a file then it will represent the data fork. */ -__private_extern__ int -hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) +hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted) { struct vnode *vp = NULLVP; struct cat_desc cndesc; @@ -2340,7 +3149,7 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) /* * Check the hash first */ - vp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, cnid, 0, skiplock); + vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted); if (vp) { *vpp = vp; return(0); @@ -2412,6 +3221,7 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) cnid_t nextlinkid; cnid_t prevlinkid; struct cat_desc linkdesc; + int lockflags; cnattr.ca_linkref = linkref; @@ -2419,9 +3229,12 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) * Pick up the first link in the chain and get a descriptor for it. * This allows blind volfs paths to work for hardlinks. 
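 	 * (Note that cat_findname() walks the catalog B-tree, which is why
 	 * the updated code takes the catalog lock shared around the call
 	 * below.)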
*/ - if ((hfs_lookuplink(hfsmp, linkref, &prevlinkid, &nextlinkid) == 0) && + if ((hfs_lookup_siblinglinks(hfsmp, linkref, &prevlinkid, &nextlinkid) == 0) && (nextlinkid != 0)) { - if (cat_findname(hfsmp, nextlinkid, &linkdesc) == 0) { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_findname(hfsmp, nextlinkid, &linkdesc); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error == 0) { cat_releasedesc(&cndesc); bcopy(&linkdesc, &cndesc, sizeof(linkdesc)); } @@ -2429,13 +3242,17 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) } if (linkref) { - error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cnfork, &vp); + int newvnode_flags = 0; + + error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, + &cnfork, &vp, &newvnode_flags); if (error == 0) { VTOC(vp)->c_flag |= C_HARDLINK; vnode_setmultipath(vp); } } else { struct componentname cn; + int newvnode_flags = 0; /* Supply hfs_getnewvnode with a component name. */ MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); @@ -2449,9 +3266,10 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) cn.cn_consume = 0; bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1); - error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, &cnfork, &vp); + error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, + &cnfork, &vp, &newvnode_flags); - if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK) && vnode_isdir(vp)) { + if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) { hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid); } FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI); @@ -2502,7 +3320,7 @@ hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p) } /* Obtain the root vnode so we can skip over it. */ - skipvp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, kHFSRootFolderID, 0, 0); + skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0); } #endif /* QUOTA */ @@ -2579,7 +3397,6 @@ hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding) * * On journal volumes this will cause a volume header flush */ -__private_extern__ int hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot) { @@ -2642,7 +3459,7 @@ hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush) int sectorsize; ByteCount namelen; - sectorsize = hfsmp->hfs_phys_block_size; + sectorsize = hfsmp->hfs_logical_block_size; retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sectorsize), sectorsize, NOCRED, &bp); if (retval) { if (bp) @@ -2654,7 +3471,7 @@ hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush) mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sectorsize)); - mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbCrDate))); + mdb->drCrDate = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime))); mdb->drLsMod = SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod))); mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb); mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls); @@ -2731,17 +3548,15 @@ hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush) * not flushed since the on-disk "H+" and "HX" signatures * are always stored in-memory as "H+". 
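  * (In the updated routine below, a primary volume header that fails
  * the sanity check no longer causes a panic; if an alternate volume
  * header exists, it is read and used instead.)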
 */
-__private_extern__
 int
 hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
 {
 	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
 	struct filefork *fp;
-	HFSPlusVolumeHeader *volumeHeader;
+	HFSPlusVolumeHeader *volumeHeader, *altVH;
 	int retval;
-	struct buf *bp;
+	struct buf *bp, *alt_bp;
 	int i;
-	int sectorsize;
 	daddr64_t priIDSector;
 	int critical;
 	u_int16_t  signature;
@@ -2754,47 +3569,79 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
 		return hfs_flushMDB(hfsmp, waitfor, altflush);
 	}
 	critical = altflush;
-	sectorsize = hfsmp->hfs_phys_block_size;
-	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / sectorsize) +
-			HFS_PRI_SECTOR(sectorsize));
+	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
+			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
 
 	if (hfs_start_transaction(hfsmp) != 0) {
 	    return EINVAL;
 	}
 
-	retval = (int)buf_meta_bread(hfsmp->hfs_devvp, priIDSector, sectorsize, NOCRED, &bp);
-	if (retval) {
-		if (bp)
-			buf_brelse(bp);
-
-		hfs_end_transaction(hfsmp);
-
-		printf("HFS: err %d reading VH blk (%s)\n", retval, vcb->vcbVN);
-		return (retval);
-	}
+	bp = NULL;
+	alt_bp = NULL;
 
-	if (hfsmp->jnl) {
-		journal_modify_block_start(hfsmp->jnl, bp);
+	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
+			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
+			hfsmp->hfs_physical_block_size, NOCRED, &bp);
+	if (retval) {
+		printf("hfs: err %d reading VH blk (%s)\n", retval, vcb->vcbVN);
+		goto err_exit;
 	}
 
-	volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(sectorsize));
+	volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
+			HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
 
 	/*
-	 * Sanity check what we just read.
+	 * Sanity check what we just read.  If it's bad, try the alternate
+	 * instead.
 	 */
 	signature = SWAP_BE16 (volumeHeader->signature);
 	hfsversion   = SWAP_BE16 (volumeHeader->version);
 	if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
 	    (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
 	    (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
-#if 1
-		panic("HFS: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d",
+		printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d%s\n",
 			  vcb->vcbVN, signature, hfsversion,
-			  SWAP_BE32 (volumeHeader->blockSize));
-#endif
-		printf("HFS: corrupt VH blk (%s)\n", vcb->vcbVN);
-		buf_brelse(bp);
-		return (EIO);
+			  SWAP_BE32 (volumeHeader->blockSize),
+			  hfsmp->hfs_alt_id_sector ? "; trying alternate" : "");
+		hfs_mark_volume_inconsistent(hfsmp);
+
+		if (hfsmp->hfs_alt_id_sector) {
+			retval = buf_meta_bread(hfsmp->hfs_devvp,
+			    HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
+			    hfsmp->hfs_physical_block_size, NOCRED, &alt_bp);
+			if (retval) {
+				printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN);
+				goto err_exit;
+			}
+
+			altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) +
+				HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size));
+			signature = SWAP_BE16(altVH->signature);
+			hfsversion = SWAP_BE16(altVH->version);
+
+			if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
+			    (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
+			    (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) {
+				printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
+				    vcb->vcbVN, signature, hfsversion,
+				    SWAP_BE32(altVH->blockSize));
+				retval = EIO;
+				goto err_exit;
+			}
+
+			/* The alternate is plausible, so use it.
*/ + bcopy(altVH, volumeHeader, kMDBSize); + buf_brelse(alt_bp); + alt_bp = NULL; + } else { + /* No alternate VH, nothing more we can do. */ + retval = EIO; + goto err_exit; + } + } + + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, bp); } /* @@ -2806,15 +3653,16 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) struct buf *bp2; HFSMasterDirectoryBlock *mdb; - retval = (int)buf_meta_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sectorsize), - sectorsize, NOCRED, &bp2); + retval = (int)buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp2); if (retval) { if (bp2) buf_brelse(bp2); retval = 0; } else { mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) + - HFS_PRI_OFFSET(sectorsize)); + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate ) { @@ -2956,14 +3804,16 @@ done: /* If requested, flush out the alternate volume header */ if (altflush && hfsmp->hfs_alt_id_sector) { - struct buf *alt_bp = NULL; - - if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_alt_id_sector, sectorsize, NOCRED, &alt_bp) == 0) { + if (buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) { if (hfsmp->jnl) { journal_modify_block_start(hfsmp->jnl, alt_bp); } - bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sectorsize), kMDBSize); + bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) + + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), + kMDBSize); if (hfsmp->jnl) { journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL); @@ -2991,13 +3841,20 @@ done: hfs_end_transaction(hfsmp); return (retval); + +err_exit: + if (alt_bp) + buf_brelse(alt_bp); + if (bp) + buf_brelse(bp); + hfs_end_transaction(hfsmp); + return retval; } /* * Extend a file system. */ -__private_extern__ int hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) { @@ -3015,12 +3872,14 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) u_int32_t addblks; u_int64_t sectorcnt; u_int32_t sectorsize; + u_int32_t phys_sectorsize; daddr64_t prev_alt_sector; daddr_t bitmapblks; - int lockflags; + int lockflags = 0; int error; int64_t oldBitmapSize; Boolean usedExtendFileC = false; + int transaction_begun = 0; devvp = hfsmp->hfs_devvp; vcb = HFSTOVCB(hfsmp); @@ -3040,7 +3899,7 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) * ownership and check permissions. 
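 	 * (The root directory vnode is obtained below so that its
 	 * ownership can be checked when the caller is not the superuser.)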
 	 */
 	if (suser(cred, NULL)) {
-		error = hfs_vget(hfsmp, kHFSRootFolderID, &vp, 0);
+		error = hfs_vget(hfsmp, kHFSRootFolderID, &vp, 0, 0);
 		if (error)
 			return (error);
 
@@ -3060,7 +3919,7 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&sectorsize, 0, context)) {
 		return (ENXIO);
 	}
-	if (sectorsize != hfsmp->hfs_phys_block_size) {
+	if (sectorsize != hfsmp->hfs_logical_block_size) {
 		return (ENXIO);
 	}
 	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&sectorcnt, 0, context)) {
@@ -3070,12 +3929,20 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 		printf("hfs_extendfs: not enough space on device\n");
 		return (ENOSPC);
 	}
+	error = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_sectorsize, 0, context);
+	if (error) {
+		if ((error != ENOTSUP) && (error != ENOTTY)) {
+			return (ENXIO);
+		}
+		/* If ioctl is not supported, force physical and logical sector size to be same */
+		phys_sectorsize = sectorsize;
+	}
 	oldsize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
 
 	/*
 	 * Validate new size.
 	 */
-	if ((newsize <= oldsize) || (newsize % sectorsize)) {
+	if ((newsize <= oldsize) || (newsize % sectorsize) || (newsize % phys_sectorsize)) {
 		printf("hfs_extendfs: invalid size\n");
 		return (EINVAL);
 	}
@@ -3085,13 +3952,32 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 
 	addblks = newblkcnt - vcb->totalBlocks;
 
-	printf("hfs_extendfs: growing %s by %d blocks\n", vcb->vcbVN, addblks);
+	if (hfs_resize_debug) {
+		printf ("hfs_extendfs: old: size=%qu, blkcnt=%u\n", oldsize, hfsmp->totalBlocks);
+		printf ("hfs_extendfs: new: size=%qu, blkcnt=%u, addblks=%u\n", newsize, (u_int32_t)newblkcnt, addblks);
+	}
+	printf("hfs_extendfs: will extend \"%s\" by %d blocks\n", vcb->vcbVN, addblks);
+
+	HFS_MOUNT_LOCK(hfsmp, TRUE);
+	if (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
+		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+		error = EALREADY;
+		goto out;
+	}
+	hfsmp->hfs_flags |= HFS_RESIZE_IN_PROGRESS;
+	HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+
+	/* Start with a clean journal. */
+	hfs_journal_flush(hfsmp, TRUE);
+
 	/*
 	 * Enclose changes inside a transaction.
 	 */
 	if (hfs_start_transaction(hfsmp) != 0) {
-		return (EINVAL);
+		error = EINVAL;
+		goto out;
 	}
+	transaction_begun = 1;
 
 	/*
 	 * Note: we take the attributes lock in case we have an attribute data vnode
@@ -3112,6 +3998,17 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 	else
 		bitmapblks = 0;
 
+	/*
+	 * The allocation bitmap can contain unused bits that are beyond end of
+	 * current volume's allocation blocks.  Usually they are supposed to be
+	 * zero'ed out but there can be cases where they might be marked as used.
+	 * After extending the file system, those bits can represent valid
+	 * allocation blocks, so we mark all the bits from the end of current
+	 * volume to end of allocation bitmap as "free".
+	 */
+	BlockMarkFreeUnused(vcb, vcb->totalBlocks,
+			(fp->ff_blocks * vcb->blockSize * 8) - vcb->totalBlocks);
+
 	if (bitmapblks > 0) {
 		daddr64_t blkno;
 		daddr_t blkcnt;
@@ -3131,8 +4028,8 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 		 * zone.
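 		 * (Note: the updated ExtendFileC() call below adds
 		 * kEFContigMask, so the bitmap file's new extent must be
 		 * allocated contiguously.)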
*/ error = ExtendFileC(vcb, fp, bitmapblks * vcb->blockSize, 0, - kEFAllMask | kEFNoClumpMask | kEFReserveMask | kEFMetadataMask, - &bytesAdded); + kEFAllMask | kEFNoClumpMask | kEFReserveMask + | kEFMetadataMask | kEFContigMask, &bytesAdded); if (error == 0) { usedExtendFileC = true; @@ -3228,14 +4125,14 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) /* * Adjust file system variables for new space. */ - prev_phys_block_count = hfsmp->hfs_phys_block_count; + prev_phys_block_count = hfsmp->hfs_logical_block_count; prev_alt_sector = hfsmp->hfs_alt_id_sector; vcb->totalBlocks += addblks; vcb->freeBlocks += addblks; - hfsmp->hfs_phys_block_count = newsize / sectorsize; + hfsmp->hfs_logical_block_count = newsize / sectorsize; hfsmp->hfs_alt_id_sector = (hfsmp->hfsPlusIOPosOffset / sectorsize) + - HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_phys_block_count); + HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_logical_block_count); MarkVCBDirty(vcb); error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); if (error) { @@ -3244,7 +4141,8 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) * Restore to old state. */ if (usedExtendFileC) { - (void) TruncateFileC(vcb, fp, oldBitmapSize, false); + (void) TruncateFileC(vcb, fp, oldBitmapSize, 0, FORK_IS_RSRC(fp), + FTOC(fp)->c_fileid, false); } else { fp->ff_blocks -= bitmapblks; fp->ff_size -= (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize; @@ -3257,13 +4155,18 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) } vcb->totalBlocks -= addblks; vcb->freeBlocks -= addblks; - hfsmp->hfs_phys_block_count = prev_phys_block_count; + hfsmp->hfs_logical_block_count = prev_phys_block_count; hfsmp->hfs_alt_id_sector = prev_alt_sector; MarkVCBDirty(vcb); - if (vcb->blockSize == 512) - (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 2, 2); - else - (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 1, 1); + if (vcb->blockSize == 512) { + if (BlockMarkAllocated(vcb, vcb->totalBlocks - 2, 2)) { + hfs_mark_volume_inconsistent(hfsmp); + } + } else { + if (BlockMarkAllocated(vcb, vcb->totalBlocks - 1, 1)) { + hfs_mark_volume_inconsistent(hfsmp); + } + } goto out; } /* @@ -3271,11 +4174,12 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) */ bp = NULL; if (prev_alt_sector) { - if (buf_meta_bread(hfsmp->hfs_devvp, prev_alt_sector, sectorsize, - NOCRED, &bp) == 0) { + if (buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(prev_alt_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp) == 0) { journal_modify_block_start(hfsmp->jnl, bp); - bzero((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(sectorsize), kMDBSize); + bzero((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), kMDBSize); journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); } else if (bp) { @@ -3283,9 +4187,10 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) } } - /* - * TODO: Adjust the size of the metadata zone based on new volume size? + /* + * Update the metadata zone size based on current volume size */ + hfs_metadatazone_init(hfsmp, false); /* * Adjust the size of hfsmp->hfs_attrdata_vp @@ -3307,23 +4212,48 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) } } + /* + * Update the R/B Tree if necessary. 
Since we don't have to drop the systemfile + * locks in the middle of these operations like we do in the truncate case + * where we have to relocate files, we can only update the red-black tree + * if there were actual changes made to the bitmap. Also, we can't really scan the + * new portion of the bitmap before it has been allocated. The BlockMarkAllocated + * routines are smart enough to avoid the r/b tree if the portion they are manipulating is + * not currently controlled by the tree. + * + * We only update hfsmp->allocLimit if totalBlocks actually increased. + */ + + if (error == 0) { + UpdateAllocLimit(hfsmp, hfsmp->totalBlocks); + } + + /* Log successful extending */ + printf("hfs_extendfs: extended \"%s\" to %d blocks (was %d blocks)\n", + hfsmp->vcbVN, hfsmp->totalBlocks, (u_int32_t)(oldsize/hfsmp->blockSize)); + out: if (error && fp) { /* Restore allocation fork. */ bcopy(&forkdata, &fp->ff_data, sizeof(forkdata)); VTOC(vp)->c_blocks = fp->ff_blocks; - + + } + + HFS_MOUNT_LOCK(hfsmp, TRUE); + hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS; + HFS_MOUNT_UNLOCK(hfsmp, TRUE); + if (lockflags) { + hfs_systemfile_unlock(hfsmp, lockflags); + } + if (transaction_begun) { + hfs_end_transaction(hfsmp); + hfs_journal_flush(hfsmp, FALSE); + /* Just to be sure, sync all data to the disk */ + (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context); } - /* - Regardless of whether or not the totalblocks actually increased, - we should reset the allocLimit field. If it changed, it will - get updated; if not, it will remain the same. - */ - hfsmp->allocLimit = vcb->totalBlocks; - hfs_systemfile_unlock(hfsmp, lockflags); - hfs_end_transaction(hfsmp); - return (error); + return MacToVFSError(error); } #define HFS_MIN_SIZE (32LL * 1024LL * 1024LL) @@ -3331,7 +4261,6 @@ out: /* * Truncate a file system (while still mounted). */ -__private_extern__ int hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) { @@ -3341,7 +4270,9 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) u_int32_t reclaimblks = 0; int lockflags = 0; int transaction_begun = 0; - int error; + Boolean updateFreeBlocks = false; + Boolean disable_sparse = false; + int error = 0; lck_mtx_lock(&hfsmp->hfs_mutex); if (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) { @@ -3349,8 +4280,9 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) return (EALREADY); } hfsmp->hfs_flags |= HFS_RESIZE_IN_PROGRESS; - hfsmp->hfs_resize_filesmoved = 0; - hfsmp->hfs_resize_totalfiles = 0; + hfsmp->hfs_resize_blocksmoved = 0; + hfsmp->hfs_resize_totalblocks = 0; + hfsmp->hfs_resize_progress = 0; lck_mtx_unlock(&hfsmp->hfs_mutex); /* @@ -3366,29 +4298,80 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) newblkcnt = newsize / hfsmp->blockSize; reclaimblks = hfsmp->totalBlocks - newblkcnt; + if (hfs_resize_debug) { + printf ("hfs_truncatefs: old: size=%qu, blkcnt=%u, freeblks=%u\n", oldsize, hfsmp->totalBlocks, hfs_freeblks(hfsmp, 1)); + printf ("hfs_truncatefs: new: size=%qu, blkcnt=%u, reclaimblks=%u\n", newsize, newblkcnt, reclaimblks); + } + /* Make sure new size is valid. 
 	 */
 	if ((newsize < HFS_MIN_SIZE) ||
 	    (newsize >= oldsize) ||
-	    (newsize % hfsmp->hfs_phys_block_size)) {
+	    (newsize % hfsmp->hfs_logical_block_size) ||
+	    (newsize % hfsmp->hfs_physical_block_size)) {
+		printf ("hfs_truncatefs: invalid size (newsize=%qu, oldsize=%qu)\n", newsize, oldsize);
 		error = EINVAL;
 		goto out;
 	}
-	/* Make sure there's enough space to work with. */
+
+	/*
+	 * Make sure that the file system has enough free blocks to reclaim.
+	 *
+	 * Before resize, the disk is divided into four zones -
+	 * 	A. Allocated_Stationary - These are allocated blocks that exist
+	 * 	   before the new end of disk.  These blocks will not be
+	 * 	   relocated or modified during resize.
+	 * 	B. Free_Stationary - These are free blocks that exist before the
+	 * 	   new end of disk.  These blocks can be used for any new
+	 * 	   allocations during resize, including allocation for relocating
+	 * 	   data from the area of disk being reclaimed.
+	 * 	C. Allocated_To-Reclaim - These are allocated blocks that exist
+	 * 	   beyond the new end of disk.  These blocks need to be reclaimed
+	 * 	   during resize by allocating an equal number of blocks in the
+	 * 	   Free_Stationary zone and copying the data.
+	 * 	D. Free_To-Reclaim - These are free blocks that exist beyond the
+	 * 	   new end of disk.  Nothing special needs to be done to reclaim
+	 * 	   them.
+	 *
+	 * Total number of blocks on the disk before resize:
+	 * ------------------------------------------------
+	 *	Total Blocks = Allocated_Stationary + Free_Stationary +
+	 *	               Allocated_To-Reclaim + Free_To-Reclaim
+	 *
+	 * Total number of blocks that need to be reclaimed:
+	 * ------------------------------------------------
+	 *	Blocks to Reclaim = Allocated_To-Reclaim + Free_To-Reclaim
+	 *
+	 * Note that the check below also makes sure that we have enough space
+	 * to relocate data from Allocated_To-Reclaim to Free_Stationary.
+	 * Therefore we do not need to check the total number of blocks to
+	 * relocate later in the code.
+	 *
+	 * The condition below gets converted to:
+	 *
+	 * Allocated To-Reclaim + Free To-Reclaim >= Free Stationary + Free To-Reclaim
+	 *
+	 * which is equivalent to:
+	 *
+	 *              Allocated To-Reclaim >= Free Stationary
+	 */
 	if (reclaimblks >= hfs_freeblks(hfsmp, 1)) {
-		printf("hfs_truncatefs: insufficient space (need %u blocks; have %u blocks)\n", reclaimblks, hfs_freeblks(hfsmp, 1));
+		printf("hfs_truncatefs: insufficient space (need %u blocks; have %u free blocks)\n", reclaimblks, hfs_freeblks(hfsmp, 1));
 		error = ENOSPC;
 		goto out;
 	}
 
 	/* Start with a clean journal. */
-	journal_flush(hfsmp->jnl);
+	hfs_journal_flush(hfsmp, TRUE);
 
 	if (hfs_start_transaction(hfsmp) != 0) {
 		error = EINVAL;
 		goto out;
 	}
 	transaction_begun = 1;
-
+
+	/* Take the bitmap lock to update the alloc limit field */
+	lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
 	/*
 	 * Prevent new allocations from using the part we're trying to truncate.
 	 *
@@ -3397,18 +4380,71 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 	 * interfere with allocating the new alternate volume header, and no files
 	 * in the allocation blocks beyond (i.e. the blocks we're trying to
 	 * truncate away).
+	 *
+	 * Also shrink the red-black tree if needed.
+	 */
+	if (hfsmp->blockSize == 512) {
+		error = UpdateAllocLimit (hfsmp, newblkcnt - 2);
+	}
+	else {
+		error = UpdateAllocLimit (hfsmp, newblkcnt - 1);
+	}
+
+	/* Sparse devices use first fit allocation which is not ideal
+	 * for volume resize which requires best fit allocation.
If a
+	 * sparse device is being truncated, disable the sparse device
+	 * property temporarily for the duration of resize.  Also reset
+	 * the free extent cache so that it is rebuilt as sorted by
+	 * totalBlocks instead of startBlock.
+	 *
+	 * Note that this will affect all allocations on the volume and
+	 * the ideal fix would be to modify just the resize-related
+	 * allocations, but that would result in complexity like handling
+	 * two free extent caches sorted differently, etc.  So we stick to
+	 * this solution for now.
+	 */
+	HFS_MOUNT_LOCK(hfsmp, TRUE);
+	if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
+		ResetVCBFreeExtCache(hfsmp);
+		disable_sparse = true;
+	}
+
+	/*
+	 * Update the volume free block count to reflect the total number
+	 * of free blocks that will exist after a successful resize.
+	 * Relocation of extents will result in no net change in the total
+	 * free space on the disk.  Therefore the code that allocates
+	 * space for a new extent and deallocates the old extent explicitly
+	 * prevents updating the volume free block count.  It also prevents
+	 * a false disk-full error when the number of blocks in an extent
+	 * being relocated is greater than the number of free blocks that
+	 * will exist after the volume is resized.
+	 */
-	lck_mtx_lock(&hfsmp->hfs_mutex);
-	if (hfsmp->blockSize == 512)
-		hfsmp->allocLimit = newblkcnt - 2;
-	else
-		hfsmp->allocLimit = newblkcnt - 1;
 	hfsmp->freeBlocks -= reclaimblks;
-	lck_mtx_unlock(&hfsmp->hfs_mutex);
+	updateFreeBlocks = true;
+	HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+
+	if (lockflags) {
+		hfs_systemfile_unlock(hfsmp, lockflags);
+		lockflags = 0;
+	}
 
 	/*
-	 * Look for files that have blocks at or beyond the location of the
-	 * new alternate volume header.
+	 * Update the metadata zone size to match the new volume size,
+	 * and if it is too small, the metadata zone might be disabled.
+	 */
+	hfs_metadatazone_init(hfsmp, false);
+
+	/*
+	 * If some files have blocks at or beyond the location of the
+	 * new alternate volume header, recalculate free blocks and
+	 * reclaim blocks.  Otherwise just update free blocks count.
+	 *
+	 * The current allocLimit is set to the location of the new alternate
+	 * volume header, and reclaimblks is the total number of blocks
+	 * that need to be reclaimed.  So the check below is really
+	 * ignoring the blocks allocated for the old alternate volume header.
 	 */
 	if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) {
 		/*
@@ -3419,8 +4455,9 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 		transaction_begun = 0;
 
 		/* Attempt to reclaim some space. */
-		if (hfs_reclaimspace(hfsmp, hfsmp->allocLimit, reclaimblks, context) != 0) {
-			printf("hfs_truncatefs: couldn't reclaim space on %s\n", hfsmp->vcbVN);
+		error = hfs_reclaimspace(hfsmp, hfsmp->allocLimit, reclaimblks, context);
+		if (error != 0) {
+			printf("hfs_truncatefs: couldn't reclaim space on %s (error=%d)\n", hfsmp->vcbVN, error);
 			error = ENOSPC;
 			goto out;
 		}
@@ -3431,28 +4468,20 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
 		transaction_begun = 1;
 
 		/* Check if we're clear now.
*/ - if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) { - printf("hfs_truncatefs: didn't reclaim enough space on %s\n", hfsmp->vcbVN); + error = hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks); + if (error != 0) { + printf("hfs_truncatefs: didn't reclaim enough space on %s (error=%d)\n", hfsmp->vcbVN, error); error = EAGAIN; /* tell client to try again */ goto out; } - } - + } + /* * Note: we take the attributes lock in case we have an attribute data vnode * which needs to change size. */ lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); - /* - * Mark the old alternate volume header as free. - * We don't bother shrinking allocation bitmap file. - */ - if (hfsmp->blockSize == 512) - (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 2, 2); - else - (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 1, 1); - /* * Allocate last 1KB for alternate volume header. */ @@ -3462,6 +4491,15 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) goto out; } + /* + * Mark the old alternate volume header as free. + * We don't bother shrinking allocation bitmap file. + */ + if (hfsmp->blockSize == 512) + (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 2, 2); + else + (void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 1, 1); + /* * Invalidate the existing alternate volume header. * @@ -3469,13 +4507,16 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) * since this block will be outside of the truncated file system! */ if (hfsmp->hfs_alt_id_sector) { - if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_alt_id_sector, - hfsmp->hfs_phys_block_size, NOCRED, &bp) == 0) { - - bzero((void*)((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_phys_block_size)), kMDBSize); + error = buf_meta_bread(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, NOCRED, &bp); + if (error == 0) { + bzero((void*)((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size)), kMDBSize); (void) VNOP_BWRITE(bp); - } else if (bp) { - buf_brelse(bp); + } else { + if (bp) { + buf_brelse(bp); + } } bp = NULL; } @@ -3488,17 +4529,13 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) * Adjust file system variables and flush them to disk. */ hfsmp->totalBlocks = newblkcnt; - hfsmp->hfs_phys_block_count = newsize / hfsmp->hfs_phys_block_size; - hfsmp->hfs_alt_id_sector = HFS_ALT_SECTOR(hfsmp->hfs_phys_block_size, hfsmp->hfs_phys_block_count); + hfsmp->hfs_logical_block_count = newsize / hfsmp->hfs_logical_block_size; + hfsmp->hfs_alt_id_sector = HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count); MarkVCBDirty(hfsmp); error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); if (error) panic("hfs_truncatefs: unexpected error flushing volume header (%d)\n", error); - - /* - * TODO: Adjust the size of the metadata zone based on new volume size? - */ - + /* * Adjust the size of hfsmp->hfs_attrdata_vp */ @@ -3520,25 +4557,49 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context) } out: - if (error) + /* + * Update the allocLimit to acknowledge the last one or two blocks now. + * Add it to the tree as well if necessary. 
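+	 * (For example, on a volume with 512-byte allocation blocks the
+	 * final two blocks hold the 1 KB alternate volume header, which is
+	 * why allocLimit was temporarily newblkcnt - 2 during the resize
+	 * and is widened back to totalBlocks here.)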
+ */ + UpdateAllocLimit (hfsmp, hfsmp->totalBlocks); + + HFS_MOUNT_LOCK(hfsmp, TRUE); + if (disable_sparse == true) { + /* Now that resize is completed, set the volume to be sparse + * device again so that all further allocations will be first + * fit instead of best fit. Reset free extent cache so that + * it is rebuilt. + */ + hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE; + ResetVCBFreeExtCache(hfsmp); + } + + if (error && (updateFreeBlocks == true)) { hfsmp->freeBlocks += reclaimblks; + } - lck_mtx_lock(&hfsmp->hfs_mutex); - hfsmp->allocLimit = hfsmp->totalBlocks; - if (hfsmp->nextAllocation >= hfsmp->allocLimit) + if (hfsmp->nextAllocation >= hfsmp->allocLimit) { hfsmp->nextAllocation = hfsmp->hfs_metazone_end + 1; + } hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS; - lck_mtx_unlock(&hfsmp->hfs_mutex); + HFS_MOUNT_UNLOCK(hfsmp, TRUE); + + /* On error, reset the metadata zone for original volume size */ + if (error && (updateFreeBlocks == true)) { + hfs_metadatazone_init(hfsmp, false); + } if (lockflags) { hfs_systemfile_unlock(hfsmp, lockflags); } if (transaction_begun) { hfs_end_transaction(hfsmp); - journal_flush(hfsmp->jnl); + hfs_journal_flush(hfsmp, FALSE); + /* Just to be sure, sync all data to the disk */ + (void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context); } - return (error); + return MacToVFSError(error); } @@ -3599,7 +4660,10 @@ hfs_copy_extent( size_t ioSize; u_int32_t ioSizeSectors; /* Device sectors in this I/O */ daddr64_t srcSector, destSector; - u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_phys_block_size; + u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_logical_block_size; +#if CONFIG_PROTECT + int cpenabled = 0; +#endif /* * Sanity check that we have locked the vnode of the file we're copying. @@ -3612,18 +4676,25 @@ hfs_copy_extent( if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread()) panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp); - /* - * Wait for any in-progress writes to this vnode to complete, so that we'll - * be copying consistent bits. (Otherwise, it's possible that an async - * write will complete to the old extent after we read from it. That - * could lead to corruption.) - */ - err = vnode_waitforwrites(vp, 0, 0, 0, "hfs_copy_extent"); - if (err) { - printf("hfs_copy_extent: Error %d from vnode_waitforwrites\n", err); - return err; +#if CONFIG_PROTECT + /* Prepare the CP blob and get it ready for use */ + if (!vnode_issystem (vp) && vnode_isreg(vp) && + cp_fs_protected (hfsmp->hfs_mp)) { + int cp_err = 0; + cp_err = cp_handle_relocate (cp); + if (cp_err) { + /* + * can't copy the file because we couldn't set up keys. 
+ * bail out + */ + return cp_err; + } + else { + cpenabled = 1; + } } - +#endif + /* * Determine the I/O size to use * @@ -3641,11 +4712,11 @@ hfs_copy_extent( buf_setdataptr(bp, (uintptr_t)buffer); resid = (off_t) blockCount * (off_t) hfsmp->blockSize; - srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size; - destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_phys_block_size; + srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size; + destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size; while (resid > 0) { - ioSize = MIN(bufferSize, resid); - ioSizeSectors = ioSize / hfsmp->hfs_phys_block_size; + ioSize = MIN(bufferSize, (size_t) resid); + ioSizeSectors = ioSize / hfsmp->hfs_logical_block_size; /* Prepare the buffer for reading */ buf_reset(bp, B_READ); @@ -3653,7 +4724,14 @@ hfs_copy_extent( buf_setcount(bp, ioSize); buf_setblkno(bp, srcSector); buf_setlblkno(bp, srcSector); - + + /* Attach the CP to the buffer */ +#if CONFIG_PROTECT + if (cpenabled) { + buf_setcpaddr (bp, cp->c_cpentry); + } +#endif + /* Do the read */ err = VNOP_STRATEGY(bp); if (!err) @@ -3669,8 +4747,15 @@ hfs_copy_extent( buf_setcount(bp, ioSize); buf_setblkno(bp, destSector); buf_setlblkno(bp, destSector); - if (journal_uses_fua(hfsmp->jnl)) + if (vnode_issystem(vp) && journal_uses_fua(hfsmp->jnl)) buf_markfua(bp); + +#if CONFIG_PROTECT + /* Attach the CP to the buffer */ + if (cpenabled) { + buf_setcpaddr (bp, cp->c_cpentry); + } +#endif /* Do the write */ vnode_startwrite(hfsmp->hfs_devvp); @@ -3692,7 +4777,7 @@ hfs_copy_extent( kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize); /* Make sure all writes have been flushed to disk. */ - if (!journal_uses_fua(hfsmp->jnl)) { + if (vnode_issystem(vp) && !journal_uses_fua(hfsmp->jnl)) { err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context); if (err) { printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err); @@ -3707,258 +4792,1128 @@ hfs_copy_extent( } -/* - * Reclaim space at the end of a volume, used by a given system file. +/* Structure to store state of reclaiming extents from a + * given file. hfs_reclaim_file()/hfs_reclaim_xattr() + * initializes the values in this structure which are then + * used by code that reclaims and splits the extents. + */ +struct hfs_reclaim_extent_info { + struct vnode *vp; + u_int32_t fileID; + u_int8_t forkType; + u_int8_t is_dirlink; /* Extent belongs to directory hard link */ + u_int8_t is_sysfile; /* Extent belongs to system file */ + u_int8_t is_xattr; /* Extent belongs to extent-based xattr */ + u_int8_t extent_index; + int lockflags; /* Locks that reclaim and split code should grab before modifying the extent record */ + u_int32_t blocks_relocated; /* Total blocks relocated for this file till now */ + u_int32_t recStartBlock; /* File allocation block number (FABN) for current extent record */ + u_int32_t cur_blockCount; /* Number of allocation blocks that have been checked for reclaim */ + struct filefork *catalog_fp; /* If non-NULL, extent is from catalog record */ + union record { + HFSPlusExtentRecord overflow;/* Extent record from overflow extents btree */ + HFSPlusAttrRecord xattr; /* Attribute record for large EAs */ + } record; + HFSPlusExtentDescriptor *extents; /* Pointer to current extent record being processed. + * For catalog extent record, points to the correct + * extent information in filefork. 
For overflow extent + * record, or xattr record, points to extent record + * in the structure above + */ + struct cat_desc *dirlink_desc; + struct cat_attr *dirlink_attr; + struct filefork *dirlink_fork; /* For directory hard links, fp points actually to this */ + struct BTreeIterator *iterator; /* Shared read/write iterator, hfs_reclaim_file/xattr() + * use it for reading and hfs_reclaim_extent()/hfs_split_extent() + * use it for writing updated extent record + */ + struct FSBufferDescriptor btdata; /* Shared btdata for reading/writing extent record, same as iterator above */ + u_int16_t recordlen; + int overflow_count; /* For debugging, counter for overflow extent record */ + FCB *fcb; /* Pointer to the current btree being traversed */ +}; + +/* + * Split the current extent into two extents, with first extent + * to contain given number of allocation blocks. Splitting of + * extent creates one new extent entry which can result in + * shifting of many entries through all the extent records of a + * file, and/or creating a new extent record in the overflow + * extent btree. * - * This routine attempts to move any extent which contains allocation blocks - * at or after "startblk." A separate transaction is used to do the move. - * The contents of any moved extents are read and written via the volume's - * device vnode -- NOT via "vp." During the move, moved blocks which are part - * of a transaction have their physical block numbers invalidated so they will - * eventually be written to their new locations. + * Example: + * The diagram below represents two consecutive extent records, + * for simplicity, lets call them record X and X+1 respectively. + * Interesting extent entries have been denoted by letters. + * If the letter is unchanged before and after split, it means + * that the extent entry was not modified during the split. + * A '.' means that the entry remains unchanged after the split + * and is not relevant for our example. A '0' means that the + * extent entry is empty. * - * This routine can be used to move overflow extents for the allocation file. + * If there isn't sufficient contiguous free space to relocate + * an extent (extent "C" below), we will have to break the one + * extent into multiple smaller extents, and relocate each of + * the smaller extents individually. The way we do this is by + * finding the largest contiguous free space that is currently + * available (N allocation blocks), and then convert extent "C" + * into two extents, C1 and C2, that occupy exactly the same + * allocation blocks as extent C. Extent C1 is the first + * N allocation blocks of extent C, and extent C2 is the remainder + * of extent C. Then we can relocate extent C1 since we know + * we have enough contiguous free space to relocate it in its + * entirety. We then repeat the process starting with extent C2. * - * Inputs: - * hfsmp The volume being resized. - * startblk Blocks >= this allocation block need to be moved. - * locks Which locks need to be taken for the given system file. - * vp The vnode for the system file. + * In record X, only the entries following entry C are shifted, and + * the original entry C is replaced with two entries C1 and C2 which + * are actually two extent entries for contiguous allocation blocks. + * + * Note that the entry E from record X is shifted into record X+1 as + * the new first entry. Since the first entry of record X+1 is updated, + * the FABN will also get updated with the blockCount of entry E. 
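+ * (A worked instance with purely illustrative numbers: if extent C is
+ * (startBlock 1000, blockCount 50) and the largest contiguous free run
+ * is N = 20 blocks, the split yields C1 = (1000, 20) and C2 = (1020, 30);
+ * and if record X+1's old FABN was 480 with E.blockCount = 10, its new
+ * FABN is 480 - 10 = 470, per the formulas below.)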
+ * This also results in shifting of all extent entries in record X+1. + * Note that the number of empty entries after the split has been + * changed from 3 to 2. + * + * Before: + * record X record X+1 + * ---------------------===--------- --------------------------------- + * | A | . | . | . | B | C | D | E | | F | . | . | . | G | 0 | 0 | 0 | + * ---------------------===--------- --------------------------------- + * + * After: + * ---------------------=======----- --------------------------------- + * | A | . | . | . | B | C1| C2| D | | E | F | . | . | . | G | 0 | 0 | + * ---------------------=======----- --------------------------------- * - * Outputs: - * moved Set to true if any extents were moved. + * C1.startBlock = C.startBlock + * C1.blockCount = N + * + * C2.startBlock = C.startBlock + N + * C2.blockCount = C.blockCount - N + * + * FABN = old FABN - E.blockCount + * + * Inputs: + * extent_info - This is the structure that contains state about + * the current file, extent, and extent record that + * is being relocated. This structure is shared + * among code that traverses through all the extents + * of the file, code that relocates extents, and + * code that splits the extent. + * Output: + * Zero on success, non-zero on failure. */ -static int -hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state) +static int +hfs_split_extent(struct hfs_reclaim_extent_info *extent_info, uint32_t newBlockCount) { - bcopy(state, record, sizeof(HFSPlusExtentRecord)); - return 0; -} -static int -hfs_reclaim_sys_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, Boolean *moved, vfs_context_t context) -{ - int error; - int lockflags; + int error = 0; + int index = extent_info->extent_index; int i; - u_long datablks; - u_long block; - u_int32_t oldStartBlock; - u_int32_t newStartBlock; - u_int32_t blockCount; - struct filefork *fp; + HFSPlusExtentDescriptor shift_extent; /* Extent entry that should be shifted into next extent record */ + HFSPlusExtentDescriptor last_extent; + HFSPlusExtentDescriptor *extents; /* Pointer to current extent record being manipulated */ + HFSPlusExtentRecord *extents_rec = NULL; + HFSPlusExtentKey *extents_key = NULL; + HFSPlusAttrRecord *xattr_rec = NULL; + HFSPlusAttrKey *xattr_key = NULL; + struct BTreeIterator iterator; + struct FSBufferDescriptor btdata; + uint16_t reclen; + uint32_t read_recStartBlock; /* Starting allocation block number to read old extent record */ + uint32_t write_recStartBlock; /* Starting allocation block number to insert newly updated extent record */ + Boolean create_record = false; + Boolean is_xattr; + struct cnode *cp; + + is_xattr = extent_info->is_xattr; + extents = extent_info->extents; + cp = VTOC(extent_info->vp); - /* If there is no vnode for this file, then there's nothing to do. 
*/ - if (vp == NULL) - return 0; + if (hfs_resize_debug) { + printf ("hfs_split_extent: Split record:%u recStartBlock=%u %u:(%u,%u) for %u blocks\n", extent_info->overflow_count, extent_info->recStartBlock, index, extents[index].startBlock, extents[index].blockCount, newBlockCount); + } - /* printf("hfs_reclaim_sys_file: %.*s\n", VTOC(vp)->c_desc.cd_namelen, VTOC(vp)->c_desc.cd_nameptr); */ - - /* We always need the allocation bitmap and extents B-tree */ - locks |= SFL_BITMAP | SFL_EXTENTS; - - error = hfs_start_transaction(hfsmp); - if (error) { - printf("hfs_reclaim_sys_file: hfs_start_transaction returned %d\n", error); - return error; + /* Extents overflow btree can not have more than 8 extents. + * No split allowed if the 8th extent is already used. + */ + if ((extent_info->fileID == kHFSExtentsFileID) && (extents[kHFSPlusExtentDensity - 1].blockCount != 0)) { + printf ("hfs_split_extent: Maximum 8 extents allowed for extents overflow btree, cannot split further.\n"); + error = ENOSPC; + goto out; } - lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK); - fp = VTOF(vp); - datablks = 0; - /* Relocate non-overflow extents */ - for (i = 0; i < kHFSPlusExtentDensity; ++i) { - if (fp->ff_extents[i].blockCount == 0) + /* Determine the starting allocation block number for the following + * overflow extent record, if any, before the current record + * gets modified. + */ + read_recStartBlock = extent_info->recStartBlock; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (extents[i].blockCount == 0) { break; - oldStartBlock = fp->ff_extents[i].startBlock; - blockCount = fp->ff_extents[i].blockCount; - datablks += blockCount; - block = oldStartBlock + blockCount; - if (block > startblk) { - error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount); - if (error) { - printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error); - goto fail; - } - if (blockCount != fp->ff_extents[i].blockCount) { - printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount); - goto free_fail; - } - error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context); - if (error) { - printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error); - goto free_fail; - } - fp->ff_extents[i].startBlock = newStartBlock; - VTOC(vp)->c_flag |= C_MODIFIED; - *moved = true; - error = BlockDeallocate(hfsmp, oldStartBlock, blockCount); - if (error) { - /* TODO: Mark volume inconsistent? */ - printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error); - goto fail; - } - error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); - if (error) { - /* TODO: Mark volume inconsistent? */ - printf("hfs_reclaim_sys_file: hfs_flushvolumeheader returned %d\n", error); - goto fail; + } + read_recStartBlock += extents[i].blockCount; + } + + /* Shift and split */ + if (index == kHFSPlusExtentDensity-1) { + /* The new extent created after split will go into following overflow extent record */ + shift_extent.startBlock = extents[index].startBlock + newBlockCount; + shift_extent.blockCount = extents[index].blockCount - newBlockCount; + + /* Last extent in the record will be split, so nothing to shift */ + } else { + /* Splitting of extents can result in at most of one + * extent entry to be shifted into following overflow extent + * record. So, store the last extent entry for later. 
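+ * (For instance, with a hypothetical index of 3 in an 8-entry
+ * record: entry 7 is saved into shift_extent, entries 6, 5 and 4
+ * are copied one slot to the right, and slot 4 becomes free to
+ * hold the second half of the split.)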
+ */ + shift_extent = extents[kHFSPlusExtentDensity-1]; + if ((hfs_resize_debug) && (shift_extent.blockCount != 0)) { + printf ("hfs_split_extent: Save 7:(%u,%u) to shift into overflow record\n", shift_extent.startBlock, shift_extent.blockCount); + } + + /* Start shifting extent information from the end of the extent + * record to the index where we want to insert the new extent. + * Note that kHFSPlusExtentDensity-1 is already saved above, and + * does not need to be shifted. The extent entry that is being + * split does not get shifted. + */ + for (i = kHFSPlusExtentDensity-2; i > index; i--) { + if (hfs_resize_debug) { + if (extents[i].blockCount) { + printf ("hfs_split_extent: Shift %u:(%u,%u) to %u:(%u,%u)\n", i, extents[i].startBlock, extents[i].blockCount, i+1, extents[i].startBlock, extents[i].blockCount); + } } + extents[i+1] = extents[i]; } } - /* Relocate overflow extents (if any) */ - if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) { - struct BTreeIterator *iterator = NULL; - struct FSBufferDescriptor btdata; - HFSPlusExtentRecord record; - HFSPlusExtentKey *key; - FCB *fcb; - u_int32_t fileID; - u_int8_t forktype; + if (index == kHFSPlusExtentDensity-1) { + /* The second half of the extent being split will be the overflow + * entry that will go into following overflow extent record. The + * value has been stored in 'shift_extent' above, so there is + * nothing to be done here. + */ + } else { + /* Update the values in the second half of the extent being split + * before updating the first half of the split. Note that the + * extent to split or first half of the split is at index 'index' + * and a new extent or second half of the split will be inserted at + * 'index+1' or into following overflow extent record. + */ + extents[index+1].startBlock = extents[index].startBlock + newBlockCount; + extents[index+1].blockCount = extents[index].blockCount - newBlockCount; + } + /* Update the extent being split, only the block count will change */ + extents[index].blockCount = newBlockCount; + + if (hfs_resize_debug) { + printf ("hfs_split_extent: Split %u:(%u,%u) and ", index, extents[index].startBlock, extents[index].blockCount); + if (index != kHFSPlusExtentDensity-1) { + printf ("%u:(%u,%u)\n", index+1, extents[index+1].startBlock, extents[index+1].blockCount); + } else { + printf ("overflow:(%u,%u)\n", shift_extent.startBlock, shift_extent.blockCount); + } + } - forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0; - fileID = VTOC(vp)->c_cnid; - if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) { - printf("hfs_reclaim_sys_file: kmem_alloc failed!\n"); - error = ENOMEM; - goto fail; + /* Write out information about the newly split extent to the disk */ + if (extent_info->catalog_fp) { + /* (extent_info->catalog_fp != NULL) means the newly split + * extent exists in the catalog record. This means that + * the cnode was updated. Therefore, to write out the changes, + * mark the cnode as modified. We cannot call hfs_update() + * in this function because the caller hfs_reclaim_extent() + * is holding the catalog lock currently. 
+ */ + cp->c_flag |= C_MODIFIED; + } else { + /* The newly split extent is for large EAs or is in overflow + * extent record, so update it directly in the btree using the + * iterator information from the shared extent_info structure + */ + error = BTReplaceRecord(extent_info->fcb, extent_info->iterator, + &(extent_info->btdata), extent_info->recordlen); + if (error) { + printf ("hfs_split_extent: fileID=%u BTReplaceRecord returned error=%d\n", extent_info->fileID, error); + goto out; + } + } + + /* No extent entry to be shifted into another extent overflow record */ + if (shift_extent.blockCount == 0) { + if (hfs_resize_debug) { + printf ("hfs_split_extent: No extent entry to be shifted into overflow records\n"); } + error = 0; + goto out; + } - bzero(iterator, sizeof(*iterator)); - key = (HFSPlusExtentKey *) &iterator->key; - key->keyLength = kHFSPlusExtentKeyMaximumLength; - key->forkType = forktype; - key->fileID = fileID; - key->startBlock = datablks; + /* The overflow extent entry has to be shifted into an extent + * overflow record. This means that we might have to shift + * extent entries from all subsequent overflow records by one. + * We start iteration from the first record to the last record, + * and shift the extent entry from one record to another. + * We might have to create a new extent record for the last + * extent entry for the file. + */ - btdata.bufferAddress = &record; - btdata.itemSize = sizeof(record); + /* Initialize iterator to search the next record */ + bzero(&iterator, sizeof(iterator)); + if (is_xattr) { + /* Copy the key from the iterator that was used to update the modified attribute record. */ + xattr_key = (HFSPlusAttrKey *)&(iterator.key); + bcopy((HFSPlusAttrKey *)&(extent_info->iterator->key), xattr_key, sizeof(HFSPlusAttrKey)); + /* Note: xattr_key->startBlock will be initialized later in the iteration loop */ + + MALLOC(xattr_rec, HFSPlusAttrRecord *, + sizeof(HFSPlusAttrRecord), M_TEMP, M_WAITOK); + if (xattr_rec == NULL) { + error = ENOMEM; + goto out; + } + btdata.bufferAddress = xattr_rec; + btdata.itemSize = sizeof(HFSPlusAttrRecord); + btdata.itemCount = 1; + extents = xattr_rec->overflowExtents.extents; + } else { + /* Initialize the extent key for the current file */ + extents_key = (HFSPlusExtentKey *) &(iterator.key); + extents_key->keyLength = kHFSPlusExtentKeyMaximumLength; + extents_key->forkType = extent_info->forkType; + extents_key->fileID = extent_info->fileID; + /* Note: extents_key->startBlock will be initialized later in the iteration loop */ + + MALLOC(extents_rec, HFSPlusExtentRecord *, + sizeof(HFSPlusExtentRecord), M_TEMP, M_WAITOK); + if (extents_rec == NULL) { + error = ENOMEM; + goto out; + } + btdata.bufferAddress = extents_rec; + btdata.itemSize = sizeof(HFSPlusExtentRecord); btdata.itemCount = 1; + extents = extents_rec[0]; + } + + /* The overflow extent entry has to be shifted into an extent + * overflow record. This means that we might have to shift + * extent entries from all subsequent overflow records by one. + * We start iteration from the first record to the last record, + * examine one extent record in each iteration and shift one + * extent entry from one record to another. We might have to + * create a new extent record for the last extent entry for the + * file. + * + * If shift_extent.blockCount is non-zero, it means that there is + * an extent entry that needs to be shifted into the next + * overflow extent record. We keep on going till there are no such + * entries left to be shifted. 
This will also change the starting + * allocation block number of the extent record which is part of + * the key for the extent record in each iteration. Note that + * because the extent record key is changing while we are searching, + * the record can not be updated directly, instead it has to be + * deleted and inserted again. + */ + while (shift_extent.blockCount) { + if (hfs_resize_debug) { + printf ("hfs_split_extent: Will shift (%u,%u) into overflow record with startBlock=%u\n", shift_extent.startBlock, shift_extent.blockCount, read_recStartBlock); + } + + /* Search if there is any existing overflow extent record + * that matches the current file and the logical start block + * number. + * + * For this, the logical start block number in the key is + * the value calculated based on the logical start block + * number of the current extent record and the total number + * of blocks existing in the current extent record. + */ + if (is_xattr) { + xattr_key->startBlock = read_recStartBlock; + } else { + extents_key->startBlock = read_recStartBlock; + } + error = BTSearchRecord(extent_info->fcb, &iterator, &btdata, &reclen, &iterator); + if (error) { + if (error != btNotFound) { + printf ("hfs_split_extent: fileID=%u startBlock=%u BTSearchRecord error=%d\n", extent_info->fileID, read_recStartBlock, error); + goto out; + } + /* No matching record was found, so create a new extent record. + * Note: Since no record was found, we can't rely on the + * btree key in the iterator any longer. This will be initialized + * later before we insert the record. + */ + create_record = true; + } - fcb = VTOF(hfsmp->hfs_extents_vp); + /* The extra extent entry from the previous record is being inserted + * as the first entry in the current extent record. This will change + * the file allocation block number (FABN) of the current extent + * record, which is the startBlock value from the extent record key. + * Since one extra entry is being inserted in the record, the new + * FABN for the record will less than old FABN by the number of blocks + * in the new extent entry being inserted at the start. We have to + * do this before we update read_recStartBlock to point at the + * startBlock of the following record. + */ + write_recStartBlock = read_recStartBlock - shift_extent.blockCount; + if (hfs_resize_debug) { + if (create_record) { + printf ("hfs_split_extent: No records found for startBlock=%u, will create new with startBlock=%u\n", read_recStartBlock, write_recStartBlock); + } + } - error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator); - while (error == 0) { - /* Stop when we encounter a different file or fork. */ - if ((key->fileID != fileID) || - (key->forkType != forktype)) { + /* Now update the read_recStartBlock to account for total number + * of blocks in this extent record. It will now point to the + * starting allocation block number for the next extent record. + */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (extents[i].blockCount == 0) { break; } - /* - * Check if the file overlaps target space. 
+ read_recStartBlock += extents[i].blockCount; + } + + if (create_record == true) { + /* Initialize new record content with only one extent entry */ + bzero(extents, sizeof(HFSPlusExtentRecord)); + /* The new record will contain only one extent entry */ + extents[0] = shift_extent; + /* There are no more overflow extents to be shifted */ + shift_extent.startBlock = shift_extent.blockCount = 0; + + if (is_xattr) { + /* BTSearchRecord above returned btNotFound, + * but since the attribute btree is never empty + * if we are trying to insert new overflow + * record for the xattrs, the extents_key will + * contain correct data. So we don't need to + * re-initialize it again like below. + */ + + /* Initialize the new xattr record */ + xattr_rec->recordType = kHFSPlusAttrExtents; + xattr_rec->overflowExtents.reserved = 0; + reclen = sizeof(HFSPlusAttrExtents); + } else { + /* BTSearchRecord above returned btNotFound, + * which means that extents_key content might + * not correspond to the record that we are + * trying to create, especially when the extents + * overflow btree is empty. So we reinitialize + * the extents_key again always. + */ + extents_key->keyLength = kHFSPlusExtentKeyMaximumLength; + extents_key->forkType = extent_info->forkType; + extents_key->fileID = extent_info->fileID; + + /* Initialize the new extent record */ + reclen = sizeof(HFSPlusExtentRecord); + } + } else { + /* The overflow extent entry from previous record will be + * the first entry in this extent record. If the last + * extent entry in this record is valid, it will be shifted + * into the following extent record as its first entry. So + * save the last entry before shifting entries in current + * record. */ - for (i = 0; i < kHFSPlusExtentDensity; ++i) { - if (record[i].blockCount == 0) { - goto overflow_done; - } - oldStartBlock = record[i].startBlock; - blockCount = record[i].blockCount; - block = oldStartBlock + blockCount; - if (block > startblk) { - error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount); - if (error) { - printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error); - goto overflow_done; - } - if (blockCount != record[i].blockCount) { - printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount); - kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); - goto free_fail; - } - error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context); - if (error) { - printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error); - kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); - goto free_fail; - } - record[i].startBlock = newStartBlock; - VTOC(vp)->c_flag |= C_MODIFIED; - *moved = true; - /* - * NOTE: To support relocating overflow extents of the - * allocation file, we must update the BTree record BEFORE - * deallocating the old extent so that BlockDeallocate will - * use the extent's new location to calculate physical block - * numbers. (This is for the case where the old extent's - * bitmap bits actually reside in the extent being moved.) - */ - error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record); - if (error) { - /* TODO: Mark volume inconsistent? */ - printf("hfs_reclaim_sys_file: BTUpdateRecord returned %d\n", error); - goto overflow_done; - } - error = BlockDeallocate(hfsmp, oldStartBlock, blockCount); - if (error) { - /* TODO: Mark volume inconsistent? 
*/ - printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error); - goto overflow_done; - } - } + last_extent = extents[kHFSPlusExtentDensity-1]; + + /* Shift all entries by one index towards the end */ + for (i = kHFSPlusExtentDensity-2; i >= 0; i--) { + extents[i+1] = extents[i]; } - /* Look for more records. */ - error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); - if (error == btNotFound) { - error = 0; - break; + + /* Overflow extent entry saved from previous record + * is now the first entry in the current record. + */ + extents[0] = shift_extent; + + if (hfs_resize_debug) { + printf ("hfs_split_extent: Shift overflow=(%u,%u) to record with updated startBlock=%u\n", shift_extent.startBlock, shift_extent.blockCount, write_recStartBlock); + } + + /* The last entry from current record will be the + * overflow entry which will be the first entry for + * the following extent record. + */ + shift_extent = last_extent; + + /* Since the key->startBlock is being changed for this record, + * it should be deleted and inserted with the new key. + */ + error = BTDeleteRecord(extent_info->fcb, &iterator); + if (error) { + printf ("hfs_split_extent: fileID=%u startBlock=%u BTDeleteRecord error=%d\n", extent_info->fileID, read_recStartBlock, error); + goto out; + } + if (hfs_resize_debug) { + printf ("hfs_split_extent: Deleted record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock)); } } -overflow_done: - kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); + + /* Insert the newly created or modified extent record */ + bzero(&iterator.hint, sizeof(iterator.hint)); + if (is_xattr) { + xattr_key->startBlock = write_recStartBlock; + } else { + extents_key->startBlock = write_recStartBlock; + } + error = BTInsertRecord(extent_info->fcb, &iterator, &btdata, reclen); if (error) { - goto fail; + printf ("hfs_split_extent: fileID=%u, startBlock=%u BTInsertRecord error=%d\n", extent_info->fileID, write_recStartBlock, error); + goto out; + } + if (hfs_resize_debug) { + printf ("hfs_split_extent: Inserted extent record with startBlock=%u\n", write_recStartBlock); } } - - hfs_systemfile_unlock(hfsmp, lockflags); - error = hfs_end_transaction(hfsmp); - if (error) { - printf("hfs_reclaim_sys_file: hfs_end_transaction returned %d\n", error); + BTFlushPath(extent_info->fcb); +out: + if (extents_rec) { + FREE (extents_rec, M_TEMP); + } + if (xattr_rec) { + FREE (xattr_rec, M_TEMP); } - - return error; - -free_fail: - (void) BlockDeallocate(hfsmp, newStartBlock, blockCount); -fail: - (void) hfs_systemfile_unlock(hfsmp, lockflags); - (void) hfs_end_transaction(hfsmp); return error; } -/* - * This journal_relocate callback updates the journal info block to point - * at the new journal location. This write must NOT be done using the - * transaction. We must write the block immediately. We must also force - * it to get to the media so that the new journal location will be seen by - * the replay code before we can safely let journaled blocks be written - * to their normal locations. +/* + * Relocate an extent if it lies beyond the expected end of volume. * - * The tests for journal_uses_fua below are mildly hacky. Since the journal - * and the file system are both on the same device, I'm leveraging what - * the journal has decided about FUA. + * This function is called for every extent of the file being relocated. + * It allocates space for relocation, copies the data, deallocates + * the old extent, and update corresponding on-disk extent. 
If the function + * does not find contiguous space to relocate an extent, it splits the + * extent in smaller size to be able to relocate it out of the area of + * disk being reclaimed. As an optimization, if an extent lies partially + * in the area of the disk being reclaimed, it is split so that we only + * have to relocate the area that was overlapping with the area of disk + * being reclaimed. + * + * Note that every extent is relocated in its own transaction so that + * they do not overwhelm the journal. This function handles the extent + * record that exists in the catalog record, extent record from overflow + * extents btree, and extents for large EAs. + * + * Inputs: + * extent_info - This is the structure that contains state about + * the current file, extent, and extent record that + * is being relocated. This structure is shared + * among code that traverses through all the extents + * of the file, code that relocates extents, and + * code that splits the extent. */ -struct hfs_journal_relocate_args { - struct hfsmount *hfsmp; - vfs_context_t context; +static int +hfs_reclaim_extent(struct hfsmount *hfsmp, const u_long allocLimit, struct hfs_reclaim_extent_info *extent_info, vfs_context_t context) +{ + int error = 0; + int index; + struct cnode *cp; + u_int32_t oldStartBlock; + u_int32_t oldBlockCount; u_int32_t newStartBlock; -}; + u_int32_t newBlockCount; + u_int32_t roundedBlockCount; + uint16_t node_size; + uint32_t remainder_blocks; + u_int32_t alloc_flags; + int blocks_allocated = false; -static errno_t -hfs_journal_relocate_callback(void *_args) -{ - int error; - struct hfs_journal_relocate_args *args = _args; + index = extent_info->extent_index; + cp = VTOC(extent_info->vp); + + oldStartBlock = extent_info->extents[index].startBlock; + oldBlockCount = extent_info->extents[index].blockCount; + + if (0 && hfs_resize_debug) { + printf ("hfs_reclaim_extent: Examine record:%u recStartBlock=%u, %u:(%u,%u)\n", extent_info->overflow_count, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount); + } + + /* If the current extent lies completely within allocLimit, + * it does not require any relocation. + */ + if ((oldStartBlock + oldBlockCount) <= allocLimit) { + extent_info->cur_blockCount += oldBlockCount; + return error; + } + + /* Every extent should be relocated in its own transaction + * to make sure that we don't overflow the journal buffer. + */ + error = hfs_start_transaction(hfsmp); + if (error) { + return error; + } + extent_info->lockflags = hfs_systemfile_lock(hfsmp, extent_info->lockflags, HFS_EXCLUSIVE_LOCK); + + /* Check if the extent lies partially in the area to reclaim, + * i.e. it starts before allocLimit and ends beyond allocLimit. + * We have already skipped extents that lie completely within + * allocLimit in the check above, so we only check for the + * startBlock. If it lies partially, split it so that we + * only relocate part of the extent. + */ + if (oldStartBlock < allocLimit) { + newBlockCount = allocLimit - oldStartBlock; + + /* If the extent belongs to a btree, check and trim + * it to be multiple of the node size. + */ + if (extent_info->is_sysfile) { + node_size = get_btree_nodesize(extent_info->vp); + /* If the btree node size is less than the block size, + * splitting this extent will not split a node across + * different extents. So we only check and trim if + * node size is more than the allocation block size. 
+ */
+ if (node_size > hfsmp->blockSize) {
+ remainder_blocks = newBlockCount % (node_size / hfsmp->blockSize);
+ if (remainder_blocks) {
+ newBlockCount -= remainder_blocks;
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
+ }
+ }
+ }
+ }
+
+ if (hfs_resize_debug) {
+ int idx = extent_info->extent_index;
+ printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+ }
+
+ /* Split the extents into two parts --- the first extent lies
+ * completely within allocLimit and therefore does not require
+ * relocation. The second extent will require relocation which
+ * will be handled when the caller calls this function again
+ * for the next extent.
+ */
+ error = hfs_split_extent(extent_info, newBlockCount);
+ if (error == 0) {
+ /* Split success, no relocation required */
+ goto out;
+ }
+ /* Split failed, so try to relocate the entire extent */
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_extent: Split straddling extent failed, relocate full extent\n");
+ }
+ }
+
+ /* At this point, the current extent requires relocation.
+ * We will first try to allocate space equal to the size of the
+ * extent being relocated, so that it can be relocated without
+ * splitting. If the allocation fails, we will try to allocate
+ * contiguous blocks out of the metadata zone. If that allocation
+ * also fails, then we will take whatever contiguous block run is
+ * returned by the allocation, split the extent into two parts,
+ * and then relocate the first split extent.
+ */
+ alloc_flags = HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS;
+ if (extent_info->is_sysfile) {
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ }
+
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags,
+ &newStartBlock, &newBlockCount);
+ if ((extent_info->is_sysfile == false) &&
+ ((error == dskFulErr) || (error == ENOSPC))) {
+ /* For non-system files, try reallocating space in the metadata zone */
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount,
+ alloc_flags, &newStartBlock, &newBlockCount);
+ }
+ if ((error == dskFulErr) || (error == ENOSPC)) {
+ /* We did not find the desired contiguous space for this extent.
+ * So try to allocate the maximum contiguous space available.
+ */
+ alloc_flags &= ~HFS_ALLOC_FORCECONTIG;
+
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount,
+ alloc_flags, &newStartBlock, &newBlockCount);
+ if (error) {
+ printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) BlockAllocate error=%d\n", extent_info->fileID, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount, error);
+ goto out;
+ }
+ blocks_allocated = true;
+
+ /* The number of blocks allocated is less than the requested
+ * number of blocks. For btree extents, check and trim the
+ * extent to be a multiple of the node size.
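+ * (Illustrative numbers: with a hypothetical 8K btree node size
+ * and a 4K allocation block size, node_size / blockSize is 2, so
+ * a 9-block allocation is trimmed to 8 blocks and the 1-block
+ * remainder is freed back to the bitmap below.)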
+ */ + if (extent_info->is_sysfile) { + node_size = get_btree_nodesize(extent_info->vp); + if (node_size > hfsmp->blockSize) { + remainder_blocks = newBlockCount % (node_size / hfsmp->blockSize); + if (remainder_blocks) { + roundedBlockCount = newBlockCount - remainder_blocks; + /* Free tail-end blocks of the newly allocated extent */ + BlockDeallocate(hfsmp, newStartBlock + roundedBlockCount, + newBlockCount - roundedBlockCount, + HFS_ALLOC_SKIPFREEBLKS); + newBlockCount = roundedBlockCount; + if (hfs_resize_debug) { + printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount); + } + if (newBlockCount == 0) { + printf ("hfs_reclaim_extent: Not enough contiguous blocks available to relocate fileID=%d\n", extent_info->fileID); + error = ENOSPC; + goto out; + } + } + } + } + + /* The number of blocks allocated is less than the number of + * blocks requested, so split this extent --- the first extent + * will be relocated as part of this function call and the caller + * will handle relocating the second extent by calling this + * function again for the second extent. + */ + error = hfs_split_extent(extent_info, newBlockCount); + if (error) { + printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) split error=%d\n", extent_info->fileID, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount, error); + goto out; + } + oldBlockCount = newBlockCount; + } + if (error) { + printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) contig BlockAllocate error=%d\n", extent_info->fileID, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount, error); + goto out; + } + blocks_allocated = true; + + /* Copy data from old location to new location */ + error = hfs_copy_extent(hfsmp, extent_info->vp, oldStartBlock, + newStartBlock, newBlockCount, context); + if (error) { + printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u)=>(%u,%u) hfs_copy_extent error=%d\n", extent_info->fileID, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount, newStartBlock, newBlockCount, error); + goto out; + } + + /* Update the extent record with the new start block information */ + extent_info->extents[index].startBlock = newStartBlock; + + /* Sync the content back to the disk */ + if (extent_info->catalog_fp) { + /* Update the extents in catalog record */ + if (extent_info->is_dirlink) { + error = cat_update_dirlink(hfsmp, extent_info->forkType, + extent_info->dirlink_desc, extent_info->dirlink_attr, + &(extent_info->dirlink_fork->ff_data)); + } else { + cp->c_flag |= C_MODIFIED; + /* If this is a system file, sync volume headers on disk */ + if (extent_info->is_sysfile) { + error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); + } + } + } else { + /* Replace record for extents overflow or extents-based xattrs */ + error = BTReplaceRecord(extent_info->fcb, extent_info->iterator, + &(extent_info->btdata), extent_info->recordlen); + } + if (error) { + printf ("hfs_reclaim_extent: fileID=%u, update record error=%u\n", extent_info->fileID, error); + goto out; + } + + /* Deallocate the old extent */ + error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS); + if (error) { + printf ("hfs_reclaim_extent: fileID=%u start=%u, %u:(%u,%u) BlockDeallocate error=%d\n", extent_info->fileID, extent_info->recStartBlock, index, oldStartBlock, oldBlockCount, error); + goto out; + } + extent_info->blocks_relocated += newBlockCount; + + if (hfs_resize_debug) { + 
printf ("hfs_reclaim_extent: Relocated record:%u %u:(%u,%u) to (%u,%u)\n", extent_info->overflow_count, index, oldStartBlock, oldBlockCount, newStartBlock, newBlockCount); + } + +out: + if (error != 0) { + if (blocks_allocated == true) { + BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS); + } + } else { + /* On success, increment the total allocation blocks processed */ + extent_info->cur_blockCount += newBlockCount; + } + + hfs_systemfile_unlock(hfsmp, extent_info->lockflags); + + /* For a non-system file, if an extent entry from catalog record + * was modified, sync the in-memory changes to the catalog record + * on disk before ending the transaction. + */ + if ((extent_info->catalog_fp) && + (extent_info->is_sysfile == false)) { + (void) hfs_update(extent_info->vp, MNT_WAIT); + } + + hfs_end_transaction(hfsmp); + + return error; +} + +/* Report intermediate progress during volume resize */ +static void +hfs_truncatefs_progress(struct hfsmount *hfsmp) +{ + u_int32_t cur_progress; + + hfs_resize_progress(hfsmp, &cur_progress); + if (cur_progress > (hfsmp->hfs_resize_progress + 9)) { + printf("hfs_truncatefs: %d%% done...\n", cur_progress); + hfsmp->hfs_resize_progress = cur_progress; + } + return; +} + +/* + * Reclaim space at the end of a volume for given file and forktype. + * + * This routine attempts to move any extent which contains allocation blocks + * at or after "allocLimit." A separate transaction is used for every extent + * that needs to be moved. If there is not contiguous space available for + * moving an extent, it can be split into smaller extents. The contents of + * any moved extents are read and written via the volume's device vnode -- + * NOT via "vp." During the move, moved blocks which are part of a transaction + * have their physical block numbers invalidated so they will eventually be + * written to their new locations. + * + * This function is also called for directory hard links. Directory hard links + * are regular files with no data fork and resource fork that contains alias + * information for backward compatibility with pre-Leopard systems. However + * non-Mac OS X implementation can add/modify data fork or resource fork + * information to directory hard links, so we check, and if required, relocate + * both data fork and resource fork. + * + * Inputs: + * hfsmp The volume being resized. + * vp The vnode for the system file. + * fileID ID of the catalog record that needs to be relocated + * forktype The type of fork that needs relocated, + * kHFSResourceForkType for resource fork, + * kHFSDataForkType for data fork + * allocLimit Allocation limit for the new volume size, + * do not use this block or beyond. All extents + * that use this block or any blocks beyond this limit + * will be relocated. + * + * Side Effects: + * hfsmp->hfs_resize_blocksmoved is incremented by the number of allocation + * blocks that were relocated. + */ +static int +hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_int32_t fileID, + u_int8_t forktype, u_long allocLimit, vfs_context_t context) +{ + int error = 0; + struct hfs_reclaim_extent_info *extent_info; + int i; + int lockflags = 0; + struct cnode *cp; + struct filefork *fp; + int took_truncate_lock = false; + int release_desc = false; + HFSPlusExtentKey *key; + + /* If there is no vnode for this file, then there's nothing to do. 
*/ + if (vp == NULL) { + return 0; + } + + cp = VTOC(vp); + + MALLOC(extent_info, struct hfs_reclaim_extent_info *, + sizeof(struct hfs_reclaim_extent_info), M_TEMP, M_WAITOK); + if (extent_info == NULL) { + return ENOMEM; + } + bzero(extent_info, sizeof(struct hfs_reclaim_extent_info)); + extent_info->vp = vp; + extent_info->fileID = fileID; + extent_info->forkType = forktype; + extent_info->is_sysfile = vnode_issystem(vp); + if (vnode_isdir(vp) && (cp->c_flag & C_HARDLINK)) { + extent_info->is_dirlink = true; + } + /* We always need allocation bitmap and extent btree lock */ + lockflags = SFL_BITMAP | SFL_EXTENTS; + if ((fileID == kHFSCatalogFileID) || (extent_info->is_dirlink == true)) { + lockflags |= SFL_CATALOG; + } else if (fileID == kHFSAttributesFileID) { + lockflags |= SFL_ATTRIBUTE; + } else if (fileID == kHFSStartupFileID) { + lockflags |= SFL_STARTUP; + } + extent_info->lockflags = lockflags; + extent_info->fcb = VTOF(hfsmp->hfs_extents_vp); + + /* Flush data associated with current file on disk. + * + * If the current vnode is directory hard link, no flushing of + * journal or vnode is required. The current kernel does not + * modify data/resource fork of directory hard links, so nothing + * will be in the cache. If a directory hard link is newly created, + * the resource fork data is written directly using devvp and + * the code that actually relocates data (hfs_copy_extent()) also + * uses devvp for its I/O --- so they will see a consistent copy. + */ + if (extent_info->is_sysfile) { + /* If the current vnode is system vnode, flush journal + * to make sure that all data is written to the disk. + */ + error = hfs_journal_flush(hfsmp, TRUE); + if (error) { + printf ("hfs_reclaim_file: journal_flush returned %d\n", error); + goto out; + } + } else if (extent_info->is_dirlink == false) { + /* Flush all blocks associated with this regular file vnode. + * Normally there should not be buffer cache blocks for regular + * files, but for objects like symlinks, we can have buffer cache + * blocks associated with the vnode. Therefore we call + * buf_flushdirtyblks() also. + */ + buf_flushdirtyblks(vp, 0, BUF_SKIP_LOCKED, "hfs_reclaim_file"); + + hfs_unlock(cp); + hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); + took_truncate_lock = true; + (void) cluster_push(vp, 0); + error = hfs_lock(cp, HFS_FORCE_LOCK); + if (error) { + goto out; + } + + /* If the file no longer exists, nothing left to do */ + if (cp->c_flag & C_NOEXISTS) { + error = 0; + goto out; + } + + /* Wait for any in-progress writes to this vnode to complete, so that we'll + * be copying consistent bits. (Otherwise, it's possible that an async + * write will complete to the old extent after we read from it. That + * could lead to corruption.) + */ + error = vnode_waitforwrites(vp, 0, 0, 0, "hfs_reclaim_file"); + if (error) { + goto out; + } + } + + if (hfs_resize_debug) { + printf("hfs_reclaim_file: === Start reclaiming %sfork for %sid=%u ===\n", (forktype ? "rsrc" : "data"), (extent_info->is_dirlink ? 
"dirlink" : "file"), fileID); + } + + if (extent_info->is_dirlink) { + MALLOC(extent_info->dirlink_desc, struct cat_desc *, + sizeof(struct cat_desc), M_TEMP, M_WAITOK); + MALLOC(extent_info->dirlink_attr, struct cat_attr *, + sizeof(struct cat_attr), M_TEMP, M_WAITOK); + MALLOC(extent_info->dirlink_fork, struct filefork *, + sizeof(struct filefork), M_TEMP, M_WAITOK); + if ((extent_info->dirlink_desc == NULL) || + (extent_info->dirlink_attr == NULL) || + (extent_info->dirlink_fork == NULL)) { + error = ENOMEM; + goto out; + } + + /* Lookup catalog record for directory hard link and + * create a fake filefork for the value looked up from + * the disk. + */ + fp = extent_info->dirlink_fork; + bzero(extent_info->dirlink_fork, sizeof(struct filefork)); + extent_info->dirlink_fork->ff_cp = cp; + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + error = cat_lookup_dirlink(hfsmp, fileID, forktype, + extent_info->dirlink_desc, extent_info->dirlink_attr, + &(extent_info->dirlink_fork->ff_data)); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error) { + printf ("hfs_reclaim_file: cat_lookup_dirlink for fileID=%u returned error=%u\n", fileID, error); + goto out; + } + release_desc = true; + } else { + fp = VTOF(vp); + } + + extent_info->catalog_fp = fp; + extent_info->recStartBlock = 0; + extent_info->extents = extent_info->catalog_fp->ff_extents; + /* Relocate extents from the catalog record */ + for (i = 0; i < kHFSPlusExtentDensity; ++i) { + if (fp->ff_extents[i].blockCount == 0) { + break; + } + extent_info->extent_index = i; + error = hfs_reclaim_extent(hfsmp, allocLimit, extent_info, context); + if (error) { + printf ("hfs_reclaim_file: fileID=%u #%d %u:(%u,%u) hfs_reclaim_extent error=%d\n", fileID, extent_info->overflow_count, i, fp->ff_extents[i].startBlock, fp->ff_extents[i].blockCount, error); + goto out; + } + } + + /* If the number of allocation blocks processed for reclaiming + * are less than total number of blocks for the file, continuing + * working on overflow extents record. 
+ */ + if (fp->ff_blocks <= extent_info->cur_blockCount) { + if (0 && hfs_resize_debug) { + printf ("hfs_reclaim_file: Nothing more to relocate, offset=%d, ff_blocks=%u, cur_blockCount=%u\n", i, fp->ff_blocks, extent_info->cur_blockCount); + } + goto out; + } + + if (hfs_resize_debug) { + printf ("hfs_reclaim_file: Will check overflow records, offset=%d, ff_blocks=%u, cur_blockCount=%u\n", i, fp->ff_blocks, extent_info->cur_blockCount); + } + + MALLOC(extent_info->iterator, struct BTreeIterator *, sizeof(struct BTreeIterator), M_TEMP, M_WAITOK); + if (extent_info->iterator == NULL) { + error = ENOMEM; + goto out; + } + bzero(extent_info->iterator, sizeof(struct BTreeIterator)); + key = (HFSPlusExtentKey *) &(extent_info->iterator->key); + key->keyLength = kHFSPlusExtentKeyMaximumLength; + key->forkType = forktype; + key->fileID = fileID; + key->startBlock = extent_info->cur_blockCount; + + extent_info->btdata.bufferAddress = extent_info->record.overflow; + extent_info->btdata.itemSize = sizeof(HFSPlusExtentRecord); + extent_info->btdata.itemCount = 1; + + extent_info->catalog_fp = NULL; + + /* Search the first overflow extent with expected startBlock as 'cur_blockCount' */ + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + error = BTSearchRecord(extent_info->fcb, extent_info->iterator, + &(extent_info->btdata), &(extent_info->recordlen), + extent_info->iterator); + hfs_systemfile_unlock(hfsmp, lockflags); + while (error == 0) { + extent_info->overflow_count++; + extent_info->recStartBlock = key->startBlock; + extent_info->extents = extent_info->record.overflow; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (extent_info->record.overflow[i].blockCount == 0) { + goto out; + } + extent_info->extent_index = i; + error = hfs_reclaim_extent(hfsmp, allocLimit, extent_info, context); + if (error) { + printf ("hfs_reclaim_file: fileID=%u #%d %u:(%u,%u) hfs_reclaim_extent error=%d\n", fileID, extent_info->overflow_count, i, extent_info->record.overflow[i].startBlock, extent_info->record.overflow[i].blockCount, error); + goto out; + } + } + + /* Look for more overflow records */ + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + error = BTIterateRecord(extent_info->fcb, kBTreeNextRecord, + extent_info->iterator, &(extent_info->btdata), + &(extent_info->recordlen)); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error) { + break; + } + /* Stop when we encounter a different file or fork. 
*/ + if ((key->fileID != fileID) || (key->forkType != forktype)) { + break; + } + } + if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { + error = 0; + } + +out: + /* If any blocks were relocated, account them and report progress */ + if (extent_info->blocks_relocated) { + hfsmp->hfs_resize_blocksmoved += extent_info->blocks_relocated; + hfs_truncatefs_progress(hfsmp); + if (fileID < kHFSFirstUserCatalogNodeID) { + printf ("hfs_reclaim_file: Relocated %u blocks from fileID=%u on \"%s\"\n", + extent_info->blocks_relocated, fileID, hfsmp->vcbVN); + } + } + if (extent_info->iterator) { + FREE(extent_info->iterator, M_TEMP); + } + if (release_desc == true) { + cat_releasedesc(extent_info->dirlink_desc); + } + if (extent_info->dirlink_desc) { + FREE(extent_info->dirlink_desc, M_TEMP); + } + if (extent_info->dirlink_attr) { + FREE(extent_info->dirlink_attr, M_TEMP); + } + if (extent_info->dirlink_fork) { + FREE(extent_info->dirlink_fork, M_TEMP); + } + if ((extent_info->blocks_relocated != 0) && (extent_info->is_sysfile == false)) { + (void) hfs_update(vp, MNT_WAIT); + } + if (took_truncate_lock) { + hfs_unlock_truncate(cp, 0); + } + if (extent_info) { + FREE(extent_info, M_TEMP); + } + if (hfs_resize_debug) { + printf("hfs_reclaim_file: === Finished relocating %sfork for fileid=%u (error=%d) ===\n", (forktype ? "rsrc" : "data"), fileID, error); + } + + return error; +} + + +/* + * This journal_relocate callback updates the journal info block to point + * at the new journal location. This write must NOT be done using the + * transaction. We must write the block immediately. We must also force + * it to get to the media so that the new journal location will be seen by + * the replay code before we can safely let journaled blocks be written + * to their normal locations. + * + * The tests for journal_uses_fua below are mildly hacky. Since the journal + * and the file system are both on the same device, I'm leveraging what + * the journal has decided about FUA. 
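+ *
+ * A rough usage sketch (the relocation call itself is made in
+ * hfs_reclaim_journal_file below): the caller fills in a struct
+ * hfs_journal_relocate_args with the mount, the vfs context, and
+ * the journal's new start block, and the journal code invokes this
+ * callback once the journal contents have been moved, so the JIB
+ * can be rewritten before normal journaled writes resume.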
+ */ +struct hfs_journal_relocate_args { + struct hfsmount *hfsmp; + vfs_context_t context; + u_int32_t newStartBlock; +}; + +static errno_t +hfs_journal_relocate_callback(void *_args) +{ + int error; + struct hfs_journal_relocate_args *args = _args; struct hfsmount *hfsmp = args->hfsmp; buf_t bp; JournalInfoBlock *jibp; error = buf_meta_bread(hfsmp->hfs_devvp, - hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size), + hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size), hfsmp->blockSize, vfs_context_ucred(args->context), &bp); if (error) { printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error); + if (bp) { + buf_brelse(bp); + } return error; } jibp = (JournalInfoBlock*) buf_dataptr(bp); @@ -3984,10 +5939,12 @@ hfs_journal_relocate_callback(void *_args) static int -hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) +hfs_reclaim_journal_file(struct hfsmount *hfsmp, u_int32_t allocLimit, vfs_context_t context) { int error; + int journal_err; int lockflags; + u_int32_t oldStartBlock; u_int32_t newStartBlock; u_int32_t oldBlockCount; u_int32_t newBlockCount; @@ -3996,6 +5953,11 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) struct cat_fork journal_fork; struct hfs_journal_relocate_args callback_args; + if (hfsmp->jnl_start + (hfsmp->jnl_size / hfsmp->blockSize) <= allocLimit) { + /* The journal does not require relocation */ + return 0; + } + error = hfs_start_transaction(hfsmp); if (error) { printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error); @@ -4006,7 +5968,9 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize; /* TODO: Allow the journal to change size based on the new volume size. 
*/ - error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, true, &newStartBlock, &newBlockCount); + error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, + HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS, + &newStartBlock, &newBlockCount); if (error) { printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error); goto fail; @@ -4016,7 +5980,7 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) goto free_fail; } - error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount); + error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS); if (error) { printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error); goto free_fail; @@ -4028,6 +5992,7 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error); goto free_fail; } + oldStartBlock = journal_fork.cf_extents[0].startBlock; journal_fork.cf_size = newBlockCount * hfsmp->blockSize; journal_fork.cf_extents[0].startBlock = newStartBlock; journal_fork.cf_extents[0].blockCount = newBlockCount; @@ -4059,13 +6024,30 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context) printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error); } + /* Account for the blocks relocated and print progress */ + hfsmp->hfs_resize_blocksmoved += oldBlockCount; + hfs_truncatefs_progress(hfsmp); + if (!error) { + printf ("hfs_reclaim_journal_file: Relocated %u blocks from journal on \"%s\"\n", + oldBlockCount, hfsmp->vcbVN); + if (hfs_resize_debug) { + printf ("hfs_reclaim_journal_file: Successfully relocated journal from (%u,%u) to (%u,%u)\n", oldStartBlock, oldBlockCount, newStartBlock, newBlockCount); + } + } return error; free_fail: - (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount); + journal_err = BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS); + if (journal_err) { + printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error); + hfs_mark_volume_inconsistent(hfsmp); + } fail: hfs_systemfile_unlock(hfsmp, lockflags); (void) hfs_end_transaction(hfsmp); + if (hfs_resize_debug) { + printf ("hfs_reclaim_journal_file: Error relocating journal file (error=%d)\n", error); + } return error; } @@ -4076,16 +6058,23 @@ fail: * the field in the volume header and the catalog record. 
*/ static int -hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context) +hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, u_int32_t allocLimit, vfs_context_t context) { int error; + int journal_err; int lockflags; + u_int32_t oldBlock; u_int32_t newBlock; u_int32_t blockCount; struct cat_desc jib_desc; struct cat_attr jib_attr; struct cat_fork jib_fork; buf_t old_bp, new_bp; + + if (hfsmp->vcbJinfoBlock <= allocLimit) { + /* The journal info block does not require relocation */ + return 0; + } error = hfs_start_transaction(hfsmp); if (error) { @@ -4094,7 +6083,9 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context) } lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); - error = BlockAllocate(hfsmp, 1, 1, 1, true, true, &newBlock, &blockCount); + error = BlockAllocate(hfsmp, 1, 1, 1, + HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS, + &newBlock, &blockCount); if (error) { printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error); goto fail; @@ -4103,7 +6094,7 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context) printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount); goto free_fail; } - error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1); + error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1, HFS_ALLOC_SKIPFREEBLKS); if (error) { printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error); goto free_fail; @@ -4111,14 +6102,17 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context) /* Copy the old journal info block content to the new location */ error = buf_meta_bread(hfsmp->hfs_devvp, - hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size), + hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size), hfsmp->blockSize, vfs_context_ucred(context), &old_bp); if (error) { printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error); + if (old_bp) { + buf_brelse(old_bp); + } goto free_fail; } new_bp = buf_getblk(hfsmp->hfs_devvp, - newBlock * (hfsmp->blockSize/hfsmp->hfs_phys_block_size), + newBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size), hfsmp->blockSize, 0, 0, BLK_META); bcopy((char*)buf_dataptr(old_bp), (char*)buf_dataptr(new_bp), hfsmp->blockSize); buf_brelse(old_bp); @@ -4136,103 +6130,586 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context) /* Don't fail the operation. */ } } - - /* Update the catalog record for .journal_info_block */ - error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork); - if (error) { - printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error); - goto fail; - } - jib_fork.cf_size = hfsmp->blockSize; - jib_fork.cf_extents[0].startBlock = newBlock; - jib_fork.cf_extents[0].blockCount = 1; - jib_fork.cf_blocks = 1; - error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL); - cat_releasedesc(&jib_desc); /* all done with cat descriptor */ - if (error) { - printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error); - goto fail; - } - - /* Update the pointer to the journal info block in the volume header. 
*/ - hfsmp->vcbJinfoBlock = newBlock; - error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); - if (error) { - printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error); - goto fail; - } - hfs_systemfile_unlock(hfsmp, lockflags); - error = hfs_end_transaction(hfsmp); - if (error) { - printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error); - } - error = journal_flush(hfsmp->jnl); - if (error) { - printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error); + + /* Update the catalog record for .journal_info_block */ + error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork); + if (error) { + printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error); + goto fail; + } + oldBlock = jib_fork.cf_extents[0].startBlock; + jib_fork.cf_size = hfsmp->blockSize; + jib_fork.cf_extents[0].startBlock = newBlock; + jib_fork.cf_extents[0].blockCount = 1; + jib_fork.cf_blocks = 1; + error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL); + cat_releasedesc(&jib_desc); /* all done with cat descriptor */ + if (error) { + printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error); + goto fail; + } + + /* Update the pointer to the journal info block in the volume header. */ + hfsmp->vcbJinfoBlock = newBlock; + error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); + if (error) { + printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error); + goto fail; + } + hfs_systemfile_unlock(hfsmp, lockflags); + error = hfs_end_transaction(hfsmp); + if (error) { + printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error); + } + error = hfs_journal_flush(hfsmp, FALSE); + if (error) { + printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error); + } + + /* Account for the block relocated and print progress */ + hfsmp->hfs_resize_blocksmoved += 1; + hfs_truncatefs_progress(hfsmp); + if (!error) { + printf ("hfs_reclaim_journal_info: Relocated 1 block from journal info on \"%s\"\n", + hfsmp->vcbVN); + if (hfs_resize_debug) { + printf ("hfs_reclaim_journal_info_block: Successfully relocated journal info block from (%u,%u) to (%u,%u)\n", oldBlock, blockCount, newBlock, blockCount); + } + } + return error; + +free_fail: + journal_err = BlockDeallocate(hfsmp, newBlock, blockCount, HFS_ALLOC_SKIPFREEBLKS); + if (journal_err) { + printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error); + hfs_mark_volume_inconsistent(hfsmp); + } + +fail: + hfs_systemfile_unlock(hfsmp, lockflags); + (void) hfs_end_transaction(hfsmp); + if (hfs_resize_debug) { + printf ("hfs_reclaim_journal_info_block: Error relocating journal info block (error=%d)\n", error); + } + return error; +} + + +/* + * This function traverses through all extended attribute records for a given + * fileID, and calls function that reclaims data blocks that exist in the + * area of the disk being reclaimed which in turn is responsible for allocating + * new space, copying extent data, deallocating new space, and if required, + * splitting the extent. + * + * Note: The caller has already acquired the cnode lock on the file. Therefore + * we are assured that no other thread would be creating/deleting/modifying + * extended attributes for this file. + * + * Side Effects: + * hfsmp->hfs_resize_blocksmoved is incremented by the number of allocation + * blocks that were relocated. + * + * Returns: + * 0 on success, non-zero on failure. 
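+ *
+ * A minimal calling sketch (hypothetical caller, locking and error
+ * handling omitted): a caller that already holds the cnode lock on
+ * vp would invoke
+ *
+ *	error = hfs_reclaim_xattr(hfsmp, vp, fileID, allocLimit, context);
+ *
+ * and treat a non-zero return as a failed relocation of that file's
+ * extent-based EAs.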
+ */ +static int +hfs_reclaim_xattr(struct hfsmount *hfsmp, struct vnode *vp, u_int32_t fileID, u_int32_t allocLimit, vfs_context_t context) +{ + int error = 0; + struct hfs_reclaim_extent_info *extent_info; + int i; + HFSPlusAttrKey *key; + int *lockflags; + + if (hfs_resize_debug) { + printf("hfs_reclaim_xattr: === Start reclaiming xattr for id=%u ===\n", fileID); + } + + MALLOC(extent_info, struct hfs_reclaim_extent_info *, + sizeof(struct hfs_reclaim_extent_info), M_TEMP, M_WAITOK); + if (extent_info == NULL) { + return ENOMEM; + } + bzero(extent_info, sizeof(struct hfs_reclaim_extent_info)); + extent_info->vp = vp; + extent_info->fileID = fileID; + extent_info->is_xattr = true; + extent_info->is_sysfile = vnode_issystem(vp); + extent_info->fcb = VTOF(hfsmp->hfs_attribute_vp); + lockflags = &(extent_info->lockflags); + *lockflags = SFL_ATTRIBUTE | SFL_BITMAP; + + /* Allocate and initialize the iterator in the extent_info structure */ + MALLOC(extent_info->iterator, struct BTreeIterator *, + sizeof(struct BTreeIterator), M_TEMP, M_WAITOK); + if (extent_info->iterator == NULL) { + error = ENOMEM; + goto out; + } + bzero(extent_info->iterator, sizeof(struct BTreeIterator)); + + /* Build attribute key */ + key = (HFSPlusAttrKey *)&(extent_info->iterator->key); + error = hfs_buildattrkey(fileID, NULL, key); + if (error) { + goto out; + } + + /* Initialize btdata in the extent_info structure. Note that the + * buffer pointer actually points to the xattr record from the + * extent_info structure itself. + */ + extent_info->btdata.bufferAddress = &(extent_info->record.xattr); + extent_info->btdata.itemSize = sizeof(HFSPlusAttrRecord); + extent_info->btdata.itemCount = 1; + + /* + * Sync all extent-based attribute data to the disk. + * + * All extent-based attribute data I/O is performed via cluster + * I/O using a virtual file that spans the entire file system + * space. + */ + hfs_lock_truncate(VTOC(hfsmp->hfs_attrdata_vp), HFS_EXCLUSIVE_LOCK); + (void)cluster_push(hfsmp->hfs_attrdata_vp, 0); + error = vnode_waitforwrites(hfsmp->hfs_attrdata_vp, 0, 0, 0, "hfs_reclaim_xattr"); + hfs_unlock_truncate(VTOC(hfsmp->hfs_attrdata_vp), 0); + if (error) { + goto out; + } + + /* Search for the extended attributes of the current file. This + * will place the iterator just before the first matching record.
+ */ + *lockflags = hfs_systemfile_lock(hfsmp, *lockflags, HFS_EXCLUSIVE_LOCK); + error = BTSearchRecord(extent_info->fcb, extent_info->iterator, + &(extent_info->btdata), &(extent_info->recordlen), + extent_info->iterator); + hfs_systemfile_unlock(hfsmp, *lockflags); + if (error) { + if (error != btNotFound) { + goto out; + } + /* btNotFound is expected here, so just mask it */ + error = 0; + } + + while (1) { + /* Iterate to the next record */ + *lockflags = hfs_systemfile_lock(hfsmp, *lockflags, HFS_EXCLUSIVE_LOCK); + error = BTIterateRecord(extent_info->fcb, kBTreeNextRecord, + extent_info->iterator, &(extent_info->btdata), + &(extent_info->recordlen)); + hfs_systemfile_unlock(hfsmp, *lockflags); + + /* Stop the iteration if we encounter the end of the btree or an xattr with a different fileID */ + if (error || key->fileID != fileID) { + if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { + error = 0; + } + break; + } + + /* We only care about extent-based EAs */ + if ((extent_info->record.xattr.recordType != kHFSPlusAttrForkData) && + (extent_info->record.xattr.recordType != kHFSPlusAttrExtents)) { + continue; + } + + if (extent_info->record.xattr.recordType == kHFSPlusAttrForkData) { + extent_info->overflow_count = 0; + extent_info->extents = extent_info->record.xattr.forkData.theFork.extents; + } else if (extent_info->record.xattr.recordType == kHFSPlusAttrExtents) { + extent_info->overflow_count++; + extent_info->extents = extent_info->record.xattr.overflowExtents.extents; + } + + extent_info->recStartBlock = key->startBlock; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (extent_info->extents[i].blockCount == 0) { + break; + } + extent_info->extent_index = i; + error = hfs_reclaim_extent(hfsmp, allocLimit, extent_info, context); + if (error) { + printf ("hfs_reclaim_xattr: fileID=%u hfs_reclaim_extent error=%d\n", fileID, error); + goto out; + } + } + } + +out: + /* If any blocks were relocated, account them and report progress */ + if (extent_info->blocks_relocated) { + hfsmp->hfs_resize_blocksmoved += extent_info->blocks_relocated; + hfs_truncatefs_progress(hfsmp); + } + if (extent_info->iterator) { + FREE(extent_info->iterator, M_TEMP); + } + if (extent_info) { + FREE(extent_info, M_TEMP); + } + if (hfs_resize_debug) { + printf("hfs_reclaim_xattr: === Finished relocating xattr for fileid=%u (error=%d) ===\n", fileID, error); + } + return error; +} + +/* + * Reclaim any extent-based extended attribute allocation blocks from + * the area of the disk that is being truncated. + * + * The function traverses the attribute btree to find the fileIDs + * of the extended attributes that need to be relocated. For every + * file whose large EA requires relocation, it looks up the cnode and + * calls hfs_reclaim_xattr() to do all the work for allocating + * new space, copying data, deallocating old space, and, if required, + * splitting the extents. + * + * Inputs: + * allocLimit - starting block of the area being reclaimed + * + * Returns: + * 0 on success, non-zero on failure.
+ */ +static int +hfs_reclaim_xattrspace(struct hfsmount *hfsmp, u_int32_t allocLimit, vfs_context_t context) +{ + int error = 0; + FCB *fcb; + struct BTreeIterator *iterator = NULL; + struct FSBufferDescriptor btdata; + HFSPlusAttrKey *key; + HFSPlusAttrRecord rec; + int lockflags = 0; + cnid_t prev_fileid = 0; + struct vnode *vp; + int need_relocate; + int btree_operation; + u_int32_t files_moved = 0; + u_int32_t prev_blocksmoved; + int i; + + fcb = VTOF(hfsmp->hfs_attribute_vp); + /* Store the value to print total blocks moved by this function at the end */ + prev_blocksmoved = hfsmp->hfs_resize_blocksmoved; + + if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) { + return ENOMEM; + } + bzero(iterator, sizeof(*iterator)); + key = (HFSPlusAttrKey *)&iterator->key; + btdata.bufferAddress = &rec; + btdata.itemSize = sizeof(rec); + btdata.itemCount = 1; + + need_relocate = false; + btree_operation = kBTreeFirstRecord; + /* Traverse the attribute btree to find extent-based EAs to reclaim */ + while (1) { + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + error = BTIterateRecord(fcb, btree_operation, iterator, &btdata, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error) { + if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { + error = 0; + } + break; + } + btree_operation = kBTreeNextRecord; + + /* If the extents of the current fileID were already relocated, skip it */ + if (prev_fileid == key->fileID) { + continue; + } + + /* Check if any of the extents in the current record need to be relocated */ + need_relocate = false; + switch(rec.recordType) { + case kHFSPlusAttrForkData: + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (rec.forkData.theFork.extents[i].blockCount == 0) { + break; + } + if ((rec.forkData.theFork.extents[i].startBlock + + rec.forkData.theFork.extents[i].blockCount) > allocLimit) { + need_relocate = true; + break; + } + } + break; + + case kHFSPlusAttrExtents: + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (rec.overflowExtents.extents[i].blockCount == 0) { + break; + } + if ((rec.overflowExtents.extents[i].startBlock + + rec.overflowExtents.extents[i].blockCount) > allocLimit) { + need_relocate = true; + break; + } + } + break; + } + + /* Continue iterating to the next attribute record */ + if (need_relocate == false) { + continue; + } + + /* Look up the vnode for the corresponding file. The cnode + * will be locked, which ensures that no one modifies + * the xattrs when we are relocating them. + * + * We want to allow open-unlinked files to be moved, + * so provide allow_deleted == 1 for hfs_vget(). + */ + if (hfs_vget(hfsmp, key->fileID, &vp, 0, 1) != 0) { + continue; + } + + error = hfs_reclaim_xattr(hfsmp, vp, key->fileID, allocLimit, context); + hfs_unlock(VTOC(vp)); + vnode_put(vp); + if (error) { + printf ("hfs_reclaim_xattrspace: Error relocating xattrs for fileid=%u (error=%d)\n", key->fileID, error); + break; + } + prev_fileid = key->fileID; + files_moved++; + } + + if (files_moved) { + printf("hfs_reclaim_xattrspace: Relocated %u xattr blocks from %u files on \"%s\"\n", + (hfsmp->hfs_resize_blocksmoved - prev_blocksmoved), + files_moved, hfsmp->vcbVN); + } + + kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); + return error; +} + +/* + * Reclaim blocks from regular files. + * + * This function iterates over all the records in the catalog btree looking + * for files with extents that overlap into the space we're trying to + * free up.
If a file extent requires relocation, it looks up the vnode + * and calls a function to relocate the data. + * + * Returns: + * Zero on success, non-zero on failure. + */ +static int +hfs_reclaim_filespace(struct hfsmount *hfsmp, u_int32_t allocLimit, vfs_context_t context) +{ + int error; + FCB *fcb; + struct BTreeIterator *iterator = NULL; + struct FSBufferDescriptor btdata; + int btree_operation; + int lockflags; + struct HFSPlusCatalogFile filerec; + struct vnode *vp; + struct vnode *rvp; + struct filefork *datafork; + u_int32_t files_moved = 0; + u_int32_t prev_blocksmoved; + + fcb = VTOF(hfsmp->hfs_catalog_vp); + /* Store the value to print total blocks moved by this function at the end */ + prev_blocksmoved = hfsmp->hfs_resize_blocksmoved; + + if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) { + return ENOMEM; + } + bzero(iterator, sizeof(*iterator)); + + btdata.bufferAddress = &filerec; + btdata.itemSize = sizeof(filerec); + btdata.itemCount = 1; + + btree_operation = kBTreeFirstRecord; + while (1) { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = BTIterateRecord(fcb, btree_operation, iterator, &btdata, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + if (error) { + if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { + error = 0; + } + break; + } + btree_operation = kBTreeNextRecord; + + if (filerec.recordType != kHFSPlusFileRecord) { + continue; + } + + /* Check if any of the extents require relocation */ + if (hfs_file_extent_overlaps(hfsmp, allocLimit, &filerec) == false) { + continue; + } + + /* We want to allow open-unlinked files to be moved, so allow_deleted == 1 */ + if (hfs_vget(hfsmp, filerec.fileID, &vp, 0, 1) != 0) { + continue; + } + + /* If data fork exists or item is a directory hard link, relocate blocks */ + datafork = VTOF(vp); + if ((datafork && datafork->ff_blocks > 0) || vnode_isdir(vp)) { + error = hfs_reclaim_file(hfsmp, vp, filerec.fileID, + kHFSDataForkType, allocLimit, context); + if (error) { + printf ("hfs_reclaim_filespace: Error reclaiming datafork blocks of fileid=%u (error=%d)\n", filerec.fileID, error); + hfs_unlock(VTOC(vp)); + vnode_put(vp); + break; + } + } + + /* If resource fork exists or item is a directory hard link, relocate blocks */ + if (((VTOC(vp)->c_blocks - (datafork ? datafork->ff_blocks : 0)) > 0) || vnode_isdir(vp)) { + if (vnode_isdir(vp)) { + /* Resource fork vnode lookup is invalid for a directory hard link, + * so we reuse the data fork vnode as the resource fork vnode. + */ + rvp = vp; + } else { + error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, FALSE); + if (error) { + printf ("hfs_reclaim_filespace: Error looking up rvp for fileid=%u (error=%d)\n", filerec.fileID, error); + hfs_unlock(VTOC(vp)); + vnode_put(vp); + break; + } + VTOC(rvp)->c_flag |= C_NEED_RVNODE_PUT; + } + + error = hfs_reclaim_file(hfsmp, rvp, filerec.fileID, + kHFSResourceForkType, allocLimit, context); + if (error) { + printf ("hfs_reclaim_filespace: Error reclaiming rsrcfork blocks of fileid=%u (error=%d)\n", filerec.fileID, error); + hfs_unlock(VTOC(vp)); + vnode_put(vp); + break; + } + } + + /* The file forks were relocated successfully; now drop the + * cnode lock and vnode reference, and continue iterating to + * the next catalog record.
+ */ + hfs_unlock(VTOC(vp)); + vnode_put(vp); + files_moved++; + } + + if (files_moved) { + printf("hfs_reclaim_filespace: Relocated %u blocks from %u files on \"%s\"\n", + (hfsmp->hfs_resize_blocksmoved - prev_blocksmoved), + files_moved, hfsmp->vcbVN); } - return error; -free_fail: - (void) BlockDeallocate(hfsmp, newBlock, blockCount); -fail: - hfs_systemfile_unlock(hfsmp, lockflags); - (void) hfs_end_transaction(hfsmp); + kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); return error; } - /* * Reclaim space at the end of a file system. + * + * Inputs - + * allocLimit - start block of the space being reclaimed + * reclaimblks - number of allocation blocks to reclaim */ static int -hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context) +hfs_reclaimspace(struct hfsmount *hfsmp, u_int32_t allocLimit, u_int32_t reclaimblks, vfs_context_t context) { - struct vnode *vp = NULL; - FCB *fcb; - struct BTreeIterator * iterator = NULL; - struct FSBufferDescriptor btdata; - struct HFSPlusCatalogFile filerec; - u_int32_t saved_next_allocation; - cnid_t * cnidbufp; - size_t cnidbufsize; - int filecnt = 0; - int maxfilecnt; - u_long block; - u_long datablks; - u_long rsrcblks; - u_long blkstomove = 0; - int lockflags; - int i; - int error; - int lastprogress = 0; - Boolean system_file_moved = false; + int error = 0; - /* Relocate extents of the Allocation file if they're in the way. */ - error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_allocation_vp, startblk, SFL_BITMAP, &system_file_moved, context); + /* + * Preflight the bitmap to find out the total number of blocks that need + * relocation. + * + * Note: Since allocLimit is set to the location of the new alternate volume + * header, the check below does not account for blocks allocated for the old + * alternate volume header. + */ + error = hfs_count_allocated(hfsmp, allocLimit, reclaimblks, &(hfsmp->hfs_resize_totalblocks)); if (error) { - printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error); + printf ("hfs_reclaimspace: Unable to determine total blocks to reclaim (error=%d)\n", error); + return error; + } + if (hfs_resize_debug) { + printf ("hfs_reclaimspace: Total number of blocks to reclaim = %u\n", hfsmp->hfs_resize_totalblocks); + } + + /* Just to be safe, sync the content of the journal to the disk before we proceed */ + hfs_journal_flush(hfsmp, TRUE); + + /* First, relocate journal file blocks if they're in the way. + * Doing this first makes sure that the journal relocation code + * gets access to contiguous blocks on disk first. The journal + * file has to be contiguous on the disk, otherwise the resize will + * fail. + */ + error = hfs_reclaim_journal_file(hfsmp, allocLimit, context); + if (error) { + printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error); + return error; + } + + /* Relocate the journal info block if it's in the way. */ + error = hfs_reclaim_journal_info_block(hfsmp, allocLimit, context); + if (error) { + printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error); return error; } - /* Relocate extents of the Extents B-tree if they're in the way. */ - error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_extents_vp, startblk, SFL_EXTENTS, &system_file_moved, context); + + /* Relocate extents of the Extents B-tree if they're in the way. + * Relocating the extents btree before the other btrees is important, as + * it provides access to the largest contiguous block range on + * the disk for relocating the extents btree. Note that the extents btree * can have a maximum of only 8 extents. + */ + error = hfs_reclaim_file(hfsmp, hfsmp->hfs_extents_vp, kHFSExtentsFileID, + kHFSDataForkType, allocLimit, context); if (error) { printf("hfs_reclaimspace: reclaim extents b-tree returned %d\n", error); return error; } + + /* Relocate extents of the Allocation file if they're in the way. */ + error = hfs_reclaim_file(hfsmp, hfsmp->hfs_allocation_vp, kHFSAllocationFileID, + kHFSDataForkType, allocLimit, context); + if (error) { + printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error); + return error; + } + /* Relocate extents of the Catalog B-tree if they're in the way. */ - error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_catalog_vp, startblk, SFL_CATALOG, &system_file_moved, context); + error = hfs_reclaim_file(hfsmp, hfsmp->hfs_catalog_vp, kHFSCatalogFileID, + kHFSDataForkType, allocLimit, context); if (error) { printf("hfs_reclaimspace: reclaim catalog b-tree returned %d\n", error); return error; } + /* Relocate extents of the Attributes B-tree if they're in the way. */ - error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_attribute_vp, startblk, SFL_ATTRIBUTE, &system_file_moved, context); + error = hfs_reclaim_file(hfsmp, hfsmp->hfs_attribute_vp, kHFSAttributesFileID, + kHFSDataForkType, allocLimit, context); if (error) { printf("hfs_reclaimspace: reclaim attribute b-tree returned %d\n", error); return error; } + /* Relocate extents of the Startup File if there is one and they're in the way. */ - error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_startup_vp, startblk, SFL_STARTUP, &system_file_moved, context); + error = hfs_reclaim_file(hfsmp, hfsmp->hfs_startup_vp, kHFSStartupFileID, + kHFSDataForkType, allocLimit, context); if (error) { printf("hfs_reclaimspace: reclaim startup file returned %d\n", error); return error; @@ -4244,252 +6721,94 @@ hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vf * shrinking the size of the volume, or else the journal code will panic * with an invalid (too large) block number. * - * Note that system_file_moved will be set if ANY extent was moved, even + * Note that hfs_resize_blocksmoved will be set if ANY extent was moved, even * if it was just an overflow extent. In this case, the journal_flush isn't * strictly required, but shouldn't hurt. */ - if (system_file_moved) - journal_flush(hfsmp->jnl); - - if (hfsmp->jnl_start + (hfsmp->jnl_size / hfsmp->blockSize) > startblk) { - error = hfs_reclaim_journal_file(hfsmp, context); - if (error) { - printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error); - return error; - } - } - - if (hfsmp->vcbJinfoBlock >= startblk) { - error = hfs_reclaim_journal_info_block(hfsmp, context); - if (error) { - printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error); - return error; - } + if (hfsmp->hfs_resize_blocksmoved) { + hfs_journal_flush(hfsmp, TRUE); } - - /* For now move a maximum of 250,000 files.
*/ - maxfilecnt = MIN(hfsmp->hfs_filecount, 250000); - maxfilecnt = MIN((u_long)maxfilecnt, reclaimblks); - cnidbufsize = maxfilecnt * sizeof(cnid_t); - if (kmem_alloc(kernel_map, (vm_offset_t *)&cnidbufp, cnidbufsize)) { - return (ENOMEM); - } - if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) { - kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize); - return (ENOMEM); - } - - saved_next_allocation = hfsmp->nextAllocation; - HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_start); - - fcb = VTOF(hfsmp->hfs_catalog_vp); - bzero(iterator, sizeof(*iterator)); - - btdata.bufferAddress = &filerec; - btdata.itemSize = sizeof(filerec); - btdata.itemCount = 1; - - /* Keep the Catalog and extents files locked during iteration. */ - lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_SHARED_LOCK); - error = BTIterateRecord(fcb, kBTreeFirstRecord, iterator, NULL, NULL); + /* Reclaim extents from catalog file records */ + error = hfs_reclaim_filespace(hfsmp, allocLimit, context); if (error) { - goto end_iteration; - } - /* - * Iterate over all the catalog records looking for files - * that overlap into the space we're trying to free up. - */ - for (filecnt = 0; filecnt < maxfilecnt; ) { - error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); - if (error) { - if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { - error = 0; - } - break; - } - if (filerec.recordType != kHFSPlusFileRecord) { - continue; - } - datablks = rsrcblks = 0; - /* - * Check if either fork overlaps target space. - */ - for (i = 0; i < kHFSPlusExtentDensity; ++i) { - if (filerec.dataFork.extents[i].blockCount != 0) { - datablks += filerec.dataFork.extents[i].blockCount; - block = filerec.dataFork.extents[i].startBlock + - filerec.dataFork.extents[i].blockCount; - if (block >= startblk) { - if ((filerec.fileID == hfsmp->hfs_jnlfileid) || - (filerec.fileID == hfsmp->hfs_jnlinfoblkid)) { - printf("hfs_reclaimspace: cannot move active journal\n"); - error = EPERM; - goto end_iteration; - } - cnidbufp[filecnt++] = filerec.fileID; - blkstomove += filerec.dataFork.totalBlocks; - break; - } - } - if (filerec.resourceFork.extents[i].blockCount != 0) { - rsrcblks += filerec.resourceFork.extents[i].blockCount; - block = filerec.resourceFork.extents[i].startBlock + - filerec.resourceFork.extents[i].blockCount; - if (block >= startblk) { - cnidbufp[filecnt++] = filerec.fileID; - blkstomove += filerec.resourceFork.totalBlocks; - break; - } - } - } - /* - * Check for any overflow extents that overlap. - */ - if (i == kHFSPlusExtentDensity) { - if (filerec.dataFork.totalBlocks > datablks) { - if (hfs_overlapped_overflow_extents(hfsmp, startblk, datablks, filerec.fileID, 0)) { - cnidbufp[filecnt++] = filerec.fileID; - blkstomove += filerec.dataFork.totalBlocks; - } - } else if (filerec.resourceFork.totalBlocks > rsrcblks) { - if (hfs_overlapped_overflow_extents(hfsmp, startblk, rsrcblks, filerec.fileID, 1)) { - cnidbufp[filecnt++] = filerec.fileID; - blkstomove += filerec.resourceFork.totalBlocks; - } - } - } - } - -end_iteration: - if (filecnt == 0 && !system_file_moved) { - printf("hfs_reclaimspace: no files moved\n"); - error = ENOSPC; - } - /* All done with catalog. */ - hfs_systemfile_unlock(hfsmp, lockflags); - if (error || filecnt == 0) - goto out; - - /* - * Double check space requirements to make sure - * there is enough space to relocate any files - * that reside in the reclaim area. 
- * - * Blocks To Move -------------- - * | | | - * V V V - * ------------------------------------------------------------------------ - * | | / /// // | - * | | / /// // | - * | | / /// // | - * ------------------------------------------------------------------------ - * - * <------------------- New Total Blocks ------------------><-- Reclaim --> - * - * <------------------------ Original Total Blocks -----------------------> - * - */ - if (blkstomove >= hfs_freeblks(hfsmp, 1)) { - printf("hfs_truncatefs: insufficient space (need %lu blocks; have %u blocks)\n", blkstomove, hfs_freeblks(hfsmp, 1)); - error = ENOSPC; - goto out; + printf ("hfs_reclaimspace: hfs_reclaim_filespace returned error=%d\n", error); + return error; } - hfsmp->hfs_resize_filesmoved = 0; - hfsmp->hfs_resize_totalfiles = filecnt; - - /* Now move any files that are in the way. */ - for (i = 0; i < filecnt; ++i) { - struct vnode * rvp; - - if (hfs_vget(hfsmp, cnidbufp[i], &vp, 0) != 0) - continue; - - /* Relocate any data fork blocks. */ - if (VTOF(vp)->ff_blocks > 0) { - error = hfs_relocate(vp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc()); - } - if (error) - break; - - /* Relocate any resource fork blocks. */ - if ((VTOC((vp))->c_blocks - VTOF((vp))->ff_blocks) > 0) { - error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE); - if (error) - break; - error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc()); - vnode_put(rvp); - if (error) - break; - } - hfs_unlock(VTOC(vp)); - vnode_put(vp); - vp = NULL; - ++hfsmp->hfs_resize_filesmoved; - - /* Report intermediate progress. */ - if (filecnt > 100) { - int progress; - - progress = (i * 100) / filecnt; - if (progress > (lastprogress + 9)) { - printf("hfs_reclaimspace: %d%% done...\n", progress); - lastprogress = progress; - } - } - } - if (vp) { - hfs_unlock(VTOC(vp)); - vnode_put(vp); - vp = NULL; - } - if (hfsmp->hfs_resize_filesmoved != 0) { - printf("hfs_reclaimspace: relocated %d files on \"%s\"\n", - (int)hfsmp->hfs_resize_filesmoved, hfsmp->vcbVN); + /* Reclaim extents from extent-based extended attributes, if any */ + error = hfs_reclaim_xattrspace(hfsmp, allocLimit, context); + if (error) { + printf ("hfs_reclaimspace: hfs_reclaim_xattrspace returned error=%d\n", error); + return error; } -out: - kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); - kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize); - /* - * Restore the roving allocation pointer on errors. - * (but only if we didn't move any files) - */ - if (error && hfsmp->hfs_resize_filesmoved == 0) { - HFS_UPDATE_NEXT_ALLOCATION(hfsmp, saved_next_allocation); - } - return (error); + return error; } /* - * Check if there are any overflow extents that overlap. + * Check if there are any extents (including overflow extents) that overlap + * into the disk space that is being reclaimed. 
+ * + * Output - + * true - One of the extents needs to be relocated + * false - No extents need to be relocated, or there was an error + */ static int -hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, u_int32_t catblks, u_int32_t fileID, int rsrcfork) +hfs_file_extent_overlaps(struct hfsmount *hfsmp, u_int32_t allocLimit, struct HFSPlusCatalogFile *filerec) { struct BTreeIterator * iterator = NULL; struct FSBufferDescriptor btdata; HFSPlusExtentRecord extrec; HFSPlusExtentKey *extkeyptr; FCB *fcb; - u_int32_t block; - u_int8_t forktype; - int overlapped = 0; - int i; + int overlapped = false; + int i, j; int error; + int lockflags = 0; + u_int32_t endblock; + + /* Check if data fork overlaps the target space */ + for (i = 0; i < kHFSPlusExtentDensity; ++i) { + if (filerec->dataFork.extents[i].blockCount == 0) { + break; + } + endblock = filerec->dataFork.extents[i].startBlock + + filerec->dataFork.extents[i].blockCount; + if (endblock > allocLimit) { + overlapped = true; + goto out; + } + } + + /* Check if resource fork overlaps the target space */ + for (j = 0; j < kHFSPlusExtentDensity; ++j) { + if (filerec->resourceFork.extents[j].blockCount == 0) { + break; + } + endblock = filerec->resourceFork.extents[j].startBlock + + filerec->resourceFork.extents[j].blockCount; + if (endblock > allocLimit) { + overlapped = true; + goto out; + } + } + + /* Return if there are no overflow extents for this file */ + if ((i < kHFSPlusExtentDensity) && (j < kHFSPlusExtentDensity)) { + goto out; + } - forktype = rsrcfork ? 0xFF : 0; if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) { - return (0); + return 0; } bzero(iterator, sizeof(*iterator)); extkeyptr = (HFSPlusExtentKey *)&iterator->key; extkeyptr->keyLength = kHFSPlusExtentKeyMaximumLength; - extkeyptr->forkType = forktype; - extkeyptr->fileID = fileID; - extkeyptr->startBlock = catblks; + extkeyptr->forkType = 0; + extkeyptr->fileID = filerec->fileID; + extkeyptr->startBlock = 0; btdata.bufferAddress = &extrec; btdata.itemSize = sizeof(extrec); @@ -4497,32 +6816,49 @@ hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, u_in fcb = VTOF(hfsmp->hfs_extents_vp); + lockflags = hfs_systemfile_lock(hfsmp, SFL_EXTENTS, HFS_SHARED_LOCK); + + /* This will position the iterator just before the first overflow + * extent record for the given fileID. It will always return btNotFound, + * so we special-case the error code. + */ error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator); + if (error && (error != btNotFound)) { + goto out; + } + + /* BTIterateRecord() might return an error if the btree is empty, and + * in that case we report to the caller that no extents overlap + */ + error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); while (error == 0) { /* Stop when we encounter a different file. */ - if ((extkeyptr->fileID != fileID) || - (extkeyptr->forkType != forktype)) { + if (extkeyptr->fileID != filerec->fileID) { break; } - /* - * Check if the file overlaps target space. - */ + /* Check if any of the overflow extents overlap the target space. */ for (i = 0; i < kHFSPlusExtentDensity; ++i) { if (extrec[i].blockCount == 0) { break; } - block = extrec[i].startBlock + extrec[i].blockCount; - if (block >= startblk) { - overlapped = 1; - break; + endblock = extrec[i].startBlock + extrec[i].blockCount; + if (endblock > allocLimit) { + overlapped = true; + goto out; + } } } /* Look for more records.
*/ error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); } - kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); - return (overlapped); +out: + if (lockflags) { + hfs_systemfile_unlock(hfsmp, lockflags); + } + if (iterator) { + kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator)); + } + return overlapped; } @@ -4537,15 +6873,38 @@ hfs_resize_progress(struct hfsmount *hfsmp, u_int32_t *progress) return (ENXIO); } - if (hfsmp->hfs_resize_totalfiles > 0) - *progress = (hfsmp->hfs_resize_filesmoved * 100) / hfsmp->hfs_resize_totalfiles; - else + if (hfsmp->hfs_resize_totalblocks > 0) { + *progress = (u_int32_t)((hfsmp->hfs_resize_blocksmoved * 100ULL) / hfsmp->hfs_resize_totalblocks); + } else { *progress = 0; + } return (0); } +/* + * Creates a UUID from a unique "name" in the HFS UUID Name space. + * See version 3 UUID. + */ +static void +hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result) +{ + MD5_CTX md5c; + uint8_t rawUUID[8]; + + ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6]; + ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7]; + + MD5Init( &md5c ); + MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) ); + MD5Update( &md5c, rawUUID, sizeof (rawUUID) ); + MD5Final( result, &md5c ); + + result[6] = 0x30 | ( result[6] & 0x0F ); + result[8] = 0x80 | ( result[8] & 0x3F ); +} + /* * Get file system attributes. */ @@ -4554,18 +6913,19 @@ hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t { #define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST)) #define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST)) +#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST | ATTR_CMN_ACCTIME)) ExtendedVCB *vcb = VFSTOVCB(mp); struct hfsmount *hfsmp = VFSTOHFS(mp); - u_long freeCNIDs; + u_int32_t freeCNIDs; - freeCNIDs = (u_long)0xFFFFFFFF - (u_long)hfsmp->vcbNxtCNID; + freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)hfsmp->vcbNxtCNID; VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt); VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt); VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt); VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF); - VFSATTR_RETURN(fsap, f_iosize, (size_t)(MAX_UPL_TRANSFER * PAGE_SIZE)); + VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0)); VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks); VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0)); VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1)); @@ -4608,7 +6968,12 @@ hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_2TB_FILESIZE | VOL_CAP_FMT_HIDDEN_FILES | +#if HFS_COMPRESSION + VOL_CAP_FMT_PATH_FROM_ID | + VOL_CAP_FMT_DECMPFS_COMPRESSION; +#else VOL_CAP_FMT_PATH_FROM_ID; +#endif } cap->capabilities[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_SEARCHFS | @@ -4644,7 +7009,12 @@ hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t VOL_CAP_FMT_2TB_FILESIZE | VOL_CAP_FMT_OPENDENYMODES | VOL_CAP_FMT_HIDDEN_FILES | +#if HFS_COMPRESSION + VOL_CAP_FMT_PATH_FROM_ID | + VOL_CAP_FMT_DECMPFS_COMPRESSION; +#else VOL_CAP_FMT_PATH_FROM_ID; +#endif cap->valid[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | @@ -4670,20 +7040,20 @@ hfs_vfs_getattr(struct mount *mp, 
struct vfs_attr *fsap, __unused vfs_context_t if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) { vol_attributes_attr_t *attrp = &fsap->f_attributes; - attrp->validattr.commonattr = HFS_ATTR_CMN_VALIDMASK; + attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK; attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO; attrp->validattr.dirattr = ATTR_DIR_VALIDMASK; attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK; attrp->validattr.forkattr = 0; - attrp->nativeattr.commonattr = HFS_ATTR_CMN_VALIDMASK; + attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK; attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO; attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK; attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK; attrp->nativeattr.forkattr = 0; VFSATTR_SET_SUPPORTED(fsap, f_attributes); } - fsap->f_create_time.tv_sec = hfsmp->vcbCrDate; + fsap->f_create_time.tv_sec = hfsmp->hfs_itime; fsap->f_create_time.tv_nsec = 0; VFSATTR_SET_SUPPORTED(fsap, f_create_time); fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod; @@ -4722,6 +7092,10 @@ hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN); VFSATTR_SET_SUPPORTED(fsap, f_vol_name); } + if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) { + hfs_getvoluuid(hfsmp, fsap->f_uuid); + VFSATTR_SET_SUPPORTED(fsap, f_uuid); + } return (0); } @@ -4740,6 +7114,10 @@ hfs_rename_volume(struct vnode *vp, const char *name, proc_t p) cat_cookie_t cookie; int lockflags; int error = 0; + char converted_volname[256]; + size_t volname_length = 0; + size_t conv_volname_length = 0; + /* * Ignore attempts to rename a volume to a zero-length name. @@ -4774,8 +7152,16 @@ hfs_rename_volume(struct vnode *vp, const char *name, proc_t p) */ if (!error) { strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN)); + volname_length = strlen ((const char*)vcb->vcbVN); +#define DKIOCCSSETLVNAME _IOW('d', 198, char[1024]) + /* Send the volume name down to CoreStorage if necessary */ + error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED); + if (error == 0) { + (void) VNOP_IOCTL (hfsmp->hfs_devvp, DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current()); + } + error = 0; } - + hfs_systemfile_unlock(hfsmp, lockflags); cat_postflight(hfsmp, &cookie, p); @@ -4856,44 +7242,50 @@ void hfs_mark_volume_inconsistent(struct hfsmount *hfsmp) hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask; MarkVCBDirty(hfsmp); } - /* Log information to ASL log */ - fslog_fs_corrupt(hfsmp->hfs_mp); - printf("HFS: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp->vcbVN); + if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) { + /* Log information to ASL log */ + fslog_fs_corrupt(hfsmp->hfs_mp); + printf("hfs: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp->vcbVN); + } HFS_MOUNT_UNLOCK(hfsmp, TRUE); } /* Replay the journal on the device node provided. Returns zero if * journal replay succeeded or no journal was supposed to be replayed. 
*/ -static int hfs_journal_replay(const char *devnode, vfs_context_t context) +static int hfs_journal_replay(vnode_t devvp, vfs_context_t context) { int retval = 0; - struct vnode *devvp = NULL; struct mount *mp = NULL; struct hfs_mount_args *args = NULL; - /* Lookup vnode for given raw device path */ - retval = vnode_open(devnode, FREAD|FWRITE, 0, 0, &devvp, NULL); - if (retval) { - goto out; - } - /* Replay allowed only on raw devices */ - if (!vnode_ischr(devvp)) { + if (!vnode_ischr(devvp) && !vnode_isblk(devvp)) { retval = EINVAL; goto out; } /* Create dummy mount structures */ MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK); + if (mp == NULL) { + retval = ENOMEM; + goto out; + } bzero(mp, sizeof(struct mount)); mount_lock_init(mp); MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK); + if (args == NULL) { + retval = ENOMEM; + goto out; + } bzero(args, sizeof(struct hfs_mount_args)); retval = hfs_mountfs(devvp, mp, args, 1, context); - buf_flushdirtyblks(devvp, MNT_WAIT, 0, "hfs_journal_replay"); + buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay"); + + /* FSYNC the devnode to be sure all data has been flushed */ + retval = VNOP_FSYNC(devvp, MNT_WAIT, context); out: if (mp) { @@ -4903,9 +7295,6 @@ out: if (args) { FREE(args, M_TEMP); } - if (devvp) { - vnode_close(devvp, FREAD|FWRITE, NULL); - } return retval; }
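For reference, the UUID derivation performed by hfs_getvoluuid() in this diff can be reproduced in user space: MD5-hash a fixed HFS namespace identifier concatenated with the volume's two 32-bit Finder-info words, then stamp in the version and variant bits as for a version 3 (name-based) UUID. The sketch below is a minimal illustration under stated assumptions: it uses OpenSSL's MD5_Init/MD5_Update/MD5_Final in place of the kernel MD5 routines, and the namespace constant is a hypothetical placeholder, not the real HFS_UUID_NAMESPACE_ID value.

/* Build with: cc uuid_sketch.c -lcrypto */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

/* Hypothetical stand-in for HFS_UUID_NAMESPACE_ID (a 16-byte namespace UUID) */
static const uint8_t kNamespace[16] = {
	0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
	0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef
};

/*
 * Mirror of the hfs_getvoluuid() steps: MD5(namespace || 8-byte volume id),
 * then force the version and variant bits.  fndrInfo6/fndrInfo7 play the
 * role of vcbFndrInfo[6] and vcbFndrInfo[7].
 */
static void
volume_uuid(uint32_t fndrInfo6, uint32_t fndrInfo7, uint8_t result[16])
{
	MD5_CTX md5c;
	uint8_t rawUUID[8];

	/* Pack the two Finder-info words, as the kernel code does */
	memcpy(&rawUUID[0], &fndrInfo6, sizeof(fndrInfo6));
	memcpy(&rawUUID[4], &fndrInfo7, sizeof(fndrInfo7));

	MD5_Init(&md5c);
	MD5_Update(&md5c, kNamespace, sizeof(kNamespace));
	MD5_Update(&md5c, rawUUID, sizeof(rawUUID));
	MD5_Final(result, &md5c);

	result[6] = 0x30 | (result[6] & 0x0F);	/* version 3 (name-based) */
	result[8] = 0x80 | (result[8] & 0x3F);	/* RFC 4122 variant */
}

int
main(void)
{
	uint8_t u[16];
	int i;

	volume_uuid(0x12345678, 0x9abcdef0, u);
	for (i = 0; i < 16; i++)
		printf("%02x%s", u[i],
		    (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf("\n");
	return 0;
}

Only the two bit-masking lines correspond one-for-one with the kernel source above; the namespace bytes, helper names, and driver main() are illustrative scaffolding.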