/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/utfconv.h>
#include <sys/kauth.h>
#include <sys/fcntl.h>
+#include <sys/fsctl.h>
#include <sys/vnode_internal.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>
+/* for parsing boot-args */
+#include <pexpert/pexpert.h>
+
+#if CONFIG_PROTECT
+#include <sys/cprotect.h>
+#endif
+
#include "hfs.h"
#include "hfs_catalog.h"
#include "hfs_dbg.h"
static void ReleaseMetaFileVNode(struct vnode *vp);
static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args);
-static void hfs_metadatazone_init(struct hfsmount *);
static u_int32_t hfs_hotfile_freeblocks(struct hfsmount *);
+#define HFS_MOUNT_DEBUG 1
+
//*******************************************************************************
// Note: Finder information in the HFS/HFS+ metadata are considered opaque and
unsigned char hfs_attrname[] = "Attribute B-tree";
unsigned char hfs_startupname[] = "Startup File";
-
-__private_extern__
+#if CONFIG_HFS_STD
OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb,
__unused struct proc *p)
{
struct cat_desc cndesc;
struct cat_attr cnattr;
struct cat_fork fork;
+ int newvnode_flags = 0;
/* Block size must be a multiple of 512 */
if (SWAP_BE32(mdb->drAlBlkSiz) == 0 ||
*
*/
vcb->vcbSigWord = SWAP_BE16 (mdb->drSigWord);
- vcb->vcbCrDate = to_bsd_time(LocalToUTC(SWAP_BE32(mdb->drCrDate)));
+ vcb->hfs_itime = to_bsd_time(LocalToUTC(SWAP_BE32(mdb->drCrDate)));
vcb->localCreateDate = SWAP_BE32 (mdb->drCrDate);
vcb->vcbLsMod = to_bsd_time(LocalToUTC(SWAP_BE32(mdb->drLsMod)));
vcb->vcbAtrb = SWAP_BE16 (mdb->drAtrb);
* When an HFS name cannot be encoded with the current
* volume encoding we use MacRoman as a fallback.
*/
- if (error || (utf8chars == 0))
- (void) mac_roman_to_utf8(mdb->drVN, NAME_MAX, &utf8chars, vcb->vcbVN);
+ if (error || (utf8chars == 0)) {
+ error = mac_roman_to_utf8(mdb->drVN, NAME_MAX, &utf8chars, vcb->vcbVN);
+ /* If we fail to encode to UTF8 from Mac Roman, the name is bad. Deny the mount */
+ if (error) {
+ goto MtVolErr;
+ }
+ }
hfsmp->hfs_logBlockSize = BestBlockSizeFit(vcb->blockSize, MAXBSIZE, hfsmp->hfs_logical_block_size);
vcb->vcbVBMIOSize = kHFSBlockSize;
cnattr.ca_blocks = fork.cf_blocks;
error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &fork,
- &hfsmp->hfs_extents_vp);
- if (error) goto MtVolErr;
+ &hfsmp->hfs_extents_vp, &newvnode_flags);
+ if (error) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error creating Ext Vnode (%d) \n", error);
+ }
+ goto MtVolErr;
+ }
error = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_extents_vp),
(KeyCompareProcPtr)CompareExtentKeys));
if (error) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error opening Ext Vnode (%d) \n", error);
+ }
hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
goto MtVolErr;
}
cnattr.ca_blocks = fork.cf_blocks;
error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &fork,
- &hfsmp->hfs_catalog_vp);
+ &hfsmp->hfs_catalog_vp, &newvnode_flags);
if (error) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error creating catalog Vnode (%d) \n", error);
+ }
hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
goto MtVolErr;
}
error = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_catalog_vp),
(KeyCompareProcPtr)CompareCatalogKeys));
if (error) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error opening catalog Vnode (%d) \n", error);
+ }
hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
goto MtVolErr;
cnattr.ca_blocks = 0;
error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &fork,
- &hfsmp->hfs_allocation_vp);
+ &hfsmp->hfs_allocation_vp, &newvnode_flags);
if (error) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error creating bitmap Vnode (%d) \n", error);
+ }
hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
goto MtVolErr;
/* mark the volume dirty (clear clean unmount bit) */
vcb->vcbAtrb &= ~kHFSVolumeUnmountedMask;
- if (error == noErr)
- {
- error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, NULL, NULL, NULL);
- }
-
- if ( error == noErr )
- {
- if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected
- {
- MarkVCBDirty( vcb ); // mark VCB dirty so it will be written
- }
- }
-
+ if (error == noErr) {
+ error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, NULL, NULL, NULL);
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfs (std): error looking up root folder (%d) \n", error);
+ }
+ }
+
+ if (error == noErr) {
+ /* If the disk isn't write protected.. */
+ if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask)) {
+ MarkVCBDirty (vcb); // mark VCB dirty so it will be written
+ }
+ }
+
/*
* all done with system files so we can unlock now...
*/
hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
-
- goto CmdDone;
+
+ if (error == noErr) {
+ /* If successful, then we can just return once we've unlocked the cnodes */
+ return error;
+ }
//-- Release any resources allocated so far before exiting with an error:
MtVolErr:
- ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp);
- ReleaseMetaFileVNode(hfsmp->hfs_extents_vp);
+ hfsUnmount(hfsmp, NULL);
-CmdDone:
return (error);
}
+#endif
+
//*******************************************************************************
// Routine: hfs_MountHFSPlusVolume
//
//
//*******************************************************************************
-__private_extern__
OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
off_t embeddedOffset, u_int64_t disksize, __unused struct proc *p, void *args, kauth_cred_t cred)
{
struct BTreeInfoRec btinfo;
u_int16_t signature;
u_int16_t hfs_version;
+ int newvnode_flags = 0;
int i;
OSErr retval;
+ char converted_volname[256];
+ size_t volname_length = 0;
+ size_t conv_volname_length = 0;
signature = SWAP_BE16(vhp->signature);
hfs_version = SWAP_BE16(vhp->version);
if (signature == kHFSPlusSigWord) {
if (hfs_version != kHFSPlusVersion) {
- printf("hfs_mount: invalid HFS+ version: %d\n", hfs_version);
+ printf("hfs_mount: invalid HFS+ version: %x\n", hfs_version);
return (EINVAL);
}
} else if (signature == kHFSXSigWord) {
if (hfs_version != kHFSXVersion) {
- printf("hfs_mount: invalid HFSX version: %d\n", hfs_version);
+ printf("hfs_mount: invalid HFSX version: %x\n", hfs_version);
return (EINVAL);
}
/* The in-memory signature is always 'H+'. */
/* Removed printf for invalid HFS+ signature because it gives
* false error for UFS root volume
*/
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: unknown Volume Signature : %x\n", signature);
+ }
return (EINVAL);
}
/* Block size must be at least 512 and a power of 2 */
blockSize = SWAP_BE32(vhp->blockSize);
- if (blockSize < 512 || !powerof2(blockSize))
+ if (blockSize < 512 || !powerof2(blockSize)) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: invalid blocksize (%d) \n", blockSize);
+ }
return (EINVAL);
+ }
/* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */
if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0 && hfsmp->jnl == NULL &&
- (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0)
+ (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: cannot mount dirty non-journaled volumes\n");
+ }
return (EINVAL);
+ }
/* Make sure we can live with the physical block size. */
if ((disksize & (hfsmp->hfs_logical_block_size - 1)) ||
(embeddedOffset & (hfsmp->hfs_logical_block_size - 1)) ||
(blockSize < hfsmp->hfs_logical_block_size)) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n",
+ blockSize, hfsmp->hfs_logical_block_size);
+ }
return (ENXIO);
}
SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
}
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork,
- &hfsmp->hfs_extents_vp);
+ &hfsmp->hfs_extents_vp, &newvnode_flags);
if (retval)
{
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting extentoverflow BT\n", retval);
+ }
goto ErrorExit;
}
hfsmp->hfs_extents_cp = VTOC(hfsmp->hfs_extents_vp);
(KeyCompareProcPtr) CompareExtentKeysPlus));
if (retval)
{
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting extentoverflow BT\n", retval);
+ }
goto ErrorExit;
}
/*
SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
}
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork,
- &hfsmp->hfs_catalog_vp);
+ &hfsmp->hfs_catalog_vp, &newvnode_flags);
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting catalog BT\n", retval);
+ }
goto ErrorExit;
}
hfsmp->hfs_catalog_cp = VTOC(hfsmp->hfs_catalog_vp);
retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_catalog_vp),
(KeyCompareProcPtr) CompareExtendedCatalogKeys));
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting catalog BT\n", retval);
+ }
goto ErrorExit;
}
if ((hfsmp->hfs_flags & HFS_X) &&
SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
}
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork,
- &hfsmp->hfs_allocation_vp);
+ &hfsmp->hfs_allocation_vp, &newvnode_flags);
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting bitmap\n", retval);
+ }
goto ErrorExit;
}
hfsmp->hfs_allocation_cp = VTOC(hfsmp->hfs_allocation_vp);
SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
}
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork,
- &hfsmp->hfs_attribute_vp);
+ &hfsmp->hfs_attribute_vp, &newvnode_flags);
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting EA BT\n", retval);
+ }
goto ErrorExit;
}
hfsmp->hfs_attribute_cp = VTOC(hfsmp->hfs_attribute_vp);
retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_attribute_vp),
(KeyCompareProcPtr) hfs_attrkeycompare));
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: BTOpenPath returned (%d) getting EA BT\n", retval);
+ }
+ goto ErrorExit;
+ }
+
+ /* Initialize vnode for virtual attribute data file that spans the
+ * entire file system space for performing I/O to attribute btree
+ * We hold iocount on the attrdata vnode for the entire duration
+ * of mount (similar to btree vnodes)
+ */
+ retval = init_attrdata_vnode(hfsmp);
+ if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: init_attrdata_vnode returned (%d) for virtual EA file\n", retval);
+ }
goto ErrorExit;
}
}
SWAP_BE32 (vhp->startupFile.extents[i].blockCount);
}
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork,
- &hfsmp->hfs_startup_vp);
+ &hfsmp->hfs_startup_vp, &newvnode_flags);
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_getnewvnode returned (%d) getting startup file\n", retval);
+ }
goto ErrorExit;
}
hfsmp->hfs_startup_cp = VTOC(hfsmp->hfs_startup_vp);
hfs_unlock(hfsmp->hfs_startup_cp);
}
- /* Pick up volume name and create date */
- retval = cat_idlookup(hfsmp, kHFSRootFolderID, 0, &cndesc, &cnattr, NULL);
+ /*
+ * Pick up volume name and create date
+ *
+ * Acquiring the volume name should not manipulate the bitmap, only the catalog
+ * btree and possibly the extents overflow b-tree.
+ */
+ retval = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, &cnattr, NULL);
if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: cat_idlookup returned (%d) getting rootfolder \n", retval);
+ }
goto ErrorExit;
}
- vcb->vcbCrDate = cnattr.ca_itime;
+ vcb->hfs_itime = cnattr.ca_itime;
vcb->volumeNameEncodingHint = cndesc.cd_encoding;
bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
+ volname_length = strlen ((const char*)vcb->vcbVN);
cat_releasedesc(&cndesc);
+
+#define DKIOCCSSETLVNAME _IOW('d', 198, char[256])
+
+
+ /* Send the volume name down to CoreStorage if necessary */
+ retval = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
+ if (retval == 0) {
+ (void) VNOP_IOCTL (hfsmp->hfs_devvp, DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
+ }
+
+ /* reset retval == 0. we don't care about errors in volname conversion */
+ retval = 0;
+
+
+ /*
+ * We now always initiate a full bitmap scan even if the volume is read-only because this is
+ * our only shot to do I/Os of dramatically different sizes than what the buffer cache ordinarily
+ * expects. TRIMs will not be delivered to the underlying media if the volume is not
+ * read-write though.
+ */
+ thread_t allocator_scanner;
+ hfsmp->scan_var = 0;
+
+ /* Take the HFS mount mutex and wait on scan_var */
+ hfs_lock_mount (hfsmp);
+
+ kernel_thread_start ((thread_continue_t) hfs_scan_blocks, hfsmp, &allocator_scanner);
+ /* Wait until it registers that it's got the appropriate locks */
+ while ((hfsmp->scan_var & HFS_ALLOCATOR_SCAN_INFLIGHT) == 0) {
+ (void) msleep (&hfsmp->scan_var, &hfsmp->hfs_mutex, (PDROP | PINOD), "hfs_scan_blocks", 0);
+ if (hfsmp->scan_var & HFS_ALLOCATOR_SCAN_INFLIGHT) {
+ break;
+ }
+ else {
+ hfs_lock_mount (hfsmp);
+ }
+ }
+
+ thread_deallocate (allocator_scanner);
/* mark the volume dirty (clear clean unmount bit) */
vcb->vcbAtrb &= ~kHFSVolumeUnmountedMask;
// EROFS is a special error code that means the volume has an external
// journal which we couldn't find. in that case we do not want to
// rewrite the volume header - we'll just refuse to mount the volume.
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_late_journal_init returned (%d), maybe an external jnl?\n", retval);
+ }
retval = EINVAL;
goto ErrorExit;
}
bp = NULL;
}
}
-
+
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_late_journal_init returned (%d)\n", retval);
+ }
retval = EINVAL;
goto ErrorExit;
} else if (hfsmp->jnl) {
}
}
+ if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected
+ {
+ MarkVCBDirty( vcb ); // mark VCB dirty so it will be written
+ }
+
+ /*
+ * Distinguish 3 potential cases involving content protection:
+ * 1. mount point bit set; vcbAtrb does not support it. Fail.
+ * 2. mount point bit set; vcbAtrb supports it. We're good.
+ * 3. mount point bit not set; vcbAtrb supports it, turn bit on, then good.
+ */
+ if (vfs_flags(hfsmp->hfs_mp) & MNT_CPROTECT) {
+ /* Does the mount point support it ? */
+ if ((vcb->vcbAtrb & kHFSContentProtectionMask) == 0) {
+ /* Case 1 above */
+ retval = EINVAL;
+ goto ErrorExit;
+ }
+ }
+ else {
+ /* not requested in the mount point. Is it in FS? */
+ if (vcb->vcbAtrb & kHFSContentProtectionMask) {
+ /* Case 3 above */
+ vfs_setflags (hfsmp->hfs_mp, MNT_CPROTECT);
+ }
+ }
+
+ /* At this point, if the mount point flag is set, we can enable it. */
+ if (vfs_flags(hfsmp->hfs_mp) & MNT_CPROTECT) {
+ /* Cases 2+3 above */
+#if CONFIG_PROTECT
+ /* Get the EAs as needed. */
+ int cperr = 0;
+ uint16_t majorversion;
+ uint16_t minorversion;
+
+ struct cp_root_xattr *xattr = NULL;
+ MALLOC (xattr, struct cp_root_xattr*, sizeof(struct cp_root_xattr), M_TEMP, M_WAITOK);
+ if (xattr == NULL) {
+ retval = ENOMEM;
+ goto ErrorExit;
+ }
+ bzero (xattr, sizeof(struct cp_root_xattr));
+
+ /* go get the EA to get the version information */
+ cperr = cp_getrootxattr (hfsmp, xattr);
+ /*
+ * If there was no EA there, then write one out.
+ * Assuming EA is not present on the root means
+ * this is an erase install or a very old FS
+ */
+
+ if (cperr == 0) {
+ /* Have to run a valid CP version. */
+ if ((xattr->major_version < CP_PREV_MAJOR_VERS) || (xattr->major_version > CP_NEW_MAJOR_VERS)) {
+ cperr = EINVAL;
+ }
+ }
+ else if (cperr == ENOATTR) {
+ printf("No root EA set, creating new EA with new version: %d\n", CP_NEW_MAJOR_VERS);
+ bzero(xattr, sizeof(struct cp_root_xattr));
+ xattr->major_version = CP_NEW_MAJOR_VERS;
+ xattr->minor_version = CP_MINOR_VERS;
+ xattr->flags = 0;
+ cperr = cp_setrootxattr (hfsmp, xattr);
+ }
+ majorversion = xattr->major_version;
+ minorversion = xattr->minor_version;
+ if (xattr) {
+ FREE(xattr, M_TEMP);
+ }
+
+ /* Recheck for good status */
+ if (cperr == 0) {
+ /* If we got here, then the CP version is valid. Set it in the mount point */
+ hfsmp->hfs_running_cp_major_vers = majorversion;
+ printf("Running with CP root xattr: %d.%d\n", majorversion, minorversion);
+
+ /*
+ * Acquire the boot-arg for the AKS default key.
+ * Ensure that the boot-arg's value is valid for FILES (not directories),
+ * since only files are actually protected for now.
+ */
+ PE_parse_boot_argn("aks_default_class", &hfsmp->default_cp_class, sizeof(hfsmp->default_cp_class));
+ if (cp_is_valid_class(0, hfsmp->default_cp_class) == 0) {
+ hfsmp->default_cp_class = PROTECTION_CLASS_D;
+ }
+ }
+ else {
+ retval = EPERM;
+ goto ErrorExit;
+ }
+#else
+ /* If CONFIG_PROTECT not built, ignore CP */
+ vfs_clearflags(hfsmp->hfs_mp, MNT_CPROTECT);
+#endif
+ }
+
/*
* Establish a metadata allocation zone.
*/
- hfs_metadatazone_init(hfsmp);
+ hfs_metadatazone_init(hfsmp, false);
/*
* Make any metadata zone adjustments.
if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)
{
retval = hfs_erase_unused_nodes(hfsmp);
- if (retval)
+ if (retval) {
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: hfs_erase_unused_nodes returned (%d) for %s \n", retval, hfsmp->vcbVN);
+ }
+
goto ErrorExit;
+ }
}
-
- if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected
- {
- MarkVCBDirty( vcb ); // mark VCB dirty so it will be written
- }
-
+
/*
* Allow hot file clustering if conditions allow.
*/
if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
- ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
+ ((hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_SSD)) == 0)) {
(void) hfs_recording_init(hfsmp);
}
/* Force ACLs on HFS+ file systems. */
vfs_setextendedsecurity(HFSTOVFS(hfsmp));
- /* Check if volume supports writing of extent-based extended attributes */
- hfs_check_volxattr(hfsmp, HFS_SET_XATTREXTENTS_STATE);
+ /* Enable extent-based extended attributes by default */
+ hfsmp->hfs_flags |= HFS_XATTR_EXTENTS;
return (0);
ErrorExit:
/*
- * A fatal error occurred and the volume cannot be mounted
- * release any resources that we aquired...
+ * A fatal error occurred and the volume cannot be mounted, so
+ * release any resources that we acquired...
*/
- if (hfsmp->hfs_attribute_vp)
- ReleaseMetaFileVNode(hfsmp->hfs_attribute_vp);
- ReleaseMetaFileVNode(hfsmp->hfs_allocation_vp);
- ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp);
- ReleaseMetaFileVNode(hfsmp->hfs_extents_vp);
-
+ hfsUnmount(hfsmp, NULL);
+
+ if (HFS_MOUNT_DEBUG) {
+ printf("hfs_mounthfsplus: encountered error (%d)\n", retval);
+ }
return (retval);
}
if (vp && (fp = VTOF(vp))) {
if (fp->fcbBTCBPtr != NULL) {
- (void)hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ (void)hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
(void) BTClosePath(fp);
hfs_unlock(VTOC(vp));
}
*
*************************************************************/
-__private_extern__
int
hfsUnmount( register struct hfsmount *hfsmp, __unused struct proc *p)
{
- /* Get rid of our attribute data vnode (if any). */
+ /* Get rid of our attribute data vnode (if any). This is done
+ * after the vflush() during mount, so we don't need to worry
+ * about any locks.
+ */
if (hfsmp->hfs_attrdata_vp) {
- vnode_t advp = hfsmp->hfs_attrdata_vp;
-
- if (vnode_get(advp) == 0) {
- vnode_rele_ext(advp, O_EVTONLY, 0);
- vnode_put(advp);
- }
+ ReleaseMetaFileVNode(hfsmp->hfs_attrdata_vp);
hfsmp->hfs_attrdata_vp = NULLVP;
}
- if (hfsmp->hfs_startup_vp)
+ if (hfsmp->hfs_startup_vp) {
ReleaseMetaFileVNode(hfsmp->hfs_startup_vp);
-
- if (hfsmp->hfs_allocation_vp)
- ReleaseMetaFileVNode(hfsmp->hfs_allocation_vp);
-
- if (hfsmp->hfs_attribute_vp)
+ hfsmp->hfs_startup_cp = NULL;
+ hfsmp->hfs_startup_vp = NULL;
+ }
+
+ if (hfsmp->hfs_attribute_vp) {
ReleaseMetaFileVNode(hfsmp->hfs_attribute_vp);
+ hfsmp->hfs_attribute_cp = NULL;
+ hfsmp->hfs_attribute_vp = NULL;
+ }
- ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp);
- ReleaseMetaFileVNode(hfsmp->hfs_extents_vp);
+ if (hfsmp->hfs_catalog_vp) {
+ ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp);
+ hfsmp->hfs_catalog_cp = NULL;
+ hfsmp->hfs_catalog_vp = NULL;
+ }
- /*
- * Setting these pointers to NULL so that any references
- * past this point will fail, and tell us the point of failure.
- * Also, facilitates a check in hfs_update for a null catalog
- * vp
- */
- hfsmp->hfs_allocation_vp = NULL;
- hfsmp->hfs_attribute_vp = NULL;
- hfsmp->hfs_catalog_vp = NULL;
- hfsmp->hfs_extents_vp = NULL;
- hfsmp->hfs_startup_vp = NULL;
+ if (hfsmp->hfs_extents_vp) {
+ ReleaseMetaFileVNode(hfsmp->hfs_extents_vp);
+ hfsmp->hfs_extents_cp = NULL;
+ hfsmp->hfs_extents_vp = NULL;
+ }
+
+ if (hfsmp->hfs_allocation_vp) {
+ ReleaseMetaFileVNode(hfsmp->hfs_allocation_vp);
+ hfsmp->hfs_allocation_cp = NULL;
+ hfsmp->hfs_allocation_vp = NULL;
+ }
return (0);
}
/*
* Test if fork has overflow extents.
+ *
+ * Returns:
+ * non-zero - overflow extents exist
+ * zero - overflow extents do not exist
*/
__private_extern__
int
return (fp->ff_blocks > blocks);
}
+/*
+ * Lock the HFS global journal lock
+ *
+ * Acquires hfsmp->hfs_global_lock either shared or exclusive, as
+ * selected by 'locktype'.  Recursive acquisition is not permitted:
+ * if the calling thread is already recorded as the exclusive owner
+ * we panic rather than deadlock.
+ *
+ * Always returns 0 (the lck_rw primitives cannot fail).
+ */
+int
+hfs_lock_global (struct hfsmount *hfsmp, enum hfs_locktype locktype)
+{
+	void *thread = current_thread();
+
+	/* Only an exclusive holder stores its thread in hfs_global_lockowner
+	 * (shared holders store HFS_SHARED_OWNER below), so this check catches
+	 * exclusive-after-exclusive self-deadlock by the same thread. */
+	if (hfsmp->hfs_global_lockowner == thread) {
+		panic ("hfs_lock_global: locking against myself!");
+	}
+
+	/* HFS_SHARED_LOCK */
+	if (locktype == HFS_SHARED_LOCK) {
+		lck_rw_lock_shared (&hfsmp->hfs_global_lock);
+		/* Shared holders are anonymous; tag ownership with the shared marker. */
+		hfsmp->hfs_global_lockowner = HFS_SHARED_OWNER;
+	}
+	/* HFS_EXCLUSIVE_LOCK */
+	else {
+		lck_rw_lock_exclusive (&hfsmp->hfs_global_lock);
+		hfsmp->hfs_global_lockowner = thread;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Unlock the HFS global journal lock
+ *
+ * If the calling thread is recorded as the exclusive owner, the owner
+ * field is cleared and the lock is dropped exclusive; otherwise the
+ * caller is assumed to hold the lock shared and it is dropped shared.
+ */
+void
+hfs_unlock_global (struct hfsmount *hfsmp)
+{
+
+	void *thread = current_thread();
+
+	/* HFS_LOCK_EXCLUSIVE */
+	if (hfsmp->hfs_global_lockowner == thread) {
+		/* Clear ownership before releasing so no other thread can observe
+		 * a stale owner after the lock becomes available. */
+		hfsmp->hfs_global_lockowner = NULL;
+		lck_rw_unlock_exclusive (&hfsmp->hfs_global_lock);
+	}
+	/* HFS_LOCK_SHARED */
+	else {
+		lck_rw_unlock_shared (&hfsmp->hfs_global_lock);
+	}
+}
+
+/*
+ * Lock the HFS mount lock
+ *
+ * Note: this is a mutex, not a rw lock!
+ * Thin wrapper that acquires hfsmp->hfs_mutex; pairs with
+ * hfs_unlock_mount().
+ */
+inline
+void hfs_lock_mount (struct hfsmount *hfsmp) {
+	lck_mtx_lock (&(hfsmp->hfs_mutex));
+}
+
+/*
+ * Unlock the HFS mount lock
+ *
+ * Note: this is a mutex, not a rw lock!
+ * Thin wrapper that releases hfsmp->hfs_mutex acquired by
+ * hfs_lock_mount().
+ */
+inline
+void hfs_unlock_mount (struct hfsmount *hfsmp) {
+	lck_mtx_unlock (&(hfsmp->hfs_mutex));
+}
/*
* Lock HFS system file(s).
*/
-__private_extern__
int
-hfs_systemfile_lock(struct hfsmount *hfsmp, int flags, enum hfslocktype locktype)
+hfs_systemfile_lock(struct hfsmount *hfsmp, int flags, enum hfs_locktype locktype)
{
/*
* Locking order is Catalog file, Attributes file, Startup file, Bitmap file, Extents file
*/
if (flags & SFL_CATALOG) {
-
#ifdef HFS_CHECK_LOCK_ORDER
if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == current_thread()) {
panic("hfs_systemfile_lock: bad lock order (Attributes before Catalog)");
}
#endif /* HFS_CHECK_LOCK_ORDER */
- (void) hfs_lock(hfsmp->hfs_catalog_cp, locktype);
- /*
- * When the catalog file has overflow extents then
- * also acquire the extents b-tree lock if its not
- * already requested.
- */
- if ((flags & SFL_EXTENTS) == 0 &&
- overflow_extents(VTOF(hfsmp->hfs_catalog_vp))) {
- flags |= SFL_EXTENTS;
+ if (hfsmp->hfs_catalog_cp) {
+ (void) hfs_lock(hfsmp->hfs_catalog_cp, locktype, HFS_LOCK_DEFAULT);
+ /*
+ * When the catalog file has overflow extents then
+ * also acquire the extents b-tree lock if its not
+ * already requested.
+ */
+ if (((flags & SFL_EXTENTS) == 0) &&
+ (hfsmp->hfs_catalog_vp != NULL) &&
+ (overflow_extents(VTOF(hfsmp->hfs_catalog_vp)))) {
+ flags |= SFL_EXTENTS;
+ }
+ } else {
+ flags &= ~SFL_CATALOG;
}
}
- if (flags & SFL_ATTRIBUTE) {
+ if (flags & SFL_ATTRIBUTE) {
#ifdef HFS_CHECK_LOCK_ORDER
if (hfsmp->hfs_startup_cp && hfsmp->hfs_startup_cp->c_lockowner == current_thread()) {
panic("hfs_systemfile_lock: bad lock order (Startup before Attributes)");
#endif /* HFS_CHECK_LOCK_ORDER */
if (hfsmp->hfs_attribute_cp) {
- (void) hfs_lock(hfsmp->hfs_attribute_cp, locktype);
+ (void) hfs_lock(hfsmp->hfs_attribute_cp, locktype, HFS_LOCK_DEFAULT);
/*
* When the attribute file has overflow extents then
* also acquire the extents b-tree lock if its not
* already requested.
*/
- if ((flags & SFL_EXTENTS) == 0 &&
- overflow_extents(VTOF(hfsmp->hfs_attribute_vp))) {
+ if (((flags & SFL_EXTENTS) == 0) &&
+ (hfsmp->hfs_attribute_vp != NULL) &&
+ (overflow_extents(VTOF(hfsmp->hfs_attribute_vp)))) {
flags |= SFL_EXTENTS;
}
} else {
flags &= ~SFL_ATTRIBUTE;
}
}
+
if (flags & SFL_STARTUP) {
#ifdef HFS_CHECK_LOCK_ORDER
if (hfsmp-> hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) {
}
#endif /* HFS_CHECK_LOCK_ORDER */
- (void) hfs_lock(hfsmp->hfs_startup_cp, locktype);
- /*
- * When the startup file has overflow extents then
- * also acquire the extents b-tree lock if its not
- * already requested.
- */
- if ((flags & SFL_EXTENTS) == 0 &&
- overflow_extents(VTOF(hfsmp->hfs_startup_vp))) {
- flags |= SFL_EXTENTS;
+ if (hfsmp->hfs_startup_cp) {
+ (void) hfs_lock(hfsmp->hfs_startup_cp, locktype, HFS_LOCK_DEFAULT);
+ /*
+ * When the startup file has overflow extents then
+ * also acquire the extents b-tree lock if its not
+ * already requested.
+ */
+ if (((flags & SFL_EXTENTS) == 0) &&
+ (hfsmp->hfs_startup_vp != NULL) &&
+ (overflow_extents(VTOF(hfsmp->hfs_startup_vp)))) {
+ flags |= SFL_EXTENTS;
+ }
+ } else {
+ flags &= ~SFL_STARTUP;
}
}
+
/*
* To prevent locks being taken in the wrong order, the extent lock
* gets a bitmap lock as well.
*/
if (flags & (SFL_BITMAP | SFL_EXTENTS)) {
- /*
- * Since the only bitmap operations are clearing and
- * setting bits we always need exclusive access. And
- * when we have a journal, we can "hide" behind that
- * lock since we can only change the bitmap from
- * within a transaction.
- */
- if (hfsmp->jnl || (hfsmp->hfs_allocation_cp == NULL)) {
- flags &= ~SFL_BITMAP;
- } else {
- (void) hfs_lock(hfsmp->hfs_allocation_cp, HFS_EXCLUSIVE_LOCK);
- /* The bitmap lock is also grabbed when only extent lock
+ if (hfsmp->hfs_allocation_cp) {
+ (void) hfs_lock(hfsmp->hfs_allocation_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ /*
+ * The bitmap lock is also grabbed when only extent lock
* was requested. Set the bitmap lock bit in the lock
* flags which callers will use during unlock.
*/
flags |= SFL_BITMAP;
+ } else {
+ flags &= ~SFL_BITMAP;
}
}
+
if (flags & SFL_EXTENTS) {
/*
* Since the extents btree lock is recursive we always
* need exclusive access.
*/
- (void) hfs_lock(hfsmp->hfs_extents_cp, HFS_EXCLUSIVE_LOCK);
+ if (hfsmp->hfs_extents_cp) {
+ (void) hfs_lock(hfsmp->hfs_extents_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ } else {
+ flags &= ~SFL_EXTENTS;
+ }
}
+
return (flags);
}
/*
* unlock HFS system file(s).
*/
-__private_extern__
void
hfs_systemfile_unlock(struct hfsmount *hfsmp, int flags)
{
}
hfs_unlock(hfsmp->hfs_attribute_cp);
}
- if (flags & SFL_CATALOG) {
+ if (flags & SFL_CATALOG && hfsmp->hfs_catalog_cp) {
if (hfsmp->jnl == NULL) {
BTGetLastSync((FCB*)VTOF(hfsmp->hfs_catalog_vp), &lastfsync);
numOfLockedBuffs = count_lock_queue();
}
hfs_unlock(hfsmp->hfs_catalog_cp);
}
- if (flags & SFL_BITMAP) {
+ if (flags & SFL_BITMAP && hfsmp->hfs_allocation_cp) {
hfs_unlock(hfsmp->hfs_allocation_cp);
}
- if (flags & SFL_EXTENTS) {
+ if (flags & SFL_EXTENTS && hfsmp->hfs_extents_cp) {
if (hfsmp->jnl == NULL) {
BTGetLastSync((FCB*)VTOF(hfsmp->hfs_extents_vp), &lastfsync);
numOfLockedBuffs = count_lock_queue();
}
-__private_extern__
u_int32_t
GetFileInfo(ExtendedVCB *vcb, __unused u_int32_t dirid, const char *name,
struct cat_attr *fattr, struct cat_fork *forkinfo)
jdesc.cd_namelen = strlen(name);
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
- error = cat_lookup(hfsmp, &jdesc, 0, NULL, fattr, forkinfo, NULL);
+ error = cat_lookup(hfsmp, &jdesc, 0, 0, NULL, fattr, forkinfo, NULL);
hfs_systemfile_unlock(hfsmp, lockflags);
if (error == 0) {
* If the volume was not cleanly unmounted then some of these may
* have persisted and need to be removed.
*/
-__private_extern__
void
hfs_remove_orphans(struct hfsmount * hfsmp)
{
*/
if (bcmp(tempname, filename, namelen) == 0) {
struct filefork dfork;
- struct filefork rfork;
+ struct filefork rfork;
struct cnode cnode;
+ int mode = 0;
bzero(&dfork, sizeof(dfork));
bzero(&rfork, sizeof(rfork));
fsize = 0;
}
- if (TruncateFileC(vcb, (FCB*)&dfork, fsize, false) != 0) {
- printf("hfs: error truncting data fork!\n");
+ if (TruncateFileC(vcb, (FCB*)&dfork, fsize, 1, 0,
+ cnode.c_attr.ca_fileid, false) != 0) {
+ printf("hfs: error truncating data fork!\n");
break;
}
rfork.ff_cp = &cnode;
cnode.c_datafork = NULL;
cnode.c_rsrcfork = &rfork;
- if (TruncateFileC(vcb, (FCB*)&rfork, 0, false) != 0) {
- printf("hfs: error truncting rsrc fork!\n");
+ if (TruncateFileC(vcb, (FCB*)&rfork, 0, 1, 1, cnode.c_attr.ca_fileid, false) != 0) {
+ printf("hfs: error truncating rsrc fork!\n");
break;
}
}
break;
}
- if (cnode.c_attr.ca_mode & S_IFDIR) {
+ mode = cnode.c_attr.ca_mode & S_IFMT;
+
+ if (mode == S_IFDIR) {
orphaned_dirs++;
}
else {
/* Update parent and volume counts */
hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
- if (cnode.c_attr.ca_mode & S_IFDIR) {
+ if (mode == S_IFDIR) {
DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
}
Now that Catalog is unlocked, update the volume info, making
sure to differentiate between files and directories
*/
- if (cnode.c_attr.ca_mode & S_IFDIR) {
+ if (mode == S_IFDIR) {
hfs_volupdate(hfsmp, VOL_RMDIR, 0);
}
else{
return logBlockSize;
}
-__private_extern__
u_int32_t
hfs_freeblks(struct hfsmount * hfsmp, int wantreserve)
{
else
freeblks = 0;
-#ifdef HFS_SPARSE_DEV
+#if HFS_SPARSE_DEV
/*
* When the underlying device is sparse, check the
* available space on the backing store volume.
}
if ((vfsp = vfs_statfs(backingfs_mp))) {
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount (hfsmp);
vfreeblks = vfsp->f_bavail;
/* Normalize block count if needed. */
if (vfsp->f_bsize != hfsmp->blockSize) {
vfreeblks = MIN(vfreeblks, hfsmp->hfs_backingfs_maxblocks);
}
freeblks = MIN(vfreeblks, freeblks);
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount (hfsmp);
}
}
#endif /* HFS_SPARSE_DEV */
+ if (hfsmp->hfs_flags & HFS_CS) {
+ uint64_t cs_free_bytes;
+ uint64_t cs_free_blks;
+ if (VNOP_IOCTL(hfsmp->hfs_devvp, _DKIOCCSGETFREEBYTES,
+ (caddr_t)&cs_free_bytes, 0, vfs_context_kernel()) == 0) {
+ cs_free_blks = cs_free_bytes / hfsmp->blockSize;
+ if (cs_free_blks > loanblks)
+ cs_free_blks -= loanblks;
+ else
+ cs_free_blks = 0;
+ freeblks = MIN(cs_free_blks, freeblks);
+ }
+ }
return (freeblks);
}
if (err >= 0)
return err;
+ /* BSD/VFS internal errnos */
+ switch (err) {
+ case ERESERVEDNAME: /* -8 */
+ return err;
+ }
+
switch (err) {
case dskFulErr: /* -34 */
case btNoSpaceAvail: /* -32733 */
// desired uuid so let's try to open the device for writing and
// see if it works. if it does, we'll use it.
- NDINIT(&nd, LOOKUP, LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(bsd_name), vfs_context_kernel());
+ NDINIT(&nd, LOOKUP, OP_LOOKUP, LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(bsd_name), vfs_context_kernel());
if ((error = namei(&nd))) {
printf("hfs: journal open cb: error %d looking up device %s (dev uuid %s)\n", error, bsd_name, uuid_str);
return 1; // keep iterating
strlcpy(ji->desired_uuid, uuid_str, 128);
}
vnode_setmountedon(ji->jvp);
- // printf("hfs: journal open cb: got device %s (%s)\n", bsd_name, uuid_str);
return 0; // stop iterating
} else {
vnode_put(ji->jvp);
return 1; // keep iterating
}
-extern dev_t IOBSDGetMediaWithUUID(const char *uuid_cstring, char *bsd_name, int bsd_name_len, int timeout);
extern void IOBSDIterateMediaWithContent(const char *uuid_cstring, int (*func)(const char *bsd_dev_name, const char *uuid_str, void *arg), void *arg);
-extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
kern_return_t IOBSDGetPlatformSerialNumber(char *serial_number_str, u_int32_t len);
}
-__private_extern__
int
hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
void *_args, off_t embeddedOffset, daddr64_t mdb_offset,
const char *dev_name;
devvp = hfsmp->hfs_devvp;
- dev_name = vnode_name(devvp);
- if (dev_name == NULL) {
- dev_name = "unknown-dev";
- }
+ dev_name = vnode_getname_printable(devvp);
if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) {
arg_flags = args->journal_flags;
if (jinfo_bp) {
buf_brelse(jinfo_bp);
}
- return retval;
+ goto cleanup_dev_name;
}
jibp = (JournalInfoBlock *)buf_dataptr(jinfo_bp);
hfsmp->hfs_logical_block_size,
&need_init);
if (hfsmp->jvp == NULL) {
- buf_brelse(jinfo_bp);
- return EROFS;
+ buf_brelse(jinfo_bp);
+ retval = EROFS;
+ goto cleanup_dev_name;
} else {
if (IOBSDGetPlatformSerialNumber(&jibp->machine_serial_num[0], sizeof(jibp->machine_serial_num)) != KERN_SUCCESS) {
strlcpy(&jibp->machine_serial_num[0], "unknown-machine-uuid", sizeof(jibp->machine_serial_num));
buf_brelse(jinfo_bp);
if (retval) {
- const char *name = vnode_getname(devvp);
- printf("hfs: early journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
- name ? name : "");
- if (name)
- vnode_putname(name);
+ const char *name = vnode_getname_printable(devvp);
+ printf("hfs: early journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
+ name);
+ vnode_putname_printable(name);
}
- return retval;
+ goto cleanup_dev_name;
}
if (jib_flags & kJIJournalNeedInitMask) {
blksize,
arg_flags,
arg_tbufsz,
- hfs_sync_metadata, hfsmp->hfs_mp);
+ hfs_sync_metadata, hfsmp->hfs_mp,
+ hfsmp->hfs_mp);
+ if (hfsmp->jnl)
+ journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
// no need to start a transaction here... if this were to fail
// we'd just re-init it on the next mount.
blksize,
arg_flags,
arg_tbufsz,
- hfs_sync_metadata, hfsmp->hfs_mp);
+ hfs_sync_metadata, hfsmp->hfs_mp,
+ hfsmp->hfs_mp);
+ if (hfsmp->jnl)
+ journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
if (write_jibp) {
buf_bwrite(jinfo_bp);
}
printf("hfs: failed to reload the mdb after opening the journal (retval %d)!\n",
retval);
- return retval;
+ goto cleanup_dev_name;
}
bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size), mdbp, 512);
buf_brelse(bp);
}
}
-
- //printf("journal @ 0x%x\n", hfsmp->jnl);
-
// if we expected the journal to be there and we couldn't
// create it or open it then we have to bail out.
if (hfsmp->jnl == NULL) {
printf("hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval);
- return EINVAL;
+ retval = EINVAL;
+ goto cleanup_dev_name;
}
- return 0;
+ retval = 0;
+
+cleanup_dev_name:
+ vnode_putname_printable(dev_name);
+ return retval;
}
} else {
const char *dev_name;
int need_init = 0;
-
- dev_name = vnode_name(devvp);
- if (dev_name == NULL) {
- dev_name = "unknown-dev";
- }
+
+ dev_name = vnode_getname_printable(devvp);
// since the journal is empty, just use any available external journal
*((char *)&jibp->ext_jnl_uuid[0]) = '\0';
hfsmp->hfs_logical_block_size,
&need_init);
if (hfsmp->jvp == NULL) {
- buf_brelse(jinfo_bp);
- return EROFS;
+ buf_brelse(jinfo_bp);
+ vnode_putname_printable(dev_name);
+ return EROFS;
} else {
if (IOBSDGetPlatformSerialNumber(&jibp->machine_serial_num[0], sizeof(jibp->machine_serial_num)) != KERN_SUCCESS) {
strlcpy(&jibp->machine_serial_num[0], "unknown-machine-serial-num", sizeof(jibp->machine_serial_num));
}
- }
+ }
jib_offset = 0;
recreate_journal = 1;
write_jibp = 1;
if (need_init) {
jib_flags |= kJIJournalNeedInitMask;
}
+ vnode_putname_printable(dev_name);
}
// save this off for the hack-y check in hfs_remove()
buf_brelse(jinfo_bp);
if (retval) {
- const char *name = vnode_getname(devvp);
- printf("hfs: late journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
- name ? name : "");
- if (name)
- vnode_putname(name);
+ const char *name = vnode_getname_printable(devvp);
+ printf("hfs: late journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n",
+ name);
+ vnode_putname_printable(name);
}
return retval;
hfsmp->hfs_logical_block_size,
arg_flags,
arg_tbufsz,
- hfs_sync_metadata, hfsmp->hfs_mp);
+ hfs_sync_metadata, hfsmp->hfs_mp,
+ hfsmp->hfs_mp);
+ if (hfsmp->jnl)
+ journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
// no need to start a transaction here... if this were to fail
// we'd just re-init it on the next mount.
hfsmp->hfs_logical_block_size,
arg_flags,
arg_tbufsz,
- hfs_sync_metadata, hfsmp->hfs_mp);
+ hfs_sync_metadata, hfsmp->hfs_mp,
+ hfsmp->hfs_mp);
+ if (hfsmp->jnl)
+ journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
}
jinfo_bp = NULL;
jibp = NULL;
- //printf("hfs: journal @ 0x%x\n", hfsmp->jnl);
-
// if we expected the journal to be there and we couldn't
// create it or open it then we have to bail out.
if (hfsmp->jnl == NULL) {
#define HOTBAND_MINIMUM_SIZE (10*1024*1024)
#define HOTBAND_MAXIMUM_SIZE (512*1024*1024)
-static void
-hfs_metadatazone_init(struct hfsmount *hfsmp)
+/* Initialize the metadata zone.
+ *
+ * If the size of the volume is less than the minimum size for
+ * metadata zone, metadata zone is disabled.
+ *
+ * If disable is true, disable metadata zone unconditionally.
+ */
+void
+hfs_metadatazone_init(struct hfsmount *hfsmp, int disable)
{
ExtendedVCB *vcb;
u_int64_t fs_size;
int items, really_do_it=1;
vcb = HFSTOVCB(hfsmp);
- fs_size = (u_int64_t)vcb->blockSize * (u_int64_t)vcb->totalBlocks;
+ fs_size = (u_int64_t)vcb->blockSize * (u_int64_t)vcb->allocLimit;
/*
* For volumes less than 10 GB, don't bother.
really_do_it = 0;
}
+ /* If caller wants to disable metadata zone, do it */
+ if (disable == true) {
+ really_do_it = 0;
+ }
+
/*
* Start with space for the boot blocks and Volume Header.
* 1536 = byte offset from start of volume to end of volume header:
hfsmp->hfs_min_alloc_start = zonesize / vcb->blockSize;
/*
* If doing the round up for hfs_min_alloc_start would push us past
- * totalBlocks, then just reset it back to 0. Though using a value
- * bigger than totalBlocks would not cause damage in the block allocator
+ * allocLimit, then just reset it back to 0. Though using a value
+ * bigger than allocLimit would not cause damage in the block allocator
* code, this value could get stored in the volume header and make it out
* to disk, making the volume header technically corrupt.
*/
- if (hfsmp->hfs_min_alloc_start >= hfsmp->totalBlocks) {
+ if (hfsmp->hfs_min_alloc_start >= hfsmp->allocLimit) {
hfsmp->hfs_min_alloc_start = 0;
}
if (really_do_it == 0) {
+ /* If metadata zone needs to be disabled because the
+ * volume was truncated, clear the bit and zero out
+ * the values that are no longer needed.
+ */
+ if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
+ /* Disable metadata zone */
+ hfsmp->hfs_flags &= ~HFS_METADATA_ZONE;
+
+ /* Zero out mount point values that are not required */
+ hfsmp->hfs_catalog_maxblks = 0;
+ hfsmp->hfs_hotfile_maxblks = 0;
+ hfsmp->hfs_hotfile_start = 0;
+ hfsmp->hfs_hotfile_end = 0;
+ hfsmp->hfs_hotfile_freeblks = 0;
+ hfsmp->hfs_metazone_start = 0;
+ hfsmp->hfs_metazone_end = 0;
+ }
+
return;
}
hfsmp->hfs_metazone_end = blk - 1;
/* The default hotfile area is at the end of the zone. */
- hfsmp->hfs_hotfile_start = blk - (filesize / vcb->blockSize);
- hfsmp->hfs_hotfile_end = hfsmp->hfs_metazone_end;
- hfsmp->hfs_hotfile_freeblks = hfs_hotfile_freeblocks(hfsmp);
+ if (vfs_flags(HFSTOVFS(hfsmp)) & MNT_ROOTFS) {
+ hfsmp->hfs_hotfile_start = blk - (filesize / vcb->blockSize);
+ hfsmp->hfs_hotfile_end = hfsmp->hfs_metazone_end;
+ hfsmp->hfs_hotfile_freeblks = hfs_hotfile_freeblocks(hfsmp);
+ }
+ else {
+ hfsmp->hfs_hotfile_start = 0;
+ hfsmp->hfs_hotfile_end = 0;
+ hfsmp->hfs_hotfile_freeblks = 0;
+ }
#if 0
printf("hfs: metadata zone is %d to %d\n", hfsmp->hfs_metazone_start, hfsmp->hfs_metazone_end);
printf("hfs: hot file band is %d to %d\n", hfsmp->hfs_hotfile_start, hfsmp->hfs_hotfile_end);
* Determine if a file is a "virtual" metadata file.
* This includes journal and quota files.
*/
-__private_extern__
int
hfs_virtualmetafile(struct cnode *cp)
{
return (0);
}
+/* Serialize access to the syncer bookkeeping; backed by the mount mutex. */
+__private_extern__
+void hfs_syncer_lock(struct hfsmount *hfsmp)
+{
+    hfs_lock_mount(hfsmp);
+}
+
+__private_extern__
+void hfs_syncer_unlock(struct hfsmount *hfsmp)
+{
+    hfs_unlock_mount(hfsmp);
+}
+
+/* Sleep until an in-flight sync completes.  Caller must hold the mount
+ * mutex; msleep drops and re-acquires hfsmp->hfs_mutex around the wait. */
+__private_extern__
+void hfs_syncer_wait(struct hfsmount *hfsmp)
+{
+    msleep(&hfsmp->hfs_sync_incomplete, &hfsmp->hfs_mutex, PWAIT,
+           "hfs_syncer_wait", NULL);
+}
+
+/* Wake all threads blocked in hfs_syncer_wait(). */
+__private_extern__
+void hfs_syncer_wakeup(struct hfsmount *hfsmp)
+{
+    wakeup(&hfsmp->hfs_sync_incomplete);
+}
+
+/* Convert a relative interval in microseconds into an absolute deadline. */
+__private_extern__
+uint64_t hfs_usecs_to_deadline(uint64_t usecs)
+{
+    uint64_t deadline;
+    clock_interval_to_deadline(usecs, NSEC_PER_USEC, &deadline);
+    return deadline;
+}
+
+/* Arm the syncer thread-call to fire HFS_META_DELAY microseconds from now
+ * as background work with no leeway.  thread_call_enter_delayed_with_leeway
+ * returns TRUE when the call was already pending, hence the log message. */
+__private_extern__
+void hfs_syncer_queue(thread_call_t syncer)
+{
+    if (thread_call_enter_delayed_with_leeway(syncer,
+                                              NULL,
+                                              hfs_usecs_to_deadline(HFS_META_DELAY),
+                                              0,
+                                              THREAD_CALL_DELAY_SYS_BACKGROUND)) {
+        printf ("hfs: syncer already scheduled!");
+    }
+}
//
// Fire off a timed callback to sync the disk if the
void
hfs_sync_ejectable(struct hfsmount *hfsmp)
{
-	if (hfsmp->hfs_syncer) {
-	    clock_sec_t secs;
-	    clock_usec_t usecs;
-	    uint64_t now;
+    // If we don't have a syncer or we get called by the syncer, just return
+    if (!hfsmp->hfs_syncer || current_thread() == hfsmp->hfs_syncer_thread)
+        return;
-	    clock_get_calendar_microtime(&secs, &usecs);
-	    now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+    hfs_syncer_lock(hfsmp);
-	    if (hfsmp->hfs_sync_incomplete && hfsmp->hfs_mp->mnt_pending_write_size >= hfsmp->hfs_max_pending_io) {
-		// if we have a sync scheduled but i/o is starting to pile up,
-		// don't call thread_call_enter_delayed() again because that
-		// will defer the sync.
-		return;
-	    }
+    /* Record when the oldest outstanding sync request arrived, if none
+     * is pending yet (cleared again when the syncer runs). */
+    if (!timerisset(&hfsmp->hfs_sync_req_oldest))
+        microuptime(&hfsmp->hfs_sync_req_oldest);
-	    if (hfsmp->hfs_sync_scheduled == 0) {
-		uint64_t deadline;
+    /* If hfs_unmount is running, it will set hfs_syncer to NULL. Also we
+       don't want to queue again if there is a sync outstanding. */
+    if (!hfsmp->hfs_syncer || hfsmp->hfs_sync_incomplete) {
+        hfs_syncer_unlock(hfsmp);
+        return;
+    }
-		hfsmp->hfs_last_sync_request_time = now;
+    hfsmp->hfs_sync_incomplete = TRUE;
-		clock_interval_to_deadline(HFS_META_DELAY, HFS_MILLISEC_SCALE, &deadline);
+    /* Snapshot the thread_call pointer while still locked; it is queued
+     * only after the lock is dropped, below. */
+    thread_call_t syncer = hfsmp->hfs_syncer;
-	    /*
-	     * Increment hfs_sync_scheduled on the assumption that we're the
-	     * first thread to schedule the timer.  If some other thread beat
-	     * us, then we'll decrement it.  If we *were* the first to
-	     * schedule the timer, then we need to keep track that the
-	     * callback is waiting to complete.
-	     */
-	    OSIncrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
-	    if (thread_call_enter_delayed(hfsmp->hfs_syncer, deadline))
-		OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
-	    else
-		OSIncrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
-	    }
-	}
-}
+    hfs_syncer_unlock(hfsmp);
+    hfs_syncer_queue(syncer);
+}
-__private_extern__
int
hfs_start_transaction(struct hfsmount *hfsmp)
{
}
#endif /* HFS_CHECK_LOCK_ORDER */
- if (hfsmp->jnl == NULL || journal_owner(hfsmp->jnl) != thread) {
- lck_rw_lock_shared(&hfsmp->hfs_global_lock);
- OSAddAtomic(1, (SInt32 *)&hfsmp->hfs_active_threads);
- unlock_on_err = 1;
- }
+ if (hfsmp->jnl == NULL || journal_owner(hfsmp->jnl) != thread) {
+ hfs_lock_global (hfsmp, HFS_SHARED_LOCK);
+ OSAddAtomic(1, (SInt32 *)&hfsmp->hfs_active_threads);
+ unlock_on_err = 1;
+ }
/* If a downgrade to read-only mount is in progress, no other
* process than the downgrade process is allowed to modify
goto out;
}
- if (hfsmp->jnl) {
- ret = journal_start_transaction(hfsmp->jnl);
- if (ret == 0) {
- OSAddAtomic(1, &hfsmp->hfs_global_lock_nesting);
+ if (hfsmp->jnl) {
+ ret = journal_start_transaction(hfsmp->jnl);
+ if (ret == 0) {
+ OSAddAtomic(1, &hfsmp->hfs_global_lock_nesting);
+ }
+ } else {
+ ret = 0;
}
- } else {
- ret = 0;
- }
out:
- if (ret != 0 && unlock_on_err) {
- lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
- OSAddAtomic(-1, (SInt32 *)&hfsmp->hfs_active_threads);
- }
+ if (ret != 0 && unlock_on_err) {
+ hfs_unlock_global (hfsmp);
+ OSAddAtomic(-1, (SInt32 *)&hfsmp->hfs_active_threads);
+ }
return ret;
}
-__private_extern__
int
hfs_end_transaction(struct hfsmount *hfsmp)
{
    int need_unlock=0, ret;
-	if ( hfsmp->jnl == NULL
-	  || ( journal_owner(hfsmp->jnl) == current_thread()
+	/* Release the shared global lock only when there is no journal, or when
+	 * this thread owns the journal and this call ends its outermost nested
+	 * transaction (OSAddAtomic returns the prior value, so == 1 means the
+	 * nesting count has just returned to zero). */
+	if ((hfsmp->jnl == NULL) || ( journal_owner(hfsmp->jnl) == current_thread()
	  && (OSAddAtomic(-1, &hfsmp->hfs_global_lock_nesting) == 1)) ) {
-
	    need_unlock = 1;
	}
-	if (hfsmp->jnl) {
-	    ret = journal_end_transaction(hfsmp->jnl);
-	} else {
-	    ret = 0;
-	}
+	/* Close the journal transaction; non-journaled volumes have nothing to do. */
+	if (hfsmp->jnl) {
+		ret = journal_end_transaction(hfsmp->jnl);
+	} else {
+		ret = 0;
+	}
-	if (need_unlock) {
-	    OSAddAtomic(-1, (SInt32 *)&hfsmp->hfs_active_threads);
-	    lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
-	    hfs_sync_ejectable(hfsmp);
-	}
+	/* Outermost transaction: drop the active-thread count and the global
+	 * lock, then nudge the syncer for ejectable media. */
+	if (need_unlock) {
+		OSAddAtomic(-1, (SInt32 *)&hfsmp->hfs_active_threads);
+		hfs_unlock_global (hfsmp);
+		hfs_sync_ejectable(hfsmp);
+	}
    return ret;
}
-__private_extern__
+/*
+ * Flush the contents of the journal to the disk.
+ *
+ * Input:
+ * 	wait_for_IO -
+ * 	If TRUE, wait to write in-memory journal to the disk
+ * 	consistently, and also wait to write all asynchronous
+ * 	metadata blocks to its corresponding locations
+ * 	consistently on the disk. This means that the journal
+ * 	is empty at this point and does not contain any
+ * 	transactions. This is overkill in normal scenarios
+ * 	but is useful whenever the metadata blocks are required
+ * 	to be consistent on-disk instead of just the journal
+ * 	being consistent; like before live verification
+ * 	and live volume resizing.
+ *
+ * 	If FALSE, only wait to write in-memory journal to the
+ * 	disk consistently. This means that the journal still
+ * 	contains uncommitted transactions and the file system
+ * 	metadata blocks in the journal transactions might be
+ * 	written asynchronously to the disk. But there is no
+ * 	guarantee that they are written to the disk before
+ * 	returning to the caller. Note that this option is
+ * 	sufficient for file system data integrity as it
+ * 	guarantees consistent journal content on the disk.
+ */
int
-hfs_journal_flush(struct hfsmount *hfsmp)
+hfs_journal_flush(struct hfsmount *hfsmp, boolean_t wait_for_IO)
{
    int ret;
-
+
    /* Only peek at hfsmp->jnl while holding the global lock */
-	lck_rw_lock_shared(&hfsmp->hfs_global_lock);
+	hfs_lock_global (hfsmp, HFS_SHARED_LOCK);
    if (hfsmp->jnl) {
-	    ret = journal_flush(hfsmp->jnl);
+	    ret = journal_flush(hfsmp->jnl, wait_for_IO);
    } else {
+	    /* Non-journaled volume: nothing to flush. */
        ret = 0;
    }
-	lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
+	hfs_unlock_global (hfsmp);
    return ret;
}
* unused nodes have been repaired. A newer newfs_hfs will set this bit.
* As will fsck_hfs when it repairs the unused nodes.
*/
-__private_extern__
int hfs_erase_unused_nodes(struct hfsmount *hfsmp)
{
int result;
done:
return result;
}
+
+
+extern time_t snapshot_timestamp;
+
+/*
+ * Fire namespace-handler events for a file that is about to be operated on.
+ *
+ * Sends a TRACK event when the cnode has UF_TRACKED set, and a SNAPSHOT
+ * event when the file's change time falls within the current snapshot epoch
+ * (or the vnode explicitly needs snapshots).  NULL vnodes and swap files
+ * are skipped.
+ *
+ * Returns 0 on success, or EINTR if a signal arrived while waiting for a
+ * handler; handler timeouts (EAGAIN) are logged but not treated as errors.
+ */
+int
+check_for_tracked_file(struct vnode *vp, time_t ctime, uint64_t op_type, void *arg)
+{
+	int tracked_error = 0, snapshot_error = 0;
+
+	if (vp == NULL) {
+		return 0;
+	}
+
+	/* Swap files are special; skip them */
+	if (vnode_isswap(vp)) {
+		return 0;
+	}
+
+	if (VTOC(vp)->c_bsdflags & UF_TRACKED) {
+		// the file has the tracked bit set, so send an event to the tracked-file handler
+		int error;
+
+		// printf("hfs: tracked-file: encountered a file with the tracked bit set! (vp %p)\n", vp);
+		error = resolve_nspace_item(vp, op_type | NAMESPACE_HANDLER_TRACK_EVENT);
+		if (error) {
+			if (error == EAGAIN) {
+				printf("hfs: tracked-file: timed out waiting for namespace handler...\n");
+
+			} else if (error == EINTR) {
+				// printf("hfs: tracked-file: got a signal while waiting for namespace handler...\n");
+				tracked_error = EINTR;
+			}
+		}
+	}
+
+	if (ctime != 0 && snapshot_timestamp != 0 && (ctime <= snapshot_timestamp || vnode_needssnapshots(vp))) {
+		// the change time is within this epoch
+		int error;
+
+		error = resolve_nspace_item_ext(vp, op_type | NAMESPACE_HANDLER_SNAPSHOT_EVENT, arg);
+		if (error == EDEADLK) {
+			/* NOTE(review): EDEADLK from the snapshot handler is deliberately
+			 * swallowed here — presumably the handler is the caller; confirm. */
+			snapshot_error = 0;
+		} else if (error) {
+			if (error == EAGAIN) {
+				printf("hfs: cow-snapshot: timed out waiting for namespace handler...\n");
+			} else if (error == EINTR) {
+				// printf("hfs: cow-snapshot: got a signal while waiting for namespace handler...\n");
+				snapshot_error = EINTR;
+			}
+		}
+	}
+
+	/* A tracked-event interruption takes precedence over a snapshot one. */
+	if (tracked_error) return tracked_error;
+	if (snapshot_error) return snapshot_error;
+
+	return 0;
+}
+
+/*
+ * If <vp> is a dataless file (UF_COMPRESSED with a DATALESS_CMPFS_TYPE
+ * decompressor), ask the namespace handler to materialize its contents
+ * before the pending operation proceeds.
+ *
+ * Returns 0 when there is nothing to do or the handler succeeded, EINTR if
+ * interrupted by a signal, and EBADF if the handler ran but the file is
+ * still marked UF_COMPRESSED (per the comment below, callers presently
+ * ignore that error).
+ */
+int
+check_for_dataless_file(struct vnode *vp, uint64_t op_type)
+{
+	int error;
+
+	if (vp == NULL || (VTOC(vp)->c_bsdflags & UF_COMPRESSED) == 0 || VTOCMP(vp) == NULL || VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
+		// there's nothing to do, it's not dataless
+		return 0;
+	}
+
+	/* Swap files are special; ignore them */
+	if (vnode_isswap(vp)) {
+		return 0;
+	}
+
+	// printf("hfs: dataless: encountered a file with the dataless bit set! (vp %p)\n", vp);
+	error = resolve_nspace_item(vp, op_type | NAMESPACE_HANDLER_NSPACE_EVENT);
+	/* NOTE(review): EDEADLK on a write op is treated as success — presumably
+	 * the handler itself is performing the write; confirm against handler. */
+	if (error == EDEADLK && op_type == NAMESPACE_HANDLER_WRITE_OP) {
+		error = 0;
+	} else if (error) {
+		if (error == EAGAIN) {
+			printf("hfs: dataless: timed out waiting for namespace handler...\n");
+			// XXXdbg - return the fabled ENOTPRESENT (i.e. EJUKEBOX)?
+			return 0;
+		} else if (error == EINTR) {
+			// printf("hfs: dataless: got a signal while waiting for namespace handler...\n");
+			return EINTR;
+		}
+	} else if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
+		//
+		// if we're here, the dataless bit is still set on the file
+		// which means it didn't get handled. we return an error
+		// but it's presently ignored by all callers of this function.
+		//
+		// XXXdbg - EDATANOTPRESENT is what we really need...
+		//
+		return EBADF;
+	}
+
+	return error;
+}
+
+
+//
+// NOTE: this function takes care of starting a transaction and
+// acquiring the systemfile lock so that it can call
+// cat_update().
+//
+// NOTE: do NOT hold and cnode locks while calling this function
+// to avoid deadlocks (because we take a lock on the root
+// cnode)
+//
+/*
+ * Allocate the next document id from the counter stored in the root
+ * directory's extended Finder info, writing the updated counter back via
+ * cat_update() inside a transaction.  On success *docid holds the new id.
+ *
+ * Returns 0 on success or an errno from VFS_ROOT / hfs_lock /
+ * hfs_start_transaction.  All early-error paths release the root vnode
+ * reference (and cnode lock) they hold — the original code leaked both on
+ * the hfs_start_transaction failure path and returned 0 there despite the
+ * failure.
+ */
+int
+hfs_generate_document_id(struct hfsmount *hfsmp, uint32_t *docid)
+{
+	struct vnode *rvp;
+	struct cnode *cp;
+	int error;
+
+	error = VFS_ROOT(HFSTOVFS(hfsmp), &rvp, vfs_context_kernel());
+	if (error) {
+		return error;
+	}
+
+	cp = VTOC(rvp);
+	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) != 0) {
+		/* Drop the root-vnode reference taken by VFS_ROOT before bailing. */
+		vnode_put(rvp);
+		return error;
+	}
+	/* Document-id counter lives at offset 16 of the root dir's Finder info. */
+	struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((void *)((char *)&cp->c_attr.ca_finderinfo + 16));
+
+	int lockflags;
+	if ((error = hfs_start_transaction(hfsmp)) != 0) {
+		/* Propagate the real error (previously 0 was returned here) and
+		 * release the cnode lock and vnode reference we still hold. */
+		hfs_unlock(cp);
+		vnode_put(rvp);
+		return error;
+	}
+	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+	if (extinfo->document_id == 0) {
+		// initialize this to start at 3 (one greater than the root-dir id)
+		extinfo->document_id = 3;
+	}
+
+	*docid = extinfo->document_id++;
+
+	// mark the root cnode dirty
+	cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+	(void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+	hfs_systemfile_unlock (hfsmp, lockflags);
+	(void) hfs_end_transaction(hfsmp);
+
+	(void) hfs_unlock(cp);
+
+	vnode_put(rvp);
+	rvp = NULL;
+
+	return 0;
+}