From de8ee0119e51802e7b74e261b628b6de53b34e6c Mon Sep 17 00:00:00 2001 From: Apple Date: Fri, 31 Jan 2020 02:28:39 +0000 Subject: [PATCH] hfs-522.0.9.tar.gz --- .../InfoPlist.strings | 0 fsck_hfs/dfalib/SControl.c | 4 +- fsck_hfs/dfalib/SUtils.c | 16 +- fsck_hfs/dfalib/Scavenger.h | 2 +- fsck_hfs/fsck_hfs.c | 144 +- fsck_hfs/fsck_hfs.h | 4 +- fsck_hfs/fsck_hfs_strings.c | 2 +- fsck_hfs/utilities.c | 60 +- hfs.xcconfig | 2 +- hfs.xcodeproj/project.pbxproj | 1006 +++- .../xcschemes/livefiles_hfs.xcscheme | 80 + .../xcschemes/livefiles_hfs_tester.xcscheme | 97 + hfs_encodings/hfs_encodings.h | 4 +- hfs_util/hfs_util.osx.entitlements | 2 +- livefiles_hfs_plugin/lf_hfs.h | 600 +++ livefiles_hfs_plugin/lf_hfs_attrlist.c | 682 +++ livefiles_hfs_plugin/lf_hfs_attrlist.h | 61 + livefiles_hfs_plugin/lf_hfs_btree.c | 1952 +++++++ livefiles_hfs_plugin/lf_hfs_btree.h | 389 ++ livefiles_hfs_plugin/lf_hfs_btree_allocate.c | 655 +++ livefiles_hfs_plugin/lf_hfs_btree_misc_ops.c | 568 ++ livefiles_hfs_plugin/lf_hfs_btree_node_ops.c | 941 ++++ .../lf_hfs_btree_node_reserve.c | 313 ++ livefiles_hfs_plugin/lf_hfs_btree_tree_ops.c | 1298 +++++ livefiles_hfs_plugin/lf_hfs_btrees_internal.h | 336 ++ livefiles_hfs_plugin/lf_hfs_btrees_io.c | 916 ++++ livefiles_hfs_plugin/lf_hfs_btrees_io.h | 35 + livefiles_hfs_plugin/lf_hfs_btrees_private.h | 382 ++ livefiles_hfs_plugin/lf_hfs_catalog.c | 3841 +++++++++++++ livefiles_hfs_plugin/lf_hfs_catalog.h | 290 + livefiles_hfs_plugin/lf_hfs_chash.c | 537 ++ livefiles_hfs_plugin/lf_hfs_chash.h | 30 + livefiles_hfs_plugin/lf_hfs_cnode.c | 2040 +++++++ livefiles_hfs_plugin/lf_hfs_cnode.h | 367 ++ livefiles_hfs_plugin/lf_hfs_common.h | 149 + livefiles_hfs_plugin/lf_hfs_defs.h | 99 + livefiles_hfs_plugin/lf_hfs_dirops_handler.c | 435 ++ livefiles_hfs_plugin/lf_hfs_dirops_handler.h | 28 + livefiles_hfs_plugin/lf_hfs_endian.c | 872 +++ livefiles_hfs_plugin/lf_hfs_endian.h | 50 + .../lf_hfs_file_extent_mapping.c | 1764 ++++++ .../lf_hfs_file_extent_mapping.h 
| 60 + .../lf_hfs_file_mgr_internal.h | 250 + livefiles_hfs_plugin/lf_hfs_fileops_handler.c | 656 +++ livefiles_hfs_plugin/lf_hfs_fileops_handler.h | 66 + livefiles_hfs_plugin/lf_hfs_format.h | 623 +++ livefiles_hfs_plugin/lf_hfs_fsops_handler.c | 709 +++ livefiles_hfs_plugin/lf_hfs_fsops_handler.h | 24 + livefiles_hfs_plugin/lf_hfs_generic_buf.c | 800 +++ livefiles_hfs_plugin/lf_hfs_generic_buf.h | 87 + livefiles_hfs_plugin/lf_hfs_journal.c | 3474 ++++++++++++ livefiles_hfs_plugin/lf_hfs_journal.h | 379 ++ livefiles_hfs_plugin/lf_hfs_link.c | 950 ++++ livefiles_hfs_plugin/lf_hfs_link.h | 24 + livefiles_hfs_plugin/lf_hfs_locks.c | 186 + livefiles_hfs_plugin/lf_hfs_locks.h | 65 + livefiles_hfs_plugin/lf_hfs_logger.c | 33 + livefiles_hfs_plugin/lf_hfs_logger.h | 38 + livefiles_hfs_plugin/lf_hfs_lookup.c | 254 + livefiles_hfs_plugin/lf_hfs_lookup.h | 16 + livefiles_hfs_plugin/lf_hfs_mount.h | 71 + livefiles_hfs_plugin/lf_hfs_rangelist.c | 381 ++ livefiles_hfs_plugin/lf_hfs_rangelist.h | 44 + livefiles_hfs_plugin/lf_hfs_raw_read_write.c | 534 ++ livefiles_hfs_plugin/lf_hfs_raw_read_write.h | 32 + livefiles_hfs_plugin/lf_hfs_readwrite_ops.c | 755 +++ livefiles_hfs_plugin/lf_hfs_readwrite_ops.h | 32 + livefiles_hfs_plugin/lf_hfs_sbunicode.c | 973 ++++ livefiles_hfs_plugin/lf_hfs_sbunicode.h | 135 + .../lf_hfs_ucs_string_cmp_data.h | 268 + .../lf_hfs_unicode_wrappers.c | 401 ++ .../lf_hfs_unicode_wrappers.h | 28 + livefiles_hfs_plugin/lf_hfs_utfconvdata.h | 1713 ++++++ livefiles_hfs_plugin/lf_hfs_utils.c | 130 + livefiles_hfs_plugin/lf_hfs_utils.h | 42 + livefiles_hfs_plugin/lf_hfs_vfsops.c | 2103 ++++++++ livefiles_hfs_plugin/lf_hfs_vfsops.h | 44 + livefiles_hfs_plugin/lf_hfs_vfsutils.c | 2610 +++++++++ livefiles_hfs_plugin/lf_hfs_vfsutils.h | 56 + livefiles_hfs_plugin/lf_hfs_vnode.c | 173 + livefiles_hfs_plugin/lf_hfs_vnode.h | 246 + livefiles_hfs_plugin/lf_hfs_vnops.c | 3173 +++++++++++ livefiles_hfs_plugin/lf_hfs_vnops.h | 51 + .../lf_hfs_volume_allocation.c | 4790 
+++++++++++++++++ .../lf_hfs_volume_allocation.h | 18 + livefiles_hfs_plugin/lf_hfs_xattr.c | 1854 +++++++ livefiles_hfs_plugin/lf_hfs_xattr.h | 25 + livefiles_hfs_plugin/livefiles_hfs_tester.c | 4782 ++++++++++++++++ .../livefiles_hfs_tester.entitlements | 10 + livefiles_hfs_plugin/livefiles_hfs_tester.h | 14 + livefiles_hfs_plugin/scripts/CreateRelease.py | 73 + mount_hfs/mount_hfs.c | 16 +- mount_hfs/mount_hfs.osx.entitlements | 2 +- newfs_hfs/makehfs.c | 69 +- tests/cases/test-cas-bsdflags.c | 69 + tests/cases/test-chflags.c | 4 +- tests/cases/test-class-roll.c | 6 +- tests/cases/test-dir-link.c | 4 +- tests/cases/test-dprotect.c | 4 +- tests/cases/test-external-jnl.c | 4 +- tests/cases/test-file-too-big.m | 4 +- tests/cases/test-fsinfo-cprotect.c | 4 +- tests/cases/test-fsync.c | 4 +- tests/cases/test-get-volume-create-time.c | 43 + tests/cases/test-getattrlist-dprotect.m | 4 +- tests/cases/test-getattrlist.c | 2 +- tests/cases/test-hard-links.m | 4 +- tests/cases/test-invalid-ranges.m | 4 +- tests/cases/test-journal-toggle.c | 4 +- tests/cases/test-key-roll.c | 4 +- tests/cases/test-list-ids.c | 6 +- tests/cases/test-map-private.m | 4 +- tests/cases/test-move-data-extents.c | 4 +- tests/cases/test-quotas.c | 5 +- tests/cases/test-resize.m | 4 +- tests/cases/test-scan-range-size.c | 4 +- tests/cases/test-secluded-rename.c | 4 +- tests/cases/test-set-protection-class.c | 6 +- tests/cases/test-sparse-dev.c | 4 +- tests/cases/test-throttled-io.c | 259 +- tests/cases/test-transcode.m | 2 +- tests/disk-image.m | 9 +- tests/gen-test-plist.sh | 11 + 123 files changed, 55626 insertions(+), 259 deletions(-) rename fs/{English.lproj => en.lproj}/InfoPlist.strings (100%) create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs_tester.xcscheme create mode 100644 livefiles_hfs_plugin/lf_hfs.h create mode 100644 livefiles_hfs_plugin/lf_hfs_attrlist.c create mode 100644 
livefiles_hfs_plugin/lf_hfs_attrlist.h create mode 100644 livefiles_hfs_plugin/lf_hfs_btree.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btree.h create mode 100644 livefiles_hfs_plugin/lf_hfs_btree_allocate.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btree_misc_ops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btree_node_ops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btree_node_reserve.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btree_tree_ops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btrees_internal.h create mode 100644 livefiles_hfs_plugin/lf_hfs_btrees_io.c create mode 100644 livefiles_hfs_plugin/lf_hfs_btrees_io.h create mode 100644 livefiles_hfs_plugin/lf_hfs_btrees_private.h create mode 100644 livefiles_hfs_plugin/lf_hfs_catalog.c create mode 100644 livefiles_hfs_plugin/lf_hfs_catalog.h create mode 100644 livefiles_hfs_plugin/lf_hfs_chash.c create mode 100644 livefiles_hfs_plugin/lf_hfs_chash.h create mode 100644 livefiles_hfs_plugin/lf_hfs_cnode.c create mode 100644 livefiles_hfs_plugin/lf_hfs_cnode.h create mode 100644 livefiles_hfs_plugin/lf_hfs_common.h create mode 100644 livefiles_hfs_plugin/lf_hfs_defs.h create mode 100644 livefiles_hfs_plugin/lf_hfs_dirops_handler.c create mode 100644 livefiles_hfs_plugin/lf_hfs_dirops_handler.h create mode 100644 livefiles_hfs_plugin/lf_hfs_endian.c create mode 100644 livefiles_hfs_plugin/lf_hfs_endian.h create mode 100644 livefiles_hfs_plugin/lf_hfs_file_extent_mapping.c create mode 100644 livefiles_hfs_plugin/lf_hfs_file_extent_mapping.h create mode 100644 livefiles_hfs_plugin/lf_hfs_file_mgr_internal.h create mode 100644 livefiles_hfs_plugin/lf_hfs_fileops_handler.c create mode 100644 livefiles_hfs_plugin/lf_hfs_fileops_handler.h create mode 100644 livefiles_hfs_plugin/lf_hfs_format.h create mode 100644 livefiles_hfs_plugin/lf_hfs_fsops_handler.c create mode 100644 livefiles_hfs_plugin/lf_hfs_fsops_handler.h create mode 100644 livefiles_hfs_plugin/lf_hfs_generic_buf.c create mode 
100644 livefiles_hfs_plugin/lf_hfs_generic_buf.h create mode 100644 livefiles_hfs_plugin/lf_hfs_journal.c create mode 100644 livefiles_hfs_plugin/lf_hfs_journal.h create mode 100644 livefiles_hfs_plugin/lf_hfs_link.c create mode 100644 livefiles_hfs_plugin/lf_hfs_link.h create mode 100644 livefiles_hfs_plugin/lf_hfs_locks.c create mode 100644 livefiles_hfs_plugin/lf_hfs_locks.h create mode 100644 livefiles_hfs_plugin/lf_hfs_logger.c create mode 100644 livefiles_hfs_plugin/lf_hfs_logger.h create mode 100644 livefiles_hfs_plugin/lf_hfs_lookup.c create mode 100644 livefiles_hfs_plugin/lf_hfs_lookup.h create mode 100644 livefiles_hfs_plugin/lf_hfs_mount.h create mode 100644 livefiles_hfs_plugin/lf_hfs_rangelist.c create mode 100644 livefiles_hfs_plugin/lf_hfs_rangelist.h create mode 100644 livefiles_hfs_plugin/lf_hfs_raw_read_write.c create mode 100644 livefiles_hfs_plugin/lf_hfs_raw_read_write.h create mode 100644 livefiles_hfs_plugin/lf_hfs_readwrite_ops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_readwrite_ops.h create mode 100644 livefiles_hfs_plugin/lf_hfs_sbunicode.c create mode 100644 livefiles_hfs_plugin/lf_hfs_sbunicode.h create mode 100644 livefiles_hfs_plugin/lf_hfs_ucs_string_cmp_data.h create mode 100644 livefiles_hfs_plugin/lf_hfs_unicode_wrappers.c create mode 100644 livefiles_hfs_plugin/lf_hfs_unicode_wrappers.h create mode 100644 livefiles_hfs_plugin/lf_hfs_utfconvdata.h create mode 100644 livefiles_hfs_plugin/lf_hfs_utils.c create mode 100644 livefiles_hfs_plugin/lf_hfs_utils.h create mode 100644 livefiles_hfs_plugin/lf_hfs_vfsops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_vfsops.h create mode 100644 livefiles_hfs_plugin/lf_hfs_vfsutils.c create mode 100644 livefiles_hfs_plugin/lf_hfs_vfsutils.h create mode 100644 livefiles_hfs_plugin/lf_hfs_vnode.c create mode 100644 livefiles_hfs_plugin/lf_hfs_vnode.h create mode 100644 livefiles_hfs_plugin/lf_hfs_vnops.c create mode 100644 livefiles_hfs_plugin/lf_hfs_vnops.h create mode 100644 
livefiles_hfs_plugin/lf_hfs_volume_allocation.c create mode 100644 livefiles_hfs_plugin/lf_hfs_volume_allocation.h create mode 100644 livefiles_hfs_plugin/lf_hfs_xattr.c create mode 100644 livefiles_hfs_plugin/lf_hfs_xattr.h create mode 100644 livefiles_hfs_plugin/livefiles_hfs_tester.c create mode 100644 livefiles_hfs_plugin/livefiles_hfs_tester.entitlements create mode 100644 livefiles_hfs_plugin/livefiles_hfs_tester.h create mode 100755 livefiles_hfs_plugin/scripts/CreateRelease.py create mode 100644 tests/cases/test-cas-bsdflags.c create mode 100644 tests/cases/test-get-volume-create-time.c diff --git a/fs/English.lproj/InfoPlist.strings b/fs/en.lproj/InfoPlist.strings similarity index 100% rename from fs/English.lproj/InfoPlist.strings rename to fs/en.lproj/InfoPlist.strings diff --git a/fsck_hfs/dfalib/SControl.c b/fsck_hfs/dfalib/SControl.c index 5ee6c40..43b2eda 100644 --- a/fsck_hfs/dfalib/SControl.c +++ b/fsck_hfs/dfalib/SControl.c @@ -501,7 +501,7 @@ termScav: int rv = CacheRead(&fscache, offset, len, &buf); if (rv == 0) { fprintf(stderr, "Offset %llu length %u:\n", offset, len); - DumpData(buf->Buffer, len); + DumpData(buf->Buffer, len, NULL); CacheRelease(&fscache, buf, 0); } else { fprintf(stderr, "%s(%d): rv = %d\n", __FUNCTION__, __LINE__, rv); @@ -1349,7 +1349,7 @@ static int ScavSetUp( SGlob *GPtr) InitializeVolumeObject( GPtr ); /* Check if the volume type of initialized object is valid. 
If not, return error */ - if (VolumeObjectIsValid() == false) { + if (VolumeObjectIsValid(GPtr) == false) { return (R_BadSig); } diff --git a/fsck_hfs/dfalib/SUtils.c b/fsck_hfs/dfalib/SUtils.c index 44b9847..9c66059 100644 --- a/fsck_hfs/dfalib/SUtils.c +++ b/fsck_hfs/dfalib/SUtils.c @@ -1597,7 +1597,7 @@ static void CompareVolHeaderBTreeSizes( SGlobPtr GPtr, enum { WIDTH = 16, }; void -DumpData(const void *data, size_t len) +DumpData(const void *data, size_t len, char *label) { unsigned char *base = (unsigned char*)data; unsigned char *end = base + len; @@ -1629,8 +1629,12 @@ DumpData(const void *data, size_t len) } } allzeroes = 1; - - fprintf(stderr, "%04x: ", (int)(cp - base)); + if (label == NULL) { + fprintf(stderr, "%04x: ", (int)(cp - base)); + } + else { + fprintf(stderr, "%s %04x: ", label, (int)(cp - base)); + } for (i = 0, tmp = cp; tmp < tend; tmp++) { fprintf(stderr, "%02x", *tmp); if (++i % 2 == 0) @@ -1666,7 +1670,7 @@ DumpData(const void *data, size_t len) // Result: returns true if volume is known volume type (i.e. HFS, HFS+) // false otherwise. 
//****************************************************************************** -Boolean VolumeObjectIsValid(void) +Boolean VolumeObjectIsValid(SGlobPtr gptr) { VolumeObjectPtr myVOPtr = GetVolumeObjectPtr(); Boolean retval = false; @@ -1703,10 +1707,10 @@ done: GetVolumeObjectBlockNum(&myBlockNum); err = GetVolumeBlock(myVOPtr->vcbPtr, myBlockNum, kGetBlock, &theBlockDesc); if (err != noErr) { - fprintf(stderr, "%s: Cannot GetVolumetBlock: %d\n", __FUNCTION__, err); + fprintf(stderr, "%s: Cannot GetVolumeBlock: %d\n", __FUNCTION__, err); } else { uint8_t *ptr = (uint8_t*)theBlockDesc.buffer; - DumpData(ptr, theBlockDesc.blockSize); + DumpData(ptr, theBlockDesc.blockSize, gptr->deviceNode); ReleaseVolumeBlock(myVOPtr->vcbPtr, &theBlockDesc, kReleaseBlock); } } diff --git a/fsck_hfs/dfalib/Scavenger.h b/fsck_hfs/dfalib/Scavenger.h index e0ffcce..7b8768f 100644 --- a/fsck_hfs/dfalib/Scavenger.h +++ b/fsck_hfs/dfalib/Scavenger.h @@ -1006,7 +1006,7 @@ extern OSErr GetVolumeObjectPrimaryMDB( BlockDescriptor * theBlockDescPtr ); extern OSErr GetVolumeObjectVHBorMDB( BlockDescriptor * theBlockDescPtr ); extern void PrintName( int theCount, const UInt8 *theNamePtr, Boolean isUnicodeString ); extern void PrintVolumeObject( void ); -extern Boolean VolumeObjectIsValid( void ); +extern Boolean VolumeObjectIsValid( SGlobPtr gptr ); extern Boolean VolumeObjectIsHFSPlus( void ); extern Boolean VolumeObjectIsHFS( void ); extern Boolean VolumeObjectIsEmbeddedHFSPlus( void ); diff --git a/fsck_hfs/fsck_hfs.c b/fsck_hfs/fsck_hfs.c index de092f9..35d386c 100644 --- a/fsck_hfs/fsck_hfs.c +++ b/fsck_hfs/fsck_hfs.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -72,7 +73,7 @@ char quick; /* quick check returns clean, dirty, or failure */ char debug; /* output debugging info */ char disable_journal; /* If debug, and set, do not simulate journal replay */ char scanflag; /* Scan entire disk for bad blocks */ -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE char 
embedded = 0; #else char embedded = 1; @@ -89,6 +90,7 @@ char errorOnExit = 0; /* Exit on first error */ int upgrading; /* upgrading format */ int lostAndFoundMode = 0; /* octal mode used when creating "lost+found" directory */ uint64_t reqCacheSize; /* Cache size requested by the caller (may be specified by the user via -c) */ +int detonator_run = 0; int fsmodified; /* 1 => write done to file system */ int fsreadfd; /* file descriptor for reading file system */ @@ -309,8 +311,10 @@ main(argc, argv) } ret = 0; - while (argc-- > 0) - ret |= checkfilesys(blockcheck(*argv++)); + while (argc-- > 0) { + char *pcBlkChk = blockcheck(*argv++); + ret |= checkfilesys(pcBlkChk); + } exit(ret); } @@ -335,6 +339,9 @@ mountpoint(const char *cdev) char *unraw = NULL; int result; int i; + + if (detonator_run) + return NULL; unraw = strdup(cdev); unrawname(unraw); @@ -388,7 +395,7 @@ checkfilesys(char * filesys) mntonname = strdup("/"); } - if (lflag) { + if (lflag && !detonator_run) { struct stat fs_stat; /* @@ -455,7 +462,7 @@ checkfilesys(char * filesys) if (debug && preen) pwarn("starting\n"); - if (setup( filesys, &canWrite ) == 0) { + if (setup( filesys, &canWrite ) == 0) { if (preen) pfatal("CAN'T CHECK FILE SYSTEM."); result = EEXIT; @@ -497,12 +504,13 @@ checkfilesys(char * filesys) chkLev = kPartialCheck; repLev = kForceRepairs; // this will force rebuild of B-Tree file } - - fsckSetVerbosity(context, logLev); + + fsckSetVerbosity(context, logLev); /* All of fsck_hfs' output should go thorugh logstring */ fsckSetOutput(context, NULL); /* Setup writer that will output to standard out */ fsckSetWriter(context, &outstring); + /* Setup logger that will write to log file */ fsckSetLogger(context, &logstring); if (guiControl) { @@ -541,7 +549,7 @@ checkfilesys(char * filesys) result = CheckHFS( filesys, fsreadfd, fswritefd, chkLev, repLev, context, lostAndFoundMode, canWrite, &fsmodified, lflag, rebuildOptions ); - if (debug) + if (debug) plog("\tCheckHFS returned %d, 
fsmodified = %d\n", result, fsmodified); if (!hotmount) { @@ -645,41 +653,67 @@ setup( char *dev, int *canWritePtr ) fswritefd = -1; *canWritePtr = 0; + + if (!detonator_run) + { + if (stat(dev, &statb) < 0) { + plog("Can't stat %s: %s\n", dev, strerror(errno)); + return (0); + } + if ((statb.st_mode & S_IFMT) != S_IFCHR) { + pfatal("%s is not a character device", dev); + if (reply("CONTINUE") == 0) + return (0); + } + /* Always attempt to replay the journal */ + if (!nflag && !quick) { + // We know we have a character device by now. + if (strncmp(dev, "/dev/rdisk", 10) == 0) { + char block_device[MAXPATHLEN+1]; + int rv; + snprintf(block_device, sizeof(block_device), "/dev/%s", dev + 6); + rv = journal_replay(block_device); + if (debug) + plog("journal_replay(%s) returned %d\n", block_device, rv); + } + } + /* attempt to get write access to the block device and if not check if volume is */ + /* mounted read-only. */ + if (nflag == 0 && quick == 0) { + getWriteAccess( dev, canWritePtr ); + } - if (stat(dev, &statb) < 0) { - plog("Can't stat %s: %s\n", dev, strerror(errno)); - return (0); - } - if ((statb.st_mode & S_IFMT) != S_IFCHR) { - pfatal("%s is not a character device", dev); - if (reply("CONTINUE") == 0) - return (0); - } - /* Always attempt to replay the journal */ - if (!nflag && !quick) { - // We know we have a character device by now. - if (strncmp(dev, "/dev/rdisk", 10) == 0) { - char block_device[MAXPATHLEN+1]; - int rv; - snprintf(block_device, sizeof(block_device), "/dev/%s", dev + 6); - rv = journal_replay(block_device); - if (debug) - plog("journal_replay(%s) returned %d\n", block_device, rv); - } - } - /* attempt to get write access to the block device and if not check if volume is */ - /* mounted read-only. */ - if (nflag == 0 && quick == 0) { - getWriteAccess( dev, canWritePtr ); - } - - if (nflag || quick || (fswritefd = open(dev, O_RDWR | (hotmount ? 
0 : O_EXLOCK))) < 0) { - fswritefd = -1; - if (preen) { - pfatal("** %s (NO WRITE ACCESS)\n", dev); - } - } - + if (nflag || quick || (fswritefd = open(dev, O_RDWR | (hotmount ? 0 : O_EXLOCK))) < 0) { + fswritefd = -1; + if (preen) { + pfatal("** %s (NO WRITE ACCESS)\n", dev); + } + } + } else { // detonator run + plog("fsck_hfs: detonator_run (%s).\n", dev); + char *end_ptr; + fswritefd = (int)strtol(dev+8, &end_ptr, 10); + if (*end_ptr) + { + err(1, "fsck_hfs: Invalid file descriptor path: %s", dev); + } + + struct stat info; + int error = fstat(fswritefd, &info); + if (error) + { + err(1, "fsck_hfs: fstat %s", dev); + } + + error = lseek(fswritefd, 0, SEEK_SET); + if (error == -1) + { + err(1, "fsck_hfs: Could not seek %d for dev: %s, errorno %d", fswritefd, dev, errno); + } + + *canWritePtr = TRUE; + } + if (preen == 0 && !guiControl) { if (nflag || quick || fswritefd == -1) { plog("** %s (NO WRITE)\n", dev); @@ -702,7 +736,6 @@ setup( char *dev, int *canWritePtr ) } } - /* Get device block size to initialize cache */ if (ioctl(fsreadfd, DKIOCGETBLOCKSIZE, &devBlockSize) < 0) { pfatal ("Can't get device block size\n"); @@ -736,17 +769,19 @@ setup( char *dev, int *canWritePtr ) if (rv == -1) { (void)fplog(stderr, "sysctlbyname failed, not auto-setting cache size\n"); } else { - int d = (hotroot && !lflag) ? 2 : 8; - int safeMode = 0; - dsize = sizeof(safeMode); - rv = sysctlbyname("kern.safeboot", &safeMode, &dsize, NULL, 0); - if (rv != -1 && safeMode != 0 && hotroot && !lflag) { -#define kMaxSafeModeMem ((size_t)2 * 1024 * 1024 * 1024) /* 2Gbytes, means cache will max out at 1gbyte */ - if (debug) { - (void)fplog(stderr, "Safe mode and single-user, setting memsize to a maximum of 2gbytes\n"); - } - memSize = (memSize < kMaxSafeModeMem) ? memSize : kMaxSafeModeMem; - } + int d = (hotroot && !lflag) ? 
2 : 8; + if (!detonator_run) { + int safeMode = 0; + dsize = sizeof(safeMode); + rv = sysctlbyname("kern.safeboot", &safeMode, &dsize, NULL, 0); + if (rv != -1 && safeMode != 0 && hotroot && !lflag) { + #define kMaxSafeModeMem ((size_t)2 * 1024 * 1024 * 1024) /* 2Gbytes, means cache will max out at 1gbyte */ + if (debug) { + (void)fplog(stderr, "Safe mode and single-user, setting memsize to a maximum of 2gbytes\n"); + } + memSize = (memSize < kMaxSafeModeMem) ? memSize : kMaxSafeModeMem; + } + } reqCacheSize = memSize / d; } } @@ -758,6 +793,7 @@ setup( char *dev, int *canWritePtr ) if (CacheInit (&fscache, fsreadfd, fswritefd, devBlockSize, cacheBlockSize, cacheTotalBlocks, CacheHashSize, preTouchMem) != EOK) { pfatal("Can't initialize disk cache\n"); + return (0); } diff --git a/fsck_hfs/fsck_hfs.h b/fsck_hfs/fsck_hfs.h index c3ccea1..04eaa5e 100644 --- a/fsck_hfs/fsck_hfs.h +++ b/fsck_hfs/fsck_hfs.h @@ -43,6 +43,8 @@ extern int fsreadfd; /* file descriptor for reading file system */ extern int fswritefd; /* file descriptor for writing file system */ extern Cache_t fscache; +extern int detonator_run; + #define DIRTYEXIT 3 /* Filesystem Dirty, no checks */ #define FIXEDROOTEXIT 4 /* Writeable Root Filesystem was fixed */ @@ -71,5 +73,5 @@ int reply __P((char *question)); void start_progress(void); void draw_progress(int); void end_progress(void); -void DumpData(const void *, size_t); +void DumpData(const void *ptr, size_t sz, char *label); diff --git a/fsck_hfs/fsck_hfs_strings.c b/fsck_hfs/fsck_hfs_strings.c index a2f7bdb..a1a8bda 100644 --- a/fsck_hfs/fsck_hfs_strings.c +++ b/fsck_hfs/fsck_hfs_strings.c @@ -197,7 +197,7 @@ hfs_errors[] = { /* 610 - 619 */ { E_BadHardLinkDate, "Bad hard link creation date", fsckMsgError, fsckLevel1, 0, }, - { E_DirtyJournal, "Journal need to be replayed but volume is read-only", fsckMsgError, fsckLevel1, 0, }, + { E_DirtyJournal, "Journal needs to be replayed but volume is read-only", fsckMsgError, fsckLevel1, 0, }, { 
E_LinkChainNonLink, "File record has hard link chain flag (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, { E_LinkHasData, "Hard link record has data extents (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, { E_FileLinkCountError, "File has incorrect number of links (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, diff --git a/fsck_hfs/utilities.c b/fsck_hfs/utilities.c index 3a3fbee..e0c31aa 100644 --- a/fsck_hfs/utilities.c +++ b/fsck_hfs/utilities.c @@ -162,7 +162,7 @@ char * blockcheck(char *origname) { struct stat stslash, stblock, stchar; - char *newname, *raw; + char *newname, *raw = NULL; int retried = 0; hotroot = 0; @@ -173,6 +173,13 @@ blockcheck(char *origname) } newname = origname; retry: + if (!strncmp(newname, "/dev/fd/", 8)) { + detonator_run = 1; + return (origname); + } else { + detonator_run = 0; + } + if (stat(newname, &stblock) < 0) { perror(newname); plog("Can't stat %s\n", newname); @@ -550,29 +557,29 @@ shutdown_logging(void) } for(i=0; i < 60; i++) { - log_file = safely_open_log_file(fname); - if (log_file) { - fwrite(in_mem_log, cur_in_mem_log - in_mem_log, 1, log_file); - - fflush(log_file); - fclose(log_file); - log_file = NULL; - - free(in_mem_log); - in_mem_log = cur_in_mem_log = NULL; - in_mem_log_size = 0; - - break; - } else { - // hmmm, failed to open the output file so wait - // a while only if the fs is read-only and then - // try again - if (errno == EROFS) { - sleep(1); - } else { - break; - } - } + log_file = safely_open_log_file(fname); + if (log_file) { + fwrite(in_mem_log, cur_in_mem_log - in_mem_log, 1, log_file); + + fflush(log_file); + fclose(log_file); + log_file = NULL; + + free(in_mem_log); + in_mem_log = cur_in_mem_log = NULL; + in_mem_log_size = 0; + + break; + } else { + // hmmm, failed to open the output file so wait + // a while only if the fs is read-only and then + // try again + if (errno == EROFS) { + sleep(1); + } else { + break; + } 
+ } } } } @@ -602,6 +609,11 @@ setup_logging(void) setlinebuf(stdout); setlinebuf(stderr); } + + if (detonator_run) { + // Do not create a log file + return; + } // our copy of this variable since we may // need to change it to make the right thing diff --git a/hfs.xcconfig b/hfs.xcconfig index 630c63c..811953f 100644 --- a/hfs.xcconfig +++ b/hfs.xcconfig @@ -4,7 +4,7 @@ FS_BUNDLE_PATH = /System/Library/Filesystems/hfs.fs FS_BUNDLE_BIN_DIR = Contents/Resources FS_BUNDLE_BIN_PATH = $FS_BUNDLE_PATH/$FS_BUNDLE_BIN_DIR FS_BUNDLE_RESOURCES_PATH = $FS_BUNDLE_PATH/Contents/Resources -FS_BUNDLE_ENGLISH_PATH = $FS_BUNDLE_RESOURCES_PATH/English.lproj +FS_BUNDLE_ENGLISH_PATH = $FS_BUNDLE_RESOURCES_PATH/en.lproj VERSIONING_SYSTEM = apple-generic CURRENT_PROJECT_VERSION = $(RC_ProjectSourceVersion) diff --git a/hfs.xcodeproj/project.pbxproj b/hfs.xcodeproj/project.pbxproj index 81bc813..a6b84e0 100644 --- a/hfs.xcodeproj/project.pbxproj +++ b/hfs.xcodeproj/project.pbxproj @@ -66,6 +66,27 @@ name = All_iOS; productName = All_iOS; }; + 9430FE92211658C1009CC8AF /* hfs_livefiles */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 9430FE96211658C2009CC8AF /* Build configuration list for PBXAggregateTarget "hfs_livefiles" */; + buildPhases = ( + ); + dependencies = ( + 9430FE98211658E7009CC8AF /* PBXTargetDependency */, + ); + name = hfs_livefiles; + productName = hfs_livefiles; + }; + DB1AAB7C20472D140036167F /* Swift_iOS */ = { + isa = PBXAggregateTarget; + buildConfigurationList = DB1AAB8020472D140036167F /* Build configuration list for PBXAggregateTarget "Swift_iOS" */; + buildPhases = ( + ); + dependencies = ( + ); + name = Swift_iOS; + productName = Swift_iOS; + }; FB55AE651B7D47B300701D03 /* ios-tests */ = { isa = PBXAggregateTarget; buildConfigurationList = FB55AE701B7D47B300701D03 /* Build configuration list for PBXAggregateTarget "ios-tests" */; @@ -147,6 +168,7 @@ 0703A0541CD826160035BCFD /* test-defrag.c in Sources */ = {isa = PBXBuildFile; fileRef = 
0703A0531CD826160035BCFD /* test-defrag.c */; }; 07C2BF891CB43F5E00D8327D /* test-renamex.c in Sources */ = {isa = PBXBuildFile; fileRef = 07C2BF881CB43F5E00D8327D /* test-renamex.c */; }; 09D6B7D71E317ED2003C20DC /* test_disklevel.c in Sources */ = {isa = PBXBuildFile; fileRef = 09D6B7D61E317ED2003C20DC /* test_disklevel.c */; }; + 18B450692104D958002052BF /* lf_hfs_journal.c in Sources */ = {isa = PBXBuildFile; fileRef = 18B450682104D958002052BF /* lf_hfs_journal.c */; }; 2A386A3B1C22209C007FEDAC /* test-list-ids.c in Sources */ = {isa = PBXBuildFile; fileRef = 2A386A3A1C221E67007FEDAC /* test-list-ids.c */; }; 2A84DBD41D9E15F2007964B8 /* test-raw-dev-unaligned.c in Sources */ = {isa = PBXBuildFile; fileRef = 2A84DBD31D9E1179007964B8 /* test-raw-dev-unaligned.c */; }; 2A9399951BDFEB5200FB075B /* test-access.c in Sources */ = {isa = PBXBuildFile; fileRef = 2A9399941BDFEA6E00FB075B /* test-access.c */; }; @@ -248,9 +270,85 @@ 86CBF3831831876200A64A93 /* misc.c in Sources */ = {isa = PBXBuildFile; fileRef = FDD9FA4E14A1343D0043D4A9 /* misc.c */; }; 86CBF3861831880F00A64A93 /* iterate_hfs_metadata.c in Sources */ = {isa = PBXBuildFile; fileRef = 86CBF3851831880F00A64A93 /* iterate_hfs_metadata.c */; }; 86CBF3871831884600A64A93 /* Data.h in Headers */ = {isa = PBXBuildFile; fileRef = FDD9FA4714A1343D0043D4A9 /* Data.h */; }; + 900BDEE81FF91B8C002F7EC0 /* livefiles_hfs_tester.c in Sources */ = {isa = PBXBuildFile; fileRef = 900BDECF1FF9198E002F7EC0 /* livefiles_hfs_tester.c */; }; + 900BDEEB1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.h in Headers */ = {isa = PBXBuildFile; fileRef = 900BDEE91FF91C2A002F7EC0 /* lf_hfs_fsops_handler.h */; }; + 900BDEEC1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.c in Sources */ = {isa = PBXBuildFile; fileRef = 900BDEEA1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.c */; }; + 900BDEEE1FF91C46002F7EC0 /* lf_hfs_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 900BDEED1FF91C46002F7EC0 /* lf_hfs_common.h */; }; + 900BDEF51FF9202E002F7EC0 /* 
lf_hfs_dirops_handler.h in Headers */ = {isa = PBXBuildFile; fileRef = 900BDEF31FF9202E002F7EC0 /* lf_hfs_dirops_handler.h */; }; + 900BDEF61FF9202E002F7EC0 /* lf_hfs_dirops_handler.c in Sources */ = {isa = PBXBuildFile; fileRef = 900BDEF41FF9202E002F7EC0 /* lf_hfs_dirops_handler.c */; }; + 900BDEF91FF92170002F7EC0 /* lf_hfs_fileops_handler.h in Headers */ = {isa = PBXBuildFile; fileRef = 900BDEF71FF92170002F7EC0 /* lf_hfs_fileops_handler.h */; }; + 900BDEFA1FF92170002F7EC0 /* lf_hfs_fileops_handler.c in Sources */ = {isa = PBXBuildFile; fileRef = 900BDEF81FF92170002F7EC0 /* lf_hfs_fileops_handler.c */; }; + 900BDEFD1FF9246F002F7EC0 /* lf_hfs_logger.h in Headers */ = {isa = PBXBuildFile; fileRef = 900BDEFB1FF9246F002F7EC0 /* lf_hfs_logger.h */; }; + 900BDEFE1FF9246F002F7EC0 /* lf_hfs_logger.c in Sources */ = {isa = PBXBuildFile; fileRef = 900BDEFC1FF9246F002F7EC0 /* lf_hfs_logger.c */; }; + 9022D170205EC16900D9A2AE /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 9022D16F205EC16900D9A2AE /* CoreFoundation.framework */; }; + 9022D171205EC18500D9A2AE /* livefiles_hfs.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 900BDED41FF919C2002F7EC0 /* livefiles_hfs.dylib */; }; + 9022D174205FE5FA00D9A2AE /* lf_hfs_utils.h in Headers */ = {isa = PBXBuildFile; fileRef = 9022D172205FE5FA00D9A2AE /* lf_hfs_utils.h */; }; + 9022D175205FE5FA00D9A2AE /* lf_hfs_utils.c in Sources */ = {isa = PBXBuildFile; fileRef = 9022D173205FE5FA00D9A2AE /* lf_hfs_utils.c */; }; + 9022D18120600D9E00D9A2AE /* lf_hfs_rangelist.h in Headers */ = {isa = PBXBuildFile; fileRef = 9022D17F20600D9E00D9A2AE /* lf_hfs_rangelist.h */; }; + 9022D18220600D9E00D9A2AE /* lf_hfs_rangelist.c in Sources */ = {isa = PBXBuildFile; fileRef = 9022D18020600D9E00D9A2AE /* lf_hfs_rangelist.c */; }; + 9022D1842060FBBE00D9A2AE /* lf_hfs_vfsops.h in Headers */ = {isa = PBXBuildFile; fileRef = 9022D1832060FBBE00D9A2AE /* lf_hfs_vfsops.h */; }; + 9022D1862060FBD200D9A2AE /* lf_hfs_vfsutils.h 
in Headers */ = {isa = PBXBuildFile; fileRef = 9022D1852060FBD200D9A2AE /* lf_hfs_vfsutils.h */; }; + 906EBF722063DB6C00B21E94 /* lf_hfs_generic_buf.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF702063DB6C00B21E94 /* lf_hfs_generic_buf.h */; }; + 906EBF732063DB6C00B21E94 /* lf_hfs_generic_buf.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF712063DB6C00B21E94 /* lf_hfs_generic_buf.c */; }; + 906EBF762063E44900B21E94 /* lf_hfs_readwrite_ops.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF742063E44900B21E94 /* lf_hfs_readwrite_ops.h */; }; + 906EBF772063E44900B21E94 /* lf_hfs_readwrite_ops.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF752063E44900B21E94 /* lf_hfs_readwrite_ops.c */; }; + 906EBF792063E76D00B21E94 /* lf_hfs_endian.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF782063E76D00B21E94 /* lf_hfs_endian.c */; }; + 906EBF7B2063F7CE00B21E94 /* lf_hfs_btree_node_reserve.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF7A2063F7CE00B21E94 /* lf_hfs_btree_node_reserve.c */; }; + 906EBF7D2063FB4A00B21E94 /* lf_hfs_btrees_io.h in Headers */ = {isa = PBXBuildFile; fileRef = 90F5EBB22063AA77004397B2 /* lf_hfs_btrees_io.h */; }; + 906EBF7F2063FC0900B21E94 /* lf_hfs_file_mgr_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF7E2063FC0900B21E94 /* lf_hfs_file_mgr_internal.h */; }; + 906EBF812063FF2700B21E94 /* lf_hfs_file_extent_mapping.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF802063FE3900B21E94 /* lf_hfs_file_extent_mapping.h */; }; + 906EBF8720640CDF00B21E94 /* lf_hfs_unicode_wrappers.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF8520640CDF00B21E94 /* lf_hfs_unicode_wrappers.h */; }; + 906EBF8820640CDF00B21E94 /* lf_hfs_unicode_wrappers.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF8620640CDF00B21E94 /* lf_hfs_unicode_wrappers.c */; }; + 906EBF8C2067884300B21E94 /* lf_hfs_lookup.h in Headers */ = {isa = PBXBuildFile; fileRef = 906EBF8A2067884300B21E94 /* lf_hfs_lookup.h */; }; + 
906EBF8D2067884300B21E94 /* lf_hfs_lookup.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF8B2067884300B21E94 /* lf_hfs_lookup.c */; }; + 90F5EBA62061476A004397B2 /* lf_hfs_btree.h in Headers */ = {isa = PBXBuildFile; fileRef = 90F5EBA42061476A004397B2 /* lf_hfs_btree.h */; }; + 90F5EBA72061476A004397B2 /* lf_hfs_btree.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBA52061476A004397B2 /* lf_hfs_btree.c */; }; + 90F5EBAC2063A089004397B2 /* lf_hfs_btrees_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 90F5EBAA2063A089004397B2 /* lf_hfs_btrees_private.h */; }; + 90F5EBAF2063A109004397B2 /* lf_hfs_btrees_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 90F5EBAE2063A109004397B2 /* lf_hfs_btrees_internal.h */; }; + 90F5EBB12063A929004397B2 /* lf_hfs_defs.h in Headers */ = {isa = PBXBuildFile; fileRef = 90F5EBB02063A929004397B2 /* lf_hfs_defs.h */; }; + 90F5EBB52063AA77004397B2 /* lf_hfs_btrees_io.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBB32063AA77004397B2 /* lf_hfs_btrees_io.c */; }; + 90F5EBB72063B212004397B2 /* lf_hfs_file_extent_mapping.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBB62063B212004397B2 /* lf_hfs_file_extent_mapping.c */; }; + 90F5EBB92063CC22004397B2 /* lf_hfs_btree_tree_ops.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBB82063CC22004397B2 /* lf_hfs_btree_tree_ops.c */; }; + 90F5EBBB2063CC3A004397B2 /* lf_hfs_btree_node_ops.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBBA2063CC3A004397B2 /* lf_hfs_btree_node_ops.c */; }; + 90F5EBBF2063CCE0004397B2 /* lf_hfs_btree_misc_ops.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBBD2063CCE0004397B2 /* lf_hfs_btree_misc_ops.c */; }; + 90F5EBC12063CE12004397B2 /* lf_hfs_btree_allocate.c in Sources */ = {isa = PBXBuildFile; fileRef = 90F5EBC02063CE12004397B2 /* lf_hfs_btree_allocate.c */; }; + A6E6D74020909C72002125B0 /* test-get-volume-create-time.c in Sources */ = {isa = PBXBuildFile; fileRef = A6E6D73F20909C72002125B0 /* 
test-get-volume-create-time.c */; }; C1B6FA0810CC0A0A00778D48 /* hfsutil_jnl.c in Sources */ = {isa = PBXBuildFile; fileRef = C1B6FA0610CC0A0A00778D48 /* hfsutil_jnl.c */; }; C1B6FA0910CC0A0A00778D48 /* hfsutil_main.c in Sources */ = {isa = PBXBuildFile; fileRef = C1B6FA0710CC0A0A00778D48 /* hfsutil_main.c */; }; C1B6FA3010CC0B9500778D48 /* hfs.util.8 in Copy man8 */ = {isa = PBXBuildFile; fileRef = C1B6FA2F10CC0B8A00778D48 /* hfs.util.8 */; }; + D759E27020AD75FC00792EDA /* lf_hfs_link.h in Headers */ = {isa = PBXBuildFile; fileRef = D759E26E20AD75FC00792EDA /* lf_hfs_link.h */; }; + D759E27120AD75FC00792EDA /* lf_hfs_link.c in Sources */ = {isa = PBXBuildFile; fileRef = D759E26F20AD75FC00792EDA /* lf_hfs_link.c */; }; + D769A1CC206107190022791F /* lf_hfs_vnode.c in Sources */ = {isa = PBXBuildFile; fileRef = D769A1CB206107190022791F /* lf_hfs_vnode.c */; }; + D769A1CE206107DF0022791F /* lf_hfs_cnode.c in Sources */ = {isa = PBXBuildFile; fileRef = D769A1CD206107DF0022791F /* lf_hfs_cnode.c */; }; + D769A1D0206118490022791F /* lf_hfs_chash.h in Headers */ = {isa = PBXBuildFile; fileRef = D769A1CF206118490022791F /* lf_hfs_chash.h */; }; + D769A1D3206136420022791F /* lf_hfs_vnops.h in Headers */ = {isa = PBXBuildFile; fileRef = D769A1D1206136420022791F /* lf_hfs_vnops.h */; }; + D769A1D4206136420022791F /* lf_hfs_vnops.c in Sources */ = {isa = PBXBuildFile; fileRef = D769A1D2206136420022791F /* lf_hfs_vnops.c */; }; + D769A1E62063AD680022791F /* lf_hfs_volume_allocation.h in Headers */ = {isa = PBXBuildFile; fileRef = D769A1E42063AD680022791F /* lf_hfs_volume_allocation.h */; }; + D769A1E72063AD680022791F /* lf_hfs_volume_allocation.c in Sources */ = {isa = PBXBuildFile; fileRef = D769A1E52063AD680022791F /* lf_hfs_volume_allocation.c */; }; + D769A1E92063CEA50022791F /* lf_hfs_journal.h in Headers */ = {isa = PBXBuildFile; fileRef = D769A1E82063CEA50022791F /* lf_hfs_journal.h */; }; + D769A1EC2067E6BB0022791F /* lf_hfs_attrlist.h in Headers */ = {isa = 
PBXBuildFile; fileRef = D769A1EA2067E6BB0022791F /* lf_hfs_attrlist.h */; }; + D769A1ED2067E6BB0022791F /* lf_hfs_attrlist.c in Sources */ = {isa = PBXBuildFile; fileRef = D769A1EB2067E6BB0022791F /* lf_hfs_attrlist.c */; }; + D7850549206B831000B9C5E4 /* lf_hfs_xattr.h in Headers */ = {isa = PBXBuildFile; fileRef = D7850547206B831000B9C5E4 /* lf_hfs_xattr.h */; }; + D785054A206B831000B9C5E4 /* lf_hfs_xattr.c in Sources */ = {isa = PBXBuildFile; fileRef = D7850548206B831000B9C5E4 /* lf_hfs_xattr.c */; }; + D79783FD205EC09000E93B37 /* lf_hfs_vnode.h in Headers */ = {isa = PBXBuildFile; fileRef = D79783FC205EC09000E93B37 /* lf_hfs_vnode.h */; }; + D79783FF205EC0E000E93B37 /* lf_hfs.h in Headers */ = {isa = PBXBuildFile; fileRef = D79783FE205EC0E000E93B37 /* lf_hfs.h */; }; + D7978402205EC12700E93B37 /* lf_hfs_locks.h in Headers */ = {isa = PBXBuildFile; fileRef = D7978400205EC12700E93B37 /* lf_hfs_locks.h */; }; + D7978404205EC12700E93B37 /* lf_hfs_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = D7978401205EC12700E93B37 /* lf_hfs_locks.c */; }; + D7978406205EC25B00E93B37 /* lf_hfs_mount.h in Headers */ = {isa = PBXBuildFile; fileRef = D7978405205EC25B00E93B37 /* lf_hfs_mount.h */; }; + D7978408205EC38900E93B37 /* lf_hfs_format.h in Headers */ = {isa = PBXBuildFile; fileRef = D7978407205EC38900E93B37 /* lf_hfs_format.h */; }; + D797840A205EC43000E93B37 /* lf_hfs_catalog.h in Headers */ = {isa = PBXBuildFile; fileRef = D7978409205EC42C00E93B37 /* lf_hfs_catalog.h */; }; + D7978410205EC76100E93B37 /* lf_hfs_cnode.h in Headers */ = {isa = PBXBuildFile; fileRef = D797840F205EC76100E93B37 /* lf_hfs_cnode.h */; }; + D7978417205EC9C300E93B37 /* lf_hfs_vfsops.c in Sources */ = {isa = PBXBuildFile; fileRef = D7978414205EC9C300E93B37 /* lf_hfs_vfsops.c */; }; + D7978420205ED7E600E93B37 /* lf_hfs_vfsutils.c in Sources */ = {isa = PBXBuildFile; fileRef = D797841E205ED7E600E93B37 /* lf_hfs_vfsutils.c */; }; + D7978423205FB57600E93B37 /* lf_hfs_chash.c in Sources */ = {isa = 
PBXBuildFile; fileRef = D7978421205FB57600E93B37 /* lf_hfs_chash.c */; }; + D7978426205FC09A00E93B37 /* lf_hfs_endian.h in Headers */ = {isa = PBXBuildFile; fileRef = D7978424205FC09A00E93B37 /* lf_hfs_endian.h */; }; + D79784412060037400E93B37 /* lf_hfs_raw_read_write.h in Headers */ = {isa = PBXBuildFile; fileRef = D797843F2060037400E93B37 /* lf_hfs_raw_read_write.h */; }; + D79784422060037400E93B37 /* lf_hfs_raw_read_write.c in Sources */ = {isa = PBXBuildFile; fileRef = D79784402060037400E93B37 /* lf_hfs_raw_read_write.c */; }; + D7BD8F9C20AC388E00E93640 /* lf_hfs_catalog.c in Sources */ = {isa = PBXBuildFile; fileRef = 906EBF82206409B800B21E94 /* lf_hfs_catalog.c */; }; + EE73740520644328004C2F0E /* lf_hfs_sbunicode.h in Headers */ = {isa = PBXBuildFile; fileRef = EE73740320644328004C2F0E /* lf_hfs_sbunicode.h */; }; + EE73740620644328004C2F0E /* lf_hfs_sbunicode.c in Sources */ = {isa = PBXBuildFile; fileRef = EE73740420644328004C2F0E /* lf_hfs_sbunicode.c */; }; + EE737408206443A1004C2F0E /* lf_hfs_utfconvdata.h in Headers */ = {isa = PBXBuildFile; fileRef = EE737407206443A1004C2F0E /* lf_hfs_utfconvdata.h */; }; + F90E174921ADFFD100345EE3 /* test-cas-bsdflags.c in Sources */ = {isa = PBXBuildFile; fileRef = F90E174821ADFFD100345EE3 /* test-cas-bsdflags.c */; }; FB20E0E51AE950C200CEBE7B /* hfs_iokit.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FB20E0E41AE950C200CEBE7B /* hfs_iokit.cpp */; }; FB20E12D1AE9529400CEBE7B /* BTree.c in Sources */ = {isa = PBXBuildFile; fileRef = FB20E0E81AE9529400CEBE7B /* BTree.c */; }; FB20E12E1AE9529400CEBE7B /* BTreeAllocate.c in Sources */ = {isa = PBXBuildFile; fileRef = FB20E0E91AE9529400CEBE7B /* BTreeAllocate.c */; }; @@ -457,6 +555,20 @@ remoteGlobalIDString = 4DFD94BC15373C2C0039B6BA; remoteInfo = fsck_makestrings; }; + 900BDEE51FF919E7002F7EC0 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 
900BDED31FF919C2002F7EC0; + remoteInfo = livefiles_hfs; + }; + 9430FE97211658E7009CC8AF /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 900BDED31FF919C2002F7EC0; + remoteInfo = livefiles_hfs; + }; FB48E4BB1BB30CC400523121 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -673,6 +785,15 @@ name = "Copy man8"; runOnlyForDeploymentPostprocessing = 1; }; + 900BDEDB1FF919DE002F7EC0 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; FB76B3D01B7A4BE600FA9F2B /* CopyFiles */ = { isa = PBXCopyFilesBuildPhase; buildActionMask = 2147483647; @@ -738,6 +859,7 @@ 0703A0531CD826160035BCFD /* test-defrag.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "test-defrag.c"; sourceTree = ""; }; 07C2BF881CB43F5E00D8327D /* test-renamex.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "test-renamex.c"; sourceTree = ""; }; 09D6B7D61E317ED2003C20DC /* test_disklevel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = test_disklevel.c; sourceTree = ""; }; + 18B450682104D958002052BF /* lf_hfs_journal.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lf_hfs_journal.c; sourceTree = ""; }; 2A386A3A1C221E67007FEDAC /* test-list-ids.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "test-list-ids.c"; sourceTree = ""; }; 2A84DBD31D9E1179007964B8 /* test-raw-dev-unaligned.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "test-raw-dev-unaligned.c"; sourceTree = ""; }; 2A9399941BDFEA6E00FB075B /* 
test-access.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "test-access.c"; sourceTree = ""; }; @@ -852,7 +974,7 @@ 4DFD94AF153649070039B6BA /* newfs_hfs_debug */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = newfs_hfs_debug; sourceTree = BUILT_PRODUCTS_DIR; }; 4DFD94E615373C2C0039B6BA /* fsck_makestrings */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fsck_makestrings; sourceTree = BUILT_PRODUCTS_DIR; }; 4DFD95121537402A0039B6BA /* hfs.fs */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = hfs.fs; sourceTree = BUILT_PRODUCTS_DIR; }; - 4DFD9537153746210039B6BA /* English */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = ""; }; + 4DFD9537153746210039B6BA /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; 4DFD9539153746B30039B6BA /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = hfs.xcconfig; sourceTree = ""; }; 7204A9401BE94359007A9898 /* img-to-c.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "img-to-c.c"; sourceTree = ""; }; @@ -863,14 +985,96 @@ 863D03961820761900A4F0C4 /* util.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = util.c; sourceTree = ""; }; 86CBF37F183186C300A64A93 /* libhfs_metadata.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libhfs_metadata.a; sourceTree = BUILT_PRODUCTS_DIR; }; 86CBF3851831880F00A64A93 /* iterate_hfs_metadata.c */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = iterate_hfs_metadata.c; path = libhfs_metadata/iterate_hfs_metadata.c; sourceTree = SOURCE_ROOT; }; + 900BDECE1FF9198E002F7EC0 /* livefiles_hfs_tester.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = livefiles_hfs_tester.h; sourceTree = ""; }; + 900BDECF1FF9198E002F7EC0 /* livefiles_hfs_tester.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = livefiles_hfs_tester.c; sourceTree = ""; }; + 900BDED41FF919C2002F7EC0 /* livefiles_hfs.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = livefiles_hfs.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + 900BDEDD1FF919DE002F7EC0 /* livefiles_hfs_tester */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = livefiles_hfs_tester; sourceTree = BUILT_PRODUCTS_DIR; }; + 900BDEE71FF91ADF002F7EC0 /* livefiles_hfs_tester.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = livefiles_hfs_tester.entitlements; sourceTree = ""; }; + 900BDEE91FF91C2A002F7EC0 /* lf_hfs_fsops_handler.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_fsops_handler.h; sourceTree = ""; }; + 900BDEEA1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_fsops_handler.c; sourceTree = ""; }; + 900BDEED1FF91C46002F7EC0 /* lf_hfs_common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_common.h; sourceTree = ""; }; + 900BDEF31FF9202E002F7EC0 /* lf_hfs_dirops_handler.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_dirops_handler.h; sourceTree = ""; }; + 900BDEF41FF9202E002F7EC0 /* lf_hfs_dirops_handler.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_dirops_handler.c; sourceTree = ""; 
}; + 900BDEF71FF92170002F7EC0 /* lf_hfs_fileops_handler.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_fileops_handler.h; sourceTree = ""; }; + 900BDEF81FF92170002F7EC0 /* lf_hfs_fileops_handler.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_fileops_handler.c; sourceTree = ""; usesTabs = 0; }; + 900BDEFB1FF9246F002F7EC0 /* lf_hfs_logger.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_logger.h; sourceTree = ""; }; + 900BDEFC1FF9246F002F7EC0 /* lf_hfs_logger.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_logger.c; sourceTree = ""; }; + 9022D16F205EC16900D9A2AE /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.Internal.sdk/System/Library/Frameworks/CoreFoundation.framework; sourceTree = DEVELOPER_DIR; }; + 9022D172205FE5FA00D9A2AE /* lf_hfs_utils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_utils.h; sourceTree = ""; }; + 9022D173205FE5FA00D9A2AE /* lf_hfs_utils.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_utils.c; sourceTree = ""; }; + 9022D177205FEBE200D9A2AE /* lf_MacOSStubs.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_MacOSStubs.c; sourceTree = ""; }; + 9022D17F20600D9E00D9A2AE /* lf_hfs_rangelist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_rangelist.h; sourceTree = ""; }; + 9022D18020600D9E00D9A2AE /* lf_hfs_rangelist.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_rangelist.c; sourceTree = ""; }; + 9022D1832060FBBE00D9A2AE /* lf_hfs_vfsops.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_vfsops.h; sourceTree = ""; }; + 9022D1852060FBD200D9A2AE /* lf_hfs_vfsutils.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_vfsutils.h; sourceTree = ""; }; + 906EBF702063DB6C00B21E94 /* lf_hfs_generic_buf.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_generic_buf.h; sourceTree = ""; }; + 906EBF712063DB6C00B21E94 /* lf_hfs_generic_buf.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_generic_buf.c; sourceTree = ""; }; + 906EBF742063E44900B21E94 /* lf_hfs_readwrite_ops.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_readwrite_ops.h; sourceTree = ""; }; + 906EBF752063E44900B21E94 /* lf_hfs_readwrite_ops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_readwrite_ops.c; sourceTree = ""; }; + 906EBF782063E76D00B21E94 /* lf_hfs_endian.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_endian.c; sourceTree = ""; }; + 906EBF7A2063F7CE00B21E94 /* lf_hfs_btree_node_reserve.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree_node_reserve.c; sourceTree = ""; }; + 906EBF7E2063FC0900B21E94 /* lf_hfs_file_mgr_internal.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_file_mgr_internal.h; sourceTree = ""; }; + 906EBF802063FE3900B21E94 /* lf_hfs_file_extent_mapping.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_file_extent_mapping.h; sourceTree = ""; }; + 906EBF82206409B800B21E94 /* lf_hfs_catalog.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_catalog.c; sourceTree = ""; }; + 906EBF8520640CDF00B21E94 /* lf_hfs_unicode_wrappers.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_unicode_wrappers.h; sourceTree = ""; }; + 906EBF8620640CDF00B21E94 /* lf_hfs_unicode_wrappers.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_unicode_wrappers.c; sourceTree = ""; }; + 
906EBF8920640D8200B21E94 /* lf_hfs_ucs_string_cmp_data.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_ucs_string_cmp_data.h; sourceTree = ""; }; + 906EBF8A2067884300B21E94 /* lf_hfs_lookup.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_lookup.h; sourceTree = ""; }; + 906EBF8B2067884300B21E94 /* lf_hfs_lookup.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_lookup.c; sourceTree = ""; }; + 90F5EBA42061476A004397B2 /* lf_hfs_btree.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_btree.h; sourceTree = ""; }; + 90F5EBA52061476A004397B2 /* lf_hfs_btree.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree.c; sourceTree = ""; }; + 90F5EBAA2063A089004397B2 /* lf_hfs_btrees_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_btrees_private.h; sourceTree = ""; }; + 90F5EBAE2063A109004397B2 /* lf_hfs_btrees_internal.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_btrees_internal.h; sourceTree = ""; }; + 90F5EBB02063A929004397B2 /* lf_hfs_defs.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_defs.h; sourceTree = ""; }; + 90F5EBB22063AA77004397B2 /* lf_hfs_btrees_io.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_btrees_io.h; sourceTree = ""; }; + 90F5EBB32063AA77004397B2 /* lf_hfs_btrees_io.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btrees_io.c; sourceTree = ""; }; + 90F5EBB62063B212004397B2 /* lf_hfs_file_extent_mapping.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_file_extent_mapping.c; sourceTree = ""; }; + 90F5EBB82063CC22004397B2 /* lf_hfs_btree_tree_ops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree_tree_ops.c; sourceTree = ""; }; + 
90F5EBBA2063CC3A004397B2 /* lf_hfs_btree_node_ops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree_node_ops.c; sourceTree = ""; }; + 90F5EBBD2063CCE0004397B2 /* lf_hfs_btree_misc_ops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree_misc_ops.c; sourceTree = ""; }; + 90F5EBC02063CE12004397B2 /* lf_hfs_btree_allocate.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_btree_allocate.c; sourceTree = ""; }; 9D7AAC861B44874E0001F573 /* mount_hfs.osx.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = mount_hfs.osx.entitlements; sourceTree = ""; }; 9D7AAC871B44880B0001F573 /* hfs_util.osx.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = hfs_util.osx.entitlements; sourceTree = ""; }; 9D9067881B44633C003D2117 /* fsck_hfs.osx.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = fsck_hfs.osx.entitlements; sourceTree = ""; }; + A6E6D73F20909C72002125B0 /* test-get-volume-create-time.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "test-get-volume-create-time.c"; sourceTree = ""; }; C1B6FA0610CC0A0A00778D48 /* hfsutil_jnl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfsutil_jnl.c; sourceTree = ""; }; C1B6FA0710CC0A0A00778D48 /* hfsutil_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfsutil_main.c; sourceTree = ""; }; C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = /System/Library/Frameworks/CoreFoundation.framework; sourceTree = ""; }; C1B6FA2F10CC0B8A00778D48 /* hfs.util.8 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = hfs.util.8; sourceTree = ""; }; C1B6FD2B10CC0DB200778D48 /* hfs.util */ = {isa = PBXFileReference; 
explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = hfs.util; sourceTree = BUILT_PRODUCTS_DIR; }; + D759E26E20AD75FC00792EDA /* lf_hfs_link.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_link.h; sourceTree = ""; }; + D759E26F20AD75FC00792EDA /* lf_hfs_link.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_link.c; sourceTree = ""; }; + D769A1CB206107190022791F /* lf_hfs_vnode.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_vnode.c; sourceTree = ""; }; + D769A1CD206107DF0022791F /* lf_hfs_cnode.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_cnode.c; sourceTree = ""; }; + D769A1CF206118490022791F /* lf_hfs_chash.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_chash.h; sourceTree = ""; }; + D769A1D1206136420022791F /* lf_hfs_vnops.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_vnops.h; sourceTree = ""; }; + D769A1D2206136420022791F /* lf_hfs_vnops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_vnops.c; sourceTree = ""; }; + D769A1E42063AD680022791F /* lf_hfs_volume_allocation.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_volume_allocation.h; sourceTree = ""; }; + D769A1E52063AD680022791F /* lf_hfs_volume_allocation.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_volume_allocation.c; sourceTree = ""; }; + D769A1E82063CEA50022791F /* lf_hfs_journal.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_journal.h; sourceTree = ""; }; + D769A1EA2067E6BB0022791F /* lf_hfs_attrlist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_attrlist.h; sourceTree = ""; }; + D769A1EB2067E6BB0022791F /* lf_hfs_attrlist.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = 
lf_hfs_attrlist.c; sourceTree = ""; }; + D7850547206B831000B9C5E4 /* lf_hfs_xattr.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_xattr.h; sourceTree = ""; }; + D7850548206B831000B9C5E4 /* lf_hfs_xattr.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_xattr.c; sourceTree = ""; }; + D79783FC205EC09000E93B37 /* lf_hfs_vnode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_vnode.h; sourceTree = ""; }; + D79783FE205EC0E000E93B37 /* lf_hfs.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs.h; sourceTree = ""; }; + D7978400205EC12700E93B37 /* lf_hfs_locks.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_locks.h; sourceTree = ""; }; + D7978401205EC12700E93B37 /* lf_hfs_locks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_locks.c; sourceTree = ""; }; + D7978405205EC25B00E93B37 /* lf_hfs_mount.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_mount.h; sourceTree = ""; }; + D7978407205EC38900E93B37 /* lf_hfs_format.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_format.h; sourceTree = ""; }; + D7978409205EC42C00E93B37 /* lf_hfs_catalog.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_catalog.h; sourceTree = ""; }; + D797840F205EC76100E93B37 /* lf_hfs_cnode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_cnode.h; sourceTree = ""; }; + D7978414205EC9C300E93B37 /* lf_hfs_vfsops.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_vfsops.c; sourceTree = ""; usesTabs = 0; }; + D797841E205ED7E600E93B37 /* lf_hfs_vfsutils.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_vfsutils.c; sourceTree = ""; }; + D7978421205FB57600E93B37 /* lf_hfs_chash.c */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.c; path = lf_hfs_chash.c; sourceTree = ""; }; + D7978424205FC09A00E93B37 /* lf_hfs_endian.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_endian.h; sourceTree = ""; }; + D797843D206001F000E93B37 /* lf_MAcOSStubs.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_MAcOSStubs.c; sourceTree = ""; }; + D797843F2060037400E93B37 /* lf_hfs_raw_read_write.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_raw_read_write.h; sourceTree = ""; }; + D79784402060037400E93B37 /* lf_hfs_raw_read_write.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_raw_read_write.c; sourceTree = ""; }; + EE73740320644328004C2F0E /* lf_hfs_sbunicode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_sbunicode.h; sourceTree = ""; }; + EE73740420644328004C2F0E /* lf_hfs_sbunicode.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = lf_hfs_sbunicode.c; sourceTree = ""; }; + EE737407206443A1004C2F0E /* lf_hfs_utfconvdata.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lf_hfs_utfconvdata.h; sourceTree = ""; }; + F90E174821ADFFD100345EE3 /* test-cas-bsdflags.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "test-cas-bsdflags.c"; sourceTree = ""; }; FB02B8CE1B5ED0B50093DD47 /* make_opensource.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = make_opensource.sh; sourceTree = ""; }; FB20E0E01AE950C200CEBE7B /* HFS.kext */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = HFS.kext; sourceTree = BUILT_PRODUCTS_DIR; }; FB20E0E31AE950C200CEBE7B /* macosx-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "macosx-Info.plist"; sourceTree = ""; }; @@ -1069,6 +1273,22 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + 900BDED11FF919C2002F7EC0 /* Frameworks */ 
= { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 900BDEDA1FF919DE002F7EC0 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 9022D171205EC18500D9A2AE /* livefiles_hfs.dylib in Frameworks */, + 9022D170205EC16900D9A2AE /* CoreFoundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; FB20E0DC1AE950C200CEBE7B /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -1133,6 +1353,7 @@ 08FB7794FE84155DC02AAC07 /* hfs */ = { isa = PBXGroup; children = ( + 900BDECD1FF91960002F7EC0 /* livefiles_hfs_plugin */, 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */, FDD9FA4614A1343D0043D4A9 /* CopyHFSMeta */, 86CBF384183187D500A64A93 /* libhfs_metadata */, @@ -1149,6 +1370,7 @@ FDD9FA4014A133A50043D4A9 /* Frameworks */, C1B6FD2C10CC0DB200778D48 /* Products */, FB02B8CE1B5ED0B50093DD47 /* make_opensource.sh */, + D7978444206006CC00E93B37 /* Recovered References */, ); name = hfs; sourceTree = ""; @@ -1293,6 +1515,89 @@ path = CopyHFSMeta; sourceTree = ""; }; + 900BDECD1FF91960002F7EC0 /* livefiles_hfs_plugin */ = { + isa = PBXGroup; + children = ( + 18B450682104D958002052BF /* lf_hfs_journal.c */, + 906EBF82206409B800B21E94 /* lf_hfs_catalog.c */, + D7978409205EC42C00E93B37 /* lf_hfs_catalog.h */, + D7978421205FB57600E93B37 /* lf_hfs_chash.c */, + D769A1CF206118490022791F /* lf_hfs_chash.h */, + D769A1CD206107DF0022791F /* lf_hfs_cnode.c */, + D797840F205EC76100E93B37 /* lf_hfs_cnode.h */, + 900BDEED1FF91C46002F7EC0 /* lf_hfs_common.h */, + 900BDEF41FF9202E002F7EC0 /* lf_hfs_dirops_handler.c */, + 900BDEF31FF9202E002F7EC0 /* lf_hfs_dirops_handler.h */, + D7978424205FC09A00E93B37 /* lf_hfs_endian.h */, + 900BDEF81FF92170002F7EC0 /* lf_hfs_fileops_handler.c */, + 900BDEF71FF92170002F7EC0 /* lf_hfs_fileops_handler.h */, + D7978407205EC38900E93B37 /* lf_hfs_format.h */, + 900BDEEA1FF91C2A002F7EC0 
/* lf_hfs_fsops_handler.c */, + 900BDEE91FF91C2A002F7EC0 /* lf_hfs_fsops_handler.h */, + D769A1E82063CEA50022791F /* lf_hfs_journal.h */, + D7978401205EC12700E93B37 /* lf_hfs_locks.c */, + D7978400205EC12700E93B37 /* lf_hfs_locks.h */, + 900BDEFC1FF9246F002F7EC0 /* lf_hfs_logger.c */, + 900BDEFB1FF9246F002F7EC0 /* lf_hfs_logger.h */, + D7978405205EC25B00E93B37 /* lf_hfs_mount.h */, + 9022D18020600D9E00D9A2AE /* lf_hfs_rangelist.c */, + 9022D17F20600D9E00D9A2AE /* lf_hfs_rangelist.h */, + D79784402060037400E93B37 /* lf_hfs_raw_read_write.c */, + D797843F2060037400E93B37 /* lf_hfs_raw_read_write.h */, + 9022D173205FE5FA00D9A2AE /* lf_hfs_utils.c */, + 9022D172205FE5FA00D9A2AE /* lf_hfs_utils.h */, + D7978414205EC9C300E93B37 /* lf_hfs_vfsops.c */, + 9022D1832060FBBE00D9A2AE /* lf_hfs_vfsops.h */, + D797841E205ED7E600E93B37 /* lf_hfs_vfsutils.c */, + 9022D1852060FBD200D9A2AE /* lf_hfs_vfsutils.h */, + D769A1CB206107190022791F /* lf_hfs_vnode.c */, + D79783FC205EC09000E93B37 /* lf_hfs_vnode.h */, + D769A1D2206136420022791F /* lf_hfs_vnops.c */, + D769A1D1206136420022791F /* lf_hfs_vnops.h */, + D769A1E52063AD680022791F /* lf_hfs_volume_allocation.c */, + D769A1E42063AD680022791F /* lf_hfs_volume_allocation.h */, + D79783FE205EC0E000E93B37 /* lf_hfs.h */, + 900BDECF1FF9198E002F7EC0 /* livefiles_hfs_tester.c */, + 900BDEE71FF91ADF002F7EC0 /* livefiles_hfs_tester.entitlements */, + 900BDECE1FF9198E002F7EC0 /* livefiles_hfs_tester.h */, + 90F5EBA42061476A004397B2 /* lf_hfs_btree.h */, + 90F5EBA52061476A004397B2 /* lf_hfs_btree.c */, + 90F5EBAA2063A089004397B2 /* lf_hfs_btrees_private.h */, + 90F5EBAE2063A109004397B2 /* lf_hfs_btrees_internal.h */, + 90F5EBB02063A929004397B2 /* lf_hfs_defs.h */, + 90F5EBB22063AA77004397B2 /* lf_hfs_btrees_io.h */, + 90F5EBB32063AA77004397B2 /* lf_hfs_btrees_io.c */, + 90F5EBB62063B212004397B2 /* lf_hfs_file_extent_mapping.c */, + 90F5EBB82063CC22004397B2 /* lf_hfs_btree_tree_ops.c */, + 90F5EBBA2063CC3A004397B2 /* lf_hfs_btree_node_ops.c */, 
+ 90F5EBBD2063CCE0004397B2 /* lf_hfs_btree_misc_ops.c */, + 90F5EBC02063CE12004397B2 /* lf_hfs_btree_allocate.c */, + 906EBF702063DB6C00B21E94 /* lf_hfs_generic_buf.h */, + 906EBF712063DB6C00B21E94 /* lf_hfs_generic_buf.c */, + 906EBF742063E44900B21E94 /* lf_hfs_readwrite_ops.h */, + 906EBF752063E44900B21E94 /* lf_hfs_readwrite_ops.c */, + 906EBF782063E76D00B21E94 /* lf_hfs_endian.c */, + 906EBF7A2063F7CE00B21E94 /* lf_hfs_btree_node_reserve.c */, + 906EBF7E2063FC0900B21E94 /* lf_hfs_file_mgr_internal.h */, + 906EBF802063FE3900B21E94 /* lf_hfs_file_extent_mapping.h */, + 906EBF8520640CDF00B21E94 /* lf_hfs_unicode_wrappers.h */, + 906EBF8620640CDF00B21E94 /* lf_hfs_unicode_wrappers.c */, + 906EBF8920640D8200B21E94 /* lf_hfs_ucs_string_cmp_data.h */, + EE73740320644328004C2F0E /* lf_hfs_sbunicode.h */, + EE73740420644328004C2F0E /* lf_hfs_sbunicode.c */, + EE737407206443A1004C2F0E /* lf_hfs_utfconvdata.h */, + 906EBF8A2067884300B21E94 /* lf_hfs_lookup.h */, + 906EBF8B2067884300B21E94 /* lf_hfs_lookup.c */, + D769A1EA2067E6BB0022791F /* lf_hfs_attrlist.h */, + D769A1EB2067E6BB0022791F /* lf_hfs_attrlist.c */, + D7850547206B831000B9C5E4 /* lf_hfs_xattr.h */, + D7850548206B831000B9C5E4 /* lf_hfs_xattr.c */, + D759E26E20AD75FC00792EDA /* lf_hfs_link.h */, + D759E26F20AD75FC00792EDA /* lf_hfs_link.c */, + ); + path = livefiles_hfs_plugin; + sourceTree = ""; + }; C1B6FD2C10CC0DB200778D48 /* Products */ = { isa = PBXGroup; children = ( @@ -1316,10 +1621,21 @@ FBCC52FE1B852758008B752C /* hfs-alloc-trace */, FB48E4A61BB3070500523121 /* Kernel.framework */, FB48E5041BB3798500523121 /* Sim_Headers */, + 900BDED41FF919C2002F7EC0 /* livefiles_hfs.dylib */, + 900BDEDD1FF919DE002F7EC0 /* livefiles_hfs_tester */, ); name = Products; sourceTree = ""; }; + D7978444206006CC00E93B37 /* Recovered References */ = { + isa = PBXGroup; + children = ( + 9022D177205FEBE200D9A2AE /* lf_MacOSStubs.c */, + D797843D206001F000E93B37 /* lf_MAcOSStubs.c */, + ); + name = "Recovered References"; + 
sourceTree = ""; + }; FB20E0E11AE950C200CEBE7B /* core */ = { isa = PBXGroup; children = ( @@ -1438,6 +1754,7 @@ 2A93999C1BE0146000FB075B /* test-deep-rm.c */, 2A9399961BDFEF3900FB075B /* test-chflags.c */, 2A9399941BDFEA6E00FB075B /* test-access.c */, + F90E174821ADFFD100345EE3 /* test-cas-bsdflags.c */, FB55AE521B7C271000701D03 /* test-doc-tombstone.c */, FB76B3DA1B7A52BE00FA9F2B /* test-external-jnl.c */, FB2B5C721B87A0BF00ACEDD9 /* test-getattrlist.c */, @@ -1448,6 +1765,7 @@ FB2B5C551B87656900ACEDD9 /* test-transcode.m */, FBD69AF81B91309C0022ECAD /* test-dateadded.c */, 09D6B7D61E317ED2003C20DC /* test_disklevel.c */, + A6E6D73F20909C72002125B0 /* test-get-volume-create-time.c */, ); path = cases; sourceTree = ""; @@ -1514,6 +1832,7 @@ isa = PBXGroup; children = ( C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */, + 9022D16F205EC16900D9A2AE /* CoreFoundation.framework */, 4DE6C7461535012200C11066 /* IOKit.framework */, FDD9FA5B14A135840043D4A9 /* libz.dylib */, 4DE6C74A1535018100C11066 /* libutil.dylib */, @@ -1553,6 +1872,51 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + 900BDED21FF919C2002F7EC0 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 906EBF812063FF2700B21E94 /* lf_hfs_file_extent_mapping.h in Headers */, + 900BDEF91FF92170002F7EC0 /* lf_hfs_fileops_handler.h in Headers */, + D7978410205EC76100E93B37 /* lf_hfs_cnode.h in Headers */, + D769A1EC2067E6BB0022791F /* lf_hfs_attrlist.h in Headers */, + 906EBF8C2067884300B21E94 /* lf_hfs_lookup.h in Headers */, + D769A1E92063CEA50022791F /* lf_hfs_journal.h in Headers */, + 900BDEF51FF9202E002F7EC0 /* lf_hfs_dirops_handler.h in Headers */, + D797840A205EC43000E93B37 /* lf_hfs_catalog.h in Headers */, + D7978426205FC09A00E93B37 /* lf_hfs_endian.h in Headers */, + D769A1D0206118490022791F /* lf_hfs_chash.h in Headers */, + D769A1E62063AD680022791F /* lf_hfs_volume_allocation.h in Headers */, + 900BDEEB1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.h in Headers 
*/, + 9022D18120600D9E00D9A2AE /* lf_hfs_rangelist.h in Headers */, + 9022D1842060FBBE00D9A2AE /* lf_hfs_vfsops.h in Headers */, + D79783FF205EC0E000E93B37 /* lf_hfs.h in Headers */, + 900BDEFD1FF9246F002F7EC0 /* lf_hfs_logger.h in Headers */, + 90F5EBA62061476A004397B2 /* lf_hfs_btree.h in Headers */, + 906EBF7F2063FC0900B21E94 /* lf_hfs_file_mgr_internal.h in Headers */, + EE737408206443A1004C2F0E /* lf_hfs_utfconvdata.h in Headers */, + 90F5EBAC2063A089004397B2 /* lf_hfs_btrees_private.h in Headers */, + D769A1D3206136420022791F /* lf_hfs_vnops.h in Headers */, + 9022D1862060FBD200D9A2AE /* lf_hfs_vfsutils.h in Headers */, + 9022D174205FE5FA00D9A2AE /* lf_hfs_utils.h in Headers */, + EE73740520644328004C2F0E /* lf_hfs_sbunicode.h in Headers */, + D759E27020AD75FC00792EDA /* lf_hfs_link.h in Headers */, + 90F5EBAF2063A109004397B2 /* lf_hfs_btrees_internal.h in Headers */, + D79784412060037400E93B37 /* lf_hfs_raw_read_write.h in Headers */, + D7978406205EC25B00E93B37 /* lf_hfs_mount.h in Headers */, + 906EBF722063DB6C00B21E94 /* lf_hfs_generic_buf.h in Headers */, + 906EBF7D2063FB4A00B21E94 /* lf_hfs_btrees_io.h in Headers */, + D7978408205EC38900E93B37 /* lf_hfs_format.h in Headers */, + D7850549206B831000B9C5E4 /* lf_hfs_xattr.h in Headers */, + 906EBF8720640CDF00B21E94 /* lf_hfs_unicode_wrappers.h in Headers */, + 90F5EBB12063A929004397B2 /* lf_hfs_defs.h in Headers */, + 900BDEEE1FF91C46002F7EC0 /* lf_hfs_common.h in Headers */, + 906EBF762063E44900B21E94 /* lf_hfs_readwrite_ops.h in Headers */, + D7978402205EC12700E93B37 /* lf_hfs_locks.h in Headers */, + D79783FD205EC09000E93B37 /* lf_hfs_vnode.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; FB20E0DD1AE950C200CEBE7B /* Headers */ = { isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; @@ -1653,7 +2017,6 @@ 4D0E89981534FE65004CD678 /* Sources */, 4D0E89991534FE65004CD678 /* Frameworks */, 4D0E899A1534FE65004CD678 /* Copy man8 */, - 4DFD953E153783DA0039B6BA /* Create symlink */, ); 
buildRules = ( ); @@ -1671,7 +2034,6 @@ 4DE6C757153504C100C11066 /* Sources */, 4DE6C758153504C100C11066 /* Frameworks */, 4DE6C759153504C100C11066 /* Copy man8 */, - 4DFD953F1537841C0039B6BA /* Create symlink */, ); buildRules = ( ); @@ -1690,7 +2052,6 @@ 4DFD93F11535FF510039B6BA /* Frameworks */, 4DFD93F21535FF510039B6BA /* Copy man8 */, 4DFD953B15377BC60039B6BA /* Copy fsck_keys.h */, - 4DFD95401537844E0039B6BA /* Create symlink */, ); buildRules = ( ); @@ -1740,6 +2101,7 @@ buildPhases = ( 4DFD95101537402A0039B6BA /* Resources */, FB7B02E71B55634200BEE4BE /* CopyFiles */, + 52C08E592179679300D1618E /* Create Symlink */, ); buildRules = ( ); @@ -1790,6 +2152,41 @@ productReference = C1B6FD2B10CC0DB200778D48 /* hfs.util */; productType = "com.apple.product-type.tool"; }; + 900BDED31FF919C2002F7EC0 /* livefiles_hfs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 900BDED51FF919C2002F7EC0 /* Build configuration list for PBXNativeTarget "livefiles_hfs" */; + buildPhases = ( + 900BDED01FF919C2002F7EC0 /* Sources */, + 900BDED11FF919C2002F7EC0 /* Frameworks */, + 900BDED21FF919C2002F7EC0 /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = livefiles_hfs; + productName = livefiles_hfs; + productReference = 900BDED41FF919C2002F7EC0 /* livefiles_hfs.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; + 900BDEDC1FF919DE002F7EC0 /* livefiles_hfs_tester */ = { + isa = PBXNativeTarget; + buildConfigurationList = 900BDEE11FF919DE002F7EC0 /* Build configuration list for PBXNativeTarget "livefiles_hfs_tester" */; + buildPhases = ( + 900BDED91FF919DE002F7EC0 /* Sources */, + 900BDEDA1FF919DE002F7EC0 /* Frameworks */, + 900BDEDB1FF919DE002F7EC0 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 900BDEE61FF919E7002F7EC0 /* PBXTargetDependency */, + ); + name = livefiles_hfs_tester; + productName = livefiles_hfs_tester; + productReference = 900BDEDD1FF919DE002F7EC0 /* livefiles_hfs_tester */; + productType = 
"com.apple.product-type.tool"; + }; FB20E0DF1AE950C200CEBE7B /* kext */ = { isa = PBXNativeTarget; buildConfigurationList = FB20E0E61AE950C200CEBE7B /* Build configuration list for PBXNativeTarget "kext" */; @@ -1988,6 +2385,22 @@ CreatedOnToolsVersion = 8.3; ProvisioningStyle = Automatic; }; + 900BDED31FF919C2002F7EC0 = { + CreatedOnToolsVersion = 9.3; + ProvisioningStyle = Automatic; + }; + 900BDEDC1FF919DE002F7EC0 = { + CreatedOnToolsVersion = 9.3; + ProvisioningStyle = Automatic; + }; + 9430FE92211658C1009CC8AF = { + CreatedOnToolsVersion = 10.0; + ProvisioningStyle = Automatic; + }; + DB1AAB7C20472D140036167F = { + CreatedOnToolsVersion = 9.3; + ProvisioningStyle = Automatic; + }; FB20E0DF1AE950C200CEBE7B = { CreatedOnToolsVersion = 6.3; }; @@ -2028,11 +2441,10 @@ }; buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "hfs" */; compatibilityVersion = "Xcode 3.2"; - developmentRegion = English; + developmentRegion = en; hasScannedForEncodings = 1; knownRegions = ( en, - English, ); mainGroup = 08FB7794FE84155DC02AAC07 /* hfs */; productRefGroup = C1B6FD2C10CC0DB200778D48 /* Products */; @@ -2041,6 +2453,7 @@ targets = ( 4DD302571538DB2700001AA0 /* All_MacOSX */, 4DD3025A1538DB3A00001AA0 /* All_iOS */, + DB1AAB7C20472D140036167F /* Swift_iOS */, 4DBD523B1548A488007AA736 /* Common */, 4DFD95111537402A0039B6BA /* hfs.fs */, 8DD76FA90486AB0100D96B5E /* hfs.util */, @@ -2069,6 +2482,9 @@ FB48E5031BB3798500523121 /* Sim_Headers */, FB7C140C1C2368E6004F8B2C /* kext-version */, 07828B591E3FDD25009D2106 /* hfs_libraries */, + 900BDED31FF919C2002F7EC0 /* livefiles_hfs */, + 900BDEDC1FF919DE002F7EC0 /* livefiles_hfs_tester */, + 9430FE92211658C1009CC8AF /* hfs_livefiles */, ); }; /* End PBXProject section */ @@ -2109,56 +2525,29 @@ shellScript = "${BUILT_PRODUCTS_DIR}/fsck_makestrings | iconv -f UTF-8 -t UTF-16 > ${DSTROOT}${FS_BUNDLE_ENGLISH_PATH}/fsck.strings"; showEnvVarsInLog = 0; }; - 4DFD953E153783DA0039B6BA /* Create symlink 
*/ = { + 52C08E592179679300D1618E /* Create Symlink */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 8; + buildActionMask = 2147483647; files = ( ); - inputPaths = ( - "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/mount_hfs", - ); - name = "Create symlink"; - outputPaths = ( - "$(DSTROOT)/sbin/mount_hfs", - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/mount_hfs ${DSTROOT}/sbin/mount_hfs\nif [[ $UID == 0 ]] ; then\n chgrp -h wheel ${DSTROOT}/sbin/mount_hfs\nfi\n"; - showEnvVarsInLog = 0; - }; - 4DFD953F1537841C0039B6BA /* Create symlink */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( + inputFileListPaths = ( ); inputPaths = ( "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/newfs_hfs", - ); - name = "Create symlink"; - outputPaths = ( - "$(DSTROOT)/sbin/newfs_hfs", - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/newfs_hfs ${DSTROOT}/sbin/newfs_hfs\nif [[ $UID == 0 ]] ; then\n chgrp -h wheel ${DSTROOT}/sbin/newfs_hfs\nfi\n"; - showEnvVarsInLog = 0; - }; - 4DFD95401537844E0039B6BA /* Create symlink */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( - ); - inputPaths = ( "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/fsck_hfs", + "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/mount_hfs", + ); + name = "Create Symlink"; + outputFileListPaths = ( ); - name = "Create symlink"; outputPaths = ( + "$(DSTROOT)/sbin/newfs_hfs", "$(DSTROOT)/sbin/fsck_hfs", + "$(DSTROOT)/sbin/mount_hfs", ); - runOnlyForDeploymentPostprocessing = 1; + runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/fsck_hfs ${DSTROOT}/sbin/fsck_hfs\nif [[ $UID == 0 ]] ; then\n chgrp -h wheel ${DSTROOT}/sbin/fsck_hfs\nfi\n"; - showEnvVarsInLog = 0; + shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/newfs_hfs ${DSTROOT}/sbin/newfs_hfs\nif [[ $UID == 0 ]] ; then\nchgrp -h wheel ${DSTROOT}/sbin/newfs_hfs\nfi\n\nln 
-sfhv ${FS_BUNDLE_BIN_PATH}/fsck_hfs ${DSTROOT}/sbin/fsck_hfs\nif [[ $UID == 0 ]] ; then\nchgrp -h wheel ${DSTROOT}/sbin/fsck_hfs\nfi\n\nln -sfhv ${FS_BUNDLE_BIN_PATH}/mount_hfs ${DSTROOT}/sbin/mount_hfs\nif [[ $UID == 0 ]] ; then\nchgrp -h wheel ${DSTROOT}/sbin/mount_hfs\nfi\n"; }; FB48E5131BB385FF00523121 /* System.framework */ = { isa = PBXShellScriptBuildPhase; @@ -2424,6 +2813,55 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + 900BDED01FF919C2002F7EC0 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D769A1ED2067E6BB0022791F /* lf_hfs_attrlist.c in Sources */, + EE73740620644328004C2F0E /* lf_hfs_sbunicode.c in Sources */, + 90F5EBB52063AA77004397B2 /* lf_hfs_btrees_io.c in Sources */, + D769A1CC206107190022791F /* lf_hfs_vnode.c in Sources */, + 90F5EBA72061476A004397B2 /* lf_hfs_btree.c in Sources */, + D7BD8F9C20AC388E00E93640 /* lf_hfs_catalog.c in Sources */, + 90F5EBC12063CE12004397B2 /* lf_hfs_btree_allocate.c in Sources */, + 90F5EBBF2063CCE0004397B2 /* lf_hfs_btree_misc_ops.c in Sources */, + D7978404205EC12700E93B37 /* lf_hfs_locks.c in Sources */, + D7978423205FB57600E93B37 /* lf_hfs_chash.c in Sources */, + D7978417205EC9C300E93B37 /* lf_hfs_vfsops.c in Sources */, + 906EBF7B2063F7CE00B21E94 /* lf_hfs_btree_node_reserve.c in Sources */, + 906EBF8D2067884300B21E94 /* lf_hfs_lookup.c in Sources */, + D79784422060037400E93B37 /* lf_hfs_raw_read_write.c in Sources */, + 906EBF792063E76D00B21E94 /* lf_hfs_endian.c in Sources */, + 906EBF732063DB6C00B21E94 /* lf_hfs_generic_buf.c in Sources */, + D785054A206B831000B9C5E4 /* lf_hfs_xattr.c in Sources */, + 18B450692104D958002052BF /* lf_hfs_journal.c in Sources */, + D769A1CE206107DF0022791F /* lf_hfs_cnode.c in Sources */, + 90F5EBB72063B212004397B2 /* lf_hfs_file_extent_mapping.c in Sources */, + 9022D18220600D9E00D9A2AE /* lf_hfs_rangelist.c in Sources */, + 906EBF8820640CDF00B21E94 /* lf_hfs_unicode_wrappers.c in Sources */, + 900BDEF61FF9202E002F7EC0 /* 
lf_hfs_dirops_handler.c in Sources */, + D769A1E72063AD680022791F /* lf_hfs_volume_allocation.c in Sources */, + 900BDEFA1FF92170002F7EC0 /* lf_hfs_fileops_handler.c in Sources */, + 900BDEFE1FF9246F002F7EC0 /* lf_hfs_logger.c in Sources */, + 9022D175205FE5FA00D9A2AE /* lf_hfs_utils.c in Sources */, + D7978420205ED7E600E93B37 /* lf_hfs_vfsutils.c in Sources */, + 906EBF772063E44900B21E94 /* lf_hfs_readwrite_ops.c in Sources */, + 90F5EBB92063CC22004397B2 /* lf_hfs_btree_tree_ops.c in Sources */, + 90F5EBBB2063CC3A004397B2 /* lf_hfs_btree_node_ops.c in Sources */, + D769A1D4206136420022791F /* lf_hfs_vnops.c in Sources */, + D759E27120AD75FC00792EDA /* lf_hfs_link.c in Sources */, + 900BDEEC1FF91C2A002F7EC0 /* lf_hfs_fsops_handler.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 900BDED91FF919DE002F7EC0 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 900BDEE81FF91B8C002F7EC0 /* livefiles_hfs_tester.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; FB20E0DB1AE950C200CEBE7B /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2500,6 +2938,7 @@ 2A9399C71BE172A400FB075B /* test-log2phys.m in Sources */, 2A9399BF1BE15F6800FB075B /* test-fsinfo-sig.c in Sources */, 2A9399C01BE15F6800FB075B /* test-key-roll.c in Sources */, + A6E6D74020909C72002125B0 /* test-get-volume-create-time.c in Sources */, 2A9399C11BE15F6800FB075B /* test-msync-16k.c in Sources */, 2A9399C21BE15F6800FB075B /* test-readdir.c in Sources */, 2A9399C31BE15F6800FB075B /* test-set-create-time.c in Sources */, @@ -2526,6 +2965,7 @@ FB76B3D91B7A4BF000FA9F2B /* hfs-tests.mm in Sources */, FB76B3EE1B7BE24B00FA9F2B /* disk-image.m in Sources */, FB76B3F21B7BE79800FA9F2B /* systemx.c in Sources */, + F90E174921ADFFD100345EE3 /* test-cas-bsdflags.c in Sources */, FB285C2A1B7E81180099B2ED /* test-sparse-dev.c in Sources */, FB55AE541B7C271000701D03 /* test-doc-tombstone.c in Sources */, 
FBD69AFA1B9132E40022ECAD /* test-dateadded.c in Sources */, @@ -2649,6 +3089,16 @@ target = 4DFD94BC15373C2C0039B6BA /* fsck_makestrings */; targetProxy = 4DBD52521548A4D4007AA736 /* PBXContainerItemProxy */; }; + 900BDEE61FF919E7002F7EC0 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 900BDED31FF919C2002F7EC0 /* livefiles_hfs */; + targetProxy = 900BDEE51FF919E7002F7EC0 /* PBXContainerItemProxy */; + }; + 9430FE98211658E7009CC8AF /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 900BDED31FF919C2002F7EC0 /* livefiles_hfs */; + targetProxy = 9430FE97211658E7009CC8AF /* PBXContainerItemProxy */; + }; FB48E4BC1BB30CC400523121 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = FB48E49B1BB3070400523121 /* OSX Kernel Framework Headers */; @@ -2760,7 +3210,7 @@ 4DFD9536153746210039B6BA /* InfoPlist.strings */ = { isa = PBXVariantGroup; children = ( - 4DFD9537153746210039B6BA /* English */, + 4DFD9537153746210039B6BA /* en */, ); name = InfoPlist.strings; sourceTree = ""; @@ -2805,6 +3255,8 @@ isa = XCBuildConfiguration; baseConfigurationReference = 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */; buildSettings = { + SUPPORTS_TEXT_BASED_API = YES; + TAPI_VERIFY_MODE = Pedantic; }; name = Release; }; @@ -2967,6 +3419,426 @@ }; name = Release; }; + 900BDED61FF919C2002F7EC0 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_ANALYZER_OBJC_UNUSED_IVARS = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + DEAD_CODE_STRIPPING = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + EXECUTABLE_PREFIX = ""; + FRAMEWORK_SEARCH_PATHS = "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = NO; + GCC_WARN_UNUSED_LABEL = NO; + GCC_WARN_UNUSED_PARAMETER = NO; + GCC_WARN_UNUSED_VALUE = NO; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = "livefiles_hfs_plugin/**"; + INSTALL_PATH = /System/Library/PrivateFrameworks/UserFS.framework/PlugIns; + MACOSX_DEPLOYMENT_TARGET = 10.13; + MTL_ENABLE_DEBUG_INFO = NO; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Release; + }; + 900BDED71FF919C2002F7EC0 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_ANALYZER_OBJC_UNUSED_IVARS = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = 
YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + DEAD_CODE_STRIPPING = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_STRICT_OBJC_MSGSEND = YES; + EXECUTABLE_PREFIX = ""; + FRAMEWORK_SEARCH_PATHS = "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = NO; + GCC_WARN_UNUSED_LABEL = NO; + GCC_WARN_UNUSED_PARAMETER = NO; + GCC_WARN_UNUSED_VALUE = NO; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = "livefiles_hfs_plugin/**"; + INSTALL_PATH = /System/Library/PrivateFrameworks/UserFS.framework/PlugIns; + MACOSX_DEPLOYMENT_TARGET = 10.13; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = NO; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Debug; + }; + 
900BDED81FF919C2002F7EC0 /* Coverage */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_ANALYZER_OBJC_UNUSED_IVARS = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + DEAD_CODE_STRIPPING = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + EXECUTABLE_PREFIX = ""; + FRAMEWORK_SEARCH_PATHS = "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = NO; + GCC_WARN_UNUSED_LABEL = NO; + GCC_WARN_UNUSED_PARAMETER = NO; + GCC_WARN_UNUSED_VALUE = NO; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = 
"livefiles_hfs_plugin/**"; + INSTALL_PATH = /System/Library/PrivateFrameworks/UserFS.framework/PlugIns; + MACOSX_DEPLOYMENT_TARGET = 10.13; + MTL_ENABLE_DEBUG_INFO = NO; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Coverage; + }; + 900BDEE21FF919DE002F7EC0 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_ENTITLEMENTS = livefiles_hfs_plugin/livefiles_hfs_tester.entitlements; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + FRAMEWORK_SEARCH_PATHS = "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + 
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = livefiles_hfs_plugin/; + MACOSX_DEPLOYMENT_TARGET = 10.12; + MTL_ENABLE_DEBUG_INFO = NO; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Release; + }; + 900BDEE31FF919DE002F7EC0 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_ENTITLEMENTS = livefiles_hfs_plugin/livefiles_hfs_tester.entitlements; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + FRAMEWORK_SEARCH_PATHS = "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + 
"$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = livefiles_hfs_plugin/; + MACOSX_DEPLOYMENT_TARGET = 10.12; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Debug; + }; + 900BDEE41FF919DE002F7EC0 /* Coverage */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = NO; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_ENTITLEMENTS = livefiles_hfs_plugin/livefiles_hfs_tester.entitlements; + CODE_SIGN_IDENTITY = "-"; + CODE_SIGN_STYLE = Automatic; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + FRAMEWORK_SEARCH_PATHS = 
"$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = livefiles_hfs_plugin/; + MACOSX_DEPLOYMENT_TARGET = 10.12; + MTL_ENABLE_DEBUG_INFO = NO; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx.internal; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Coverage; + }; + 9430FE93211658C2009CC8AF /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos.internal; + SUPPORTED_PLATFORMS = "iphonesimulator iphoneos"; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Release; + }; + 9430FE94211658C2009CC8AF /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos.internal; + SUPPORTED_PLATFORMS = "iphonesimulator iphoneos"; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Debug; + }; + 9430FE95211658C2009CC8AF /* Coverage */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos.internal; + SUPPORTED_PLATFORMS = "iphonesimulator iphoneos"; + SUPPORTS_TEXT_BASED_API = NO; + }; + name = Coverage; + }; + DB1AAB7D20472D140036167F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + DB1AAB7E20472D140036167F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + DB1AAB7F20472D140036167F /* Coverage */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = 
Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Coverage; + }; FB20E0E71AE950C200CEBE7B /* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = FB20E1781AE968BD00CEBE7B /* kext.xcconfig */; @@ -3867,6 +4739,8 @@ baseConfigurationReference = 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */; buildSettings = { ENABLE_TESTABILITY = YES; + SUPPORTS_TEXT_BASED_API = YES; + TAPI_VERIFY_MODE = Pedantic; }; name = Debug; }; @@ -4114,6 +4988,8 @@ baseConfigurationReference = 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */; buildSettings = { ENABLE_TESTABILITY = YES; + SUPPORTS_TEXT_BASED_API = YES; + TAPI_VERIFY_MODE = Pedantic; }; name = Coverage; }; @@ -4862,6 +5738,46 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 900BDED51FF919C2002F7EC0 /* Build configuration list for PBXNativeTarget "livefiles_hfs" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 900BDED61FF919C2002F7EC0 /* Release */, + 900BDED71FF919C2002F7EC0 /* Debug */, + 900BDED81FF919C2002F7EC0 /* Coverage */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 900BDEE11FF919DE002F7EC0 /* Build configuration list for PBXNativeTarget "livefiles_hfs_tester" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 900BDEE21FF919DE002F7EC0 /* Release */, + 900BDEE31FF919DE002F7EC0 /* Debug */, + 900BDEE41FF919DE002F7EC0 /* Coverage */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 9430FE96211658C2009CC8AF /* Build configuration list for PBXAggregateTarget "hfs_livefiles" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 9430FE93211658C2009CC8AF /* Release */, + 9430FE94211658C2009CC8AF /* Debug */, + 9430FE95211658C2009CC8AF /* Coverage */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB1AAB8020472D140036167F /* Build configuration list for PBXAggregateTarget "Swift_iOS" */ = { + isa = XCConfigurationList; + 
buildConfigurations = ( + DB1AAB7D20472D140036167F /* Release */, + DB1AAB7E20472D140036167F /* Debug */, + DB1AAB7F20472D140036167F /* Coverage */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; FB20E0E61AE950C200CEBE7B /* Build configuration list for PBXNativeTarget "kext" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs.xcscheme new file mode 100644 index 0000000..7c73282 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs.xcscheme @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs_tester.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs_tester.xcscheme new file mode 100644 index 0000000..965e177 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/livefiles_hfs_tester.xcscheme @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hfs_encodings/hfs_encodings.h b/hfs_encodings/hfs_encodings.h index aedb3c7..02b1336 100644 --- a/hfs_encodings/hfs_encodings.h +++ b/hfs_encodings/hfs_encodings.h @@ -32,7 +32,7 @@ #ifndef _HFS_ENCODINGS_H_ #define _HFS_ENCODINGS_H_ -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include @@ -84,6 +84,6 @@ __END_DECLS #endif /* __APPLE_API_UNSTABLE */ -#endif // !TARGET_OS_EMBEDDED +#endif // !TARGET_OS_IPHONE #endif /* ! 
_HFS_ENCODINGS_H_ */ diff --git a/hfs_util/hfs_util.osx.entitlements b/hfs_util/hfs_util.osx.entitlements index 0288bfa..fb35d7f 100644 --- a/hfs_util/hfs_util.osx.entitlements +++ b/hfs_util/hfs_util.osx.entitlements @@ -2,7 +2,7 @@ - com.apple.rootless.install + com.apple.rootless.restricted-block-devices diff --git a/livefiles_hfs_plugin/lf_hfs.h b/livefiles_hfs_plugin/lf_hfs.h new file mode 100644 index 0000000..8ddb76e --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs.h @@ -0,0 +1,600 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#ifndef lf_hfs_h +#define lf_hfs_h + +#include +#include "lf_hfs_locks.h" +#include "lf_hfs_format.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_rangelist.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_defs.h" + + +#define HFS_MAX_DEFERED_ALLOC (1024*1024) + +#define HFS_MAX_FILES (UINT32_MAX - kHFSFirstUserCatalogNodeID) + +// 400 megs is a "big" file (i.e. 
one that when deleted +// would touch enough data that we should break it into +// multiple separate transactions) +#define HFS_BIGFILE_SIZE (400LL * 1024LL * 1024LL) + +enum { kMDBSize = 512 }; /* Size of I/O transfer to read entire MDB */ + +enum { kMasterDirectoryBlock = 2 }; /* MDB offset on disk in 512-byte blocks */ +enum { kMDBOffset = kMasterDirectoryBlock * 512 }; /* MDB offset on disk in bytes */ + +#define kRootDirID kHFSRootFolderID + +/* How many free extents to cache per volume */ +#define kMaxFreeExtents 10 + +/* Maximum file size that we're willing to defrag on open */ +#define HFS_MAX_DEFRAG_SIZE (104857600) // 100 * 1024 * 1024 (100MB) +#define HFS_INITIAL_DEFRAG_SIZE (20971520) // 20 * 1024 * 1024 (20MB) + +#define HFS_AVERAGE_NAME_SIZE 22 +#define AVERAGE_HFSDIRENTRY_SIZE (8+HFS_AVERAGE_NAME_SIZE+4) + +/* + * HFS_MINFREE gives the minimum acceptable percentage + * of file system blocks which may be free (but this + * minimum will never exceed HFS_MAXRESERVE bytes). If + * the free block count drops below this level only the + * superuser may continue to allocate blocks. + */ +#define HFS_MINFREE (1) +#define HFS_MAXRESERVE ((u_int64_t)(250*1024*1024)) +#define HFS_BT_MAXRESERVE ((u_int64_t)(10*1024*1024)) + +/* + * HFS_META_DELAY is a duration (in usecs) used for triggering the + * hfs_syncer() routine. We will back off if writes are in + * progress, but... + * HFS_MAX_META_DELAY is the maximum time we will allow the + * syncer to be delayed. + */ +enum { + HFS_META_DELAY = 100 * 1000, // 0.1 secs + //HFS_META_DELAY = 10 * 1000, // 0.01 secs + HFS_MAX_META_DELAY = 5000 * 1000 // 5 secs + //HFS_MAX_META_DELAY = 1000 * 1000 // 1 secs +}; + +#define HFS_META_DELAY_TS ((struct timespec){ 0, HFS_META_DELAY * NSEC_PER_USEC }) + + +/* This structure describes the HFS specific mount structure data. 
*/ +typedef struct hfsmount { + u_int32_t hfs_flags; /* see below */ + + /* Physical Description */ + u_int32_t hfs_logical_block_size; /* Logical block size of the disk as reported by ioctl(DKIOCGETBLOCKSIZE), always a multiple of 512 */ + daddr64_t hfs_logical_block_count; /* Number of logical blocks on the disk, as reported by ioctl(DKIOCGETBLOCKCOUNT) */ + u_int64_t hfs_logical_bytes; /* Number of bytes on the disk device this HFS is mounted on (blockcount * blocksize) */ + /* + * Regarding the two AVH sector fields below: + * Under normal circumstances, the filesystem's notion of the "right" location for the AVH is such that + * the partition and filesystem's are in sync. However, during a filesystem resize, HFS proactively + * writes a new AVH at the end of the filesystem, assuming that the partition will be resized accordingly. + * + * However, it is not technically a corruption if the partition size is never modified. As a result, we need + * to keep two copies of the AVH around "just in case" the partition size is not modified. 
+ */ + daddr64_t hfs_partition_avh_sector; /* location of Alt VH w.r.t partition size */ + daddr64_t hfs_fs_avh_sector; /* location of Alt VH w.r.t filesystem size */ + + u_int32_t hfs_physical_block_size; /* Physical block size of the disk as reported by ioctl(DKIOCGETPHYSICALBLOCKSIZE) */ + u_int32_t hfs_log_per_phys; /* Number of logical blocks per physical block size */ + + /* Access to VFS and devices */ + struct mount * hfs_mp; /* filesystem vfs structure */ + struct vnode * hfs_devvp; /* block device mounted vnode */ + struct vnode * hfs_extents_vp; + struct vnode * hfs_catalog_vp; + struct vnode * hfs_allocation_vp; + struct vnode * hfs_attribute_vp; + struct vnode * hfs_startup_vp; + struct vnode * hfs_attrdata_vp; /* pseudo file */ + struct cnode * hfs_extents_cp; + struct cnode * hfs_catalog_cp; + struct cnode * hfs_allocation_cp; + struct cnode * hfs_attribute_cp; + struct cnode * hfs_startup_cp; + dev_t hfs_raw_dev; /* device mounted */ + u_int32_t hfs_logBlockSize; /* Size of buffer cache buffer for I/O */ + + /* Default values for HFS standard and non-init access */ + uid_t hfs_uid; /* uid to set as owner of the files */ + gid_t hfs_gid; /* gid to set as owner of the files */ + mode_t hfs_dir_mask; /* mask to and with directory protection bits */ + mode_t hfs_file_mask; /* mask to and with file protection bits */ + u_int32_t hfs_encoding; /* Default encoding for non hfs+ volumes */ + + /* Persistent fields (on disk, dynamic) */ + time_t hfs_mtime; /* file system last modification time */ + u_int32_t hfs_filecount; /* number of files in file system */ + u_int32_t hfs_dircount; /* number of directories in file system */ + u_int32_t freeBlocks; /* free allocation blocks */ + u_int32_t reclaimBlocks; /* number of blocks we are reclaiming during resize */ + u_int32_t tentativeBlocks; /* tentative allocation blocks -- see note below */ + u_int32_t nextAllocation; /* start of next allocation search */ + u_int32_t sparseAllocation; /* start of allocations 
for sparse devices */ + u_int32_t vcbNxtCNID; /* next unused catalog node ID - protected by catalog lock */ + u_int32_t vcbWrCnt; /* file system write count */ + u_int64_t encodingsBitmap; /* in-use encodings */ + u_int16_t vcbNmFls; /* HFS Only - root dir file count */ + u_int16_t vcbNmRtDirs; /* HFS Only - root dir directory count */ + + /* Persistent fields (on disk, static) */ + u_int16_t vcbSigWord; + + // Volume will be inconsistent if header is not flushed + bool hfs_header_dirty; + + // Volume header is dirty, but won't be inconsistent if not flushed + bool hfs_header_minor_change; + + u_int32_t vcbAtrb; + u_int32_t vcbJinfoBlock; + u_int32_t localCreateDate;/* volume create time from volume header (For HFS+, value is in local time) */ + time_t hfs_itime; /* file system creation time (creation date of the root folder) */ + time_t hfs_btime; /* file system last backup time */ + u_int32_t blockSize; /* size of allocation blocks */ + u_int32_t totalBlocks; /* total allocation blocks */ + u_int32_t allocLimit; /* Do not allocate this block or beyond */ + /* + * NOTE: When resizing a volume to make it smaller, allocLimit is set to the allocation + * block number which will contain the new alternate volume header. At all other times, + * allocLimit is set to totalBlocks. The allocation code uses allocLimit instead of + * totalBlocks to limit which blocks may be allocated, so that during a resize, we don't + * put new content into the blocks we're trying to truncate away. 
+ */ + int32_t vcbClpSiz; + u_int32_t vcbFndrInfo[8]; + int16_t vcbVBMSt; /* HFS only */ + int16_t vcbAlBlSt; /* HFS only */ + + /* vcb stuff */ + u_int8_t vcbVN[256]; /* volume name in UTF-8 */ + u_int32_t volumeNameEncodingHint; + u_int32_t hfsPlusIOPosOffset; /* Disk block where HFS+ starts */ + u_int32_t vcbVBMIOSize; /* volume bitmap I/O size */ + + /* cache of largest known free extents */ + u_int32_t vcbFreeExtCnt; + HFSPlusExtentDescriptor vcbFreeExt[kMaxFreeExtents]; + pthread_mutex_t vcbFreeExtLock; + + /* Summary Table */ + u_int8_t *hfs_summary_table; /* Each bit is 1 vcbVBMIOSize of bitmap, byte indexed */ + u_int32_t hfs_summary_size; /* number of BITS in summary table defined above (not bytes!) */ + u_int32_t hfs_summary_bytes; /* number of BYTES in summary table */ + + u_int32_t scan_var; /* For initializing the summary table */ + + + u_int32_t reserveBlocks; /* free block reserve */ + u_int32_t loanedBlocks; /* blocks on loan for delayed allocations */ + u_int32_t lockedBlocks; /* blocks reserved and locked */ + + /* + * HFS+ Private system directories (two). Any access + * (besides looking at the cd_cnid) requires holding + * the Catalog File lock. + */ + struct cat_desc hfs_private_desc[2]; + struct cat_attr hfs_private_attr[2]; + + u_int32_t hfs_metadata_createdate; + + /* Journaling variables: */ + struct journal *jnl; // the journal for this volume (if one exists) + struct vnode *jvp; // device where the journal lives (may be equal to devvp) + u_int64_t jnl_start; // start block of the journal file (so we don't delete it) + u_int64_t jnl_size; + u_int64_t hfs_jnlfileid; + u_int64_t hfs_jnlinfoblkid; + pthread_rwlock_t hfs_global_lock; + pthread_t hfs_global_lockowner; + u_int32_t hfs_transaction_nesting; + + /* + * Notification variables + * See comments in hfs mount code for what the + * default levels are set to. 
+ */ + u_int32_t hfs_notification_conditions; + u_int32_t hfs_freespace_notify_dangerlimit; + u_int32_t hfs_freespace_notify_warninglimit; + u_int32_t hfs_freespace_notify_nearwarninglimit; + u_int32_t hfs_freespace_notify_desiredlevel; + + /* time mounted and last mounted mod time "snapshot" */ + time_t hfs_mount_time; + time_t hfs_last_mounted_mtime; + + /* Metadata allocation zone variables: */ + u_int32_t hfs_metazone_start; + u_int32_t hfs_metazone_end; + u_int32_t hfs_min_alloc_start; + u_int32_t hfs_freed_block_count; + int hfs_overflow_maxblks; + int hfs_catalog_maxblks; + + /* defrag-on-open variables */ + int hfs_defrag_nowait; //issue defrags now, regardless of whether or not we've gone past 3 min. + uint64_t hfs_defrag_max; //maximum file size we'll defragment on this mount + +#if HFS_SPARSE_DEV + /* Sparse device variables: */ + struct vnode * hfs_backingvp; + u_int32_t hfs_last_backingstatfs; + u_int32_t hfs_sparsebandblks; + u_int64_t hfs_backingfs_maxblocks; +#endif + size_t hfs_max_inline_attrsize; + + pthread_mutex_t hfs_mutex; /* protects access to hfsmount data */ + pthread_mutex_t sync_mutex; + + enum { + HFS_THAWED, + HFS_WANT_TO_FREEZE, // This state stops hfs_sync from starting + HFS_FREEZING, // We're in this state whilst we're flushing + HFS_FROZEN // Everything gets blocked in hfs_lock_global + } hfs_freeze_state; + union { + /* + * When we're freezing (HFS_FREEZING) but not yet + * frozen (HFS_FROZEN), we record the freezing thread + * so that we stop other threads from taking locks, + * but allow the freezing thread. + */ + pthread_t hfs_freezing_thread; + /* + * Once we have frozen (HFS_FROZEN), we record the + * process so that if it dies, we can automatically + * unfreeze. 
+ */ + proc_t hfs_freezing_proc; + }; + + pthread_t hfs_downgrading_thread; /* thread who's downgrading to rdonly */ + + /* Resize variables: */ + u_int32_t hfs_resize_blocksmoved; + u_int32_t hfs_resize_totalblocks; + u_int32_t hfs_resize_progress; + + /* the full UUID of the volume, not the one stored in finderinfo */ + uuid_t hfs_full_uuid; + + /* Per mount cnode hash variables: */ + pthread_mutex_t hfs_chash_mutex; /* protects access to cnode hash table */ + u_long hfs_cnodehash; /* size of cnode hash table - 1 */ + LIST_HEAD(cnodehashhead, cnode) *hfs_cnodehashtbl; /* base of cnode hash */ + + /* Per mount fileid hash variables (protected by catalog lock!) */ + u_long hfs_idhash; /* size of cnid/fileid hash table -1 */ + LIST_HEAD(idhashhead, cat_preflightid) *hfs_idhashtbl; /* base of ID hash */ + + // Records the oldest outstanding sync request + struct timeval hfs_sync_req_oldest; + + /* Records the syncer thread so that we can avoid the syncer + queing more syncs. */ + pthread_t hfs_syncer_thread; + + // Not currently used except for debugging purposes + // Since we pass this to OSAddAtomic, this needs to be 4-byte aligned. 
+ uint32_t hfs_active_threads; + + enum { + // These are indices into the array below + + // Tentative ranges can be claimed back at any time + HFS_TENTATIVE_BLOCKS = 0, + + // Locked ranges cannot be claimed back, but the allocation + // won't have been written to disk yet + HFS_LOCKED_BLOCKS = 1, + }; + // These lists are not sorted like a range list usually is + struct rl_head hfs_reserved_ranges[2]; + + //General counter of link id + int cur_link_id; + +} hfsmount_t; + +typedef hfsmount_t ExtendedVCB; + + +/* Aliases for legacy (Mac OS 9) field names */ +#define vcbLsMod hfs_mtime +#define vcbVolBkUp hfs_btime +#define extentsRefNum hfs_extents_vp +#define catalogRefNum hfs_catalog_vp +#define allocationsRefNum hfs_allocation_vp +#define vcbFilCnt hfs_filecount +#define vcbDirCnt hfs_dircount + +static inline void MarkVCBDirty(hfsmount_t *hfsmp) +{ + hfsmp->hfs_header_dirty = true; +} + +static inline void MarkVCBClean(hfsmount_t *hfsmp) +{ + hfsmp->hfs_header_dirty = false; + hfsmp->hfs_header_minor_change = false; +} + +static inline bool IsVCBDirty(ExtendedVCB *vcb) +{ + return vcb->hfs_header_minor_change || vcb->hfs_header_dirty; +} + +// Header is changed but won't be inconsistent if we don't write it +static inline void hfs_note_header_minor_change(hfsmount_t *hfsmp) +{ + hfsmp->hfs_header_minor_change = true; +} + +// Must header be flushed for volume to be consistent? 
+static inline bool hfs_header_needs_flushing(hfsmount_t *hfsmp) +{ + return (hfsmp->hfs_header_dirty + || ISSET(hfsmp->hfs_catalog_cp->c_flag, C_MODIFIED) + || ISSET(hfsmp->hfs_extents_cp->c_flag, C_MODIFIED) + || (hfsmp->hfs_attribute_cp + && ISSET(hfsmp->hfs_attribute_cp->c_flag, C_MODIFIED)) + || (hfsmp->hfs_allocation_cp + && ISSET(hfsmp->hfs_allocation_cp->c_flag, C_MODIFIED)) + || (hfsmp->hfs_startup_cp + && ISSET(hfsmp->hfs_startup_cp->c_flag, C_MODIFIED))); +} + +enum privdirtype { + FILE_HARDLINKS, + DIR_HARDLINKS +}; + +#define HFS_ALLOCATOR_SCAN_INFLIGHT 0x0001 /* scan started */ +#define HFS_ALLOCATOR_SCAN_COMPLETED 0x0002 /* initial scan was completed */ + +/* HFS mount point flags */ +#define HFS_READ_ONLY 0x00001 +#define HFS_UNKNOWN_PERMS 0x00002 +#define HFS_WRITEABLE_MEDIA 0x00004 +#define HFS_CLEANED_ORPHANS 0x00008 +#define HFS_X 0x00010 +#define HFS_CASE_SENSITIVE 0x00020 +//#define HFS_STANDARD 0x00040 +#define HFS_METADATA_ZONE 0x00080 +#define HFS_FRAGMENTED_FREESPACE 0x00100 +#define HFS_NEED_JNL_RESET 0x00200 +//#define HFS_HAS_SPARSE_DEVICE 0x00400 +#define HFS_RESIZE_IN_PROGRESS 0x00800 +#define HFS_QUOTAS 0x01000 +#define HFS_CREATING_BTREE 0x02000 +/* When set, do not update nextAllocation in the mount structure */ +#define HFS_SKIP_UPDATE_NEXT_ALLOCATION 0x04000 +/* When set, the file system supports extent-based extended attributes */ +#define HFS_XATTR_EXTENTS 0x08000 +#define HFS_FOLDERCOUNT 0x10000 +/* When set, the file system exists on a virtual device, like disk image */ +//#define HFS_VIRTUAL_DEVICE 0x20000 +/* When set, we're in hfs_changefs, so hfs_sync should do nothing. */ +#define HFS_IN_CHANGEFS 0x40000 +/* When set, we are in process of downgrading or have downgraded to read-only, + * so hfs_start_transaction should return EROFS. 
+ */ +#define HFS_RDONLY_DOWNGRADE 0x80000 +#define HFS_DID_CONTIG_SCAN 0x100000 +#define HFS_UNMAP 0x200000 +//#define HFS_SSD 0x400000 +#define HFS_SUMMARY_TABLE 0x800000 +//#define HFS_CS 0x1000000 +//#define HFS_CS_METADATA_PIN 0x2000000 +#define HFS_FEATURE_BARRIER 0x8000000 /* device supports barrier-only flush */ +//#define HFS_CS_SWAPFILE_PIN 0x10000000 + +/* Macro to update next allocation block in the HFS mount structure. If + * the HFS_SKIP_UPDATE_NEXT_ALLOCATION is set, do not update + * nextAllocation block. + */ +#define HFS_UPDATE_NEXT_ALLOCATION(hfsmp, new_nextAllocation) \ +{ \ + if ((hfsmp->hfs_flags & HFS_SKIP_UPDATE_NEXT_ALLOCATION) == 0) \ + hfsmp->nextAllocation = new_nextAllocation; \ +} + +/* Macro for incrementing and decrementing the folder count in a cnode + * attribute only if the HFS_FOLDERCOUNT bit is set in the mount flags + * and kHFSHasFolderCount bit is set in the cnode flags. Currently these + * bits are only set for case sensitive HFS+ volumes. + */ +#define INC_FOLDERCOUNT(hfsmp, cattr) \ + if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) && \ + (cattr.ca_recflags & kHFSHasFolderCountMask)) \ + { \ + cattr.ca_dircount++; \ + } \ + +#define DEC_FOLDERCOUNT(hfsmp, cattr) \ + if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) && \ + (cattr.ca_recflags & kHFSHasFolderCountMask) && \ + (cattr.ca_dircount > 0)) \ + { \ + cattr.ca_dircount--; \ + } \ + +typedef struct filefork FCB; + +/* + * Macros for creating item names for our special/private directories. 
+ */ +#define MAKE_INODE_NAME(name, size, linkno) \ +(void) snprintf((name), size, "%s%d", HFS_INODE_PREFIX, (linkno)) +#define HFS_INODE_PREFIX_LEN 5 + +#define MAKE_DIRINODE_NAME(name, size, linkno) \ +(void) snprintf((name), size, "%s%d", HFS_DIRINODE_PREFIX, (linkno)) +#define HFS_DIRINODE_PREFIX_LEN 4 + +#define MAKE_DELETED_NAME(NAME, size, FID) \ +(void) snprintf((NAME), size, "%s%d", HFS_DELETE_PREFIX, (FID)) +#define HFS_DELETE_PREFIX_LEN 4 + +enum { kHFSPlusMaxFileNameBytes = kHFSPlusMaxFileNameChars * 3 }; + + +/* macro to determine if hfs or hfsplus */ +#define ISHFSPLUS(VCB) ((VCB)->vcbSigWord == kHFSPlusSigWord) + +/* + * Various ways to acquire a VFS mount point pointer: + */ +#define VTOVFS(VP) (vp->sFSParams.vnfs_mp) +#define HFSTOVFS(HFSMP) ((HFSMP)->hfs_mp) +#define VCBTOVFS(VCB) (HFSTOVFS(VCB)) + +/* + * Various ways to acquire an HFS mount point pointer: + */ +#define VTOHFS(vp) ((struct hfsmount *)(vp->sFSParams.vnfs_mp->psHfsmount)) +#define VFSTOHFS(mp) ((struct hfsmount *)(mp->psHfsmount)) +#define VCBTOHFS(vcb) (vcb) +#define FCBTOHFS(fcb) ((struct hfsmount *)((vnode_mount((fcb)->ff_cp->c_vp))->psHfsmount)) + +/* + * Various ways to acquire a VCB (legacy) pointer: + */ +#define VTOVCB(VP) (VTOHFS(VP)) +#define VFSTOVCB(MP) (VFSTOHFS(MP)) +#define HFSTOVCB(HFSMP) (HFSMP) +#define FCBTOVCB(FCB) (FCBTOHFS(FCB)) + +#define E_NONE (0) +#define kHFSBlockSize (512) + +/* + * Macros for getting the MDB/VH sector and offset + */ +#define HFS_PRI_SECTOR(blksize) (1024 / (blksize)) +#define HFS_PRI_OFFSET(blksize) ((blksize) > 1024 ? 1024 : 0) + +#define HFS_ALT_SECTOR(blksize, blkcnt) (((blkcnt) - 1) - (512 / (blksize))) +#define HFS_ALT_OFFSET(blksize) ((blksize) > 1024 ? 
(blksize) - 1024 : 0) + +#define HFS_PHYSBLK_ROUNDDOWN(sector_num, log_per_phys) ((sector_num / log_per_phys) * log_per_phys) + +/* HFS System file locking */ +#define SFL_CATALOG 0x0001 +#define SFL_EXTENTS 0x0002 +#define SFL_BITMAP 0x0004 +#define SFL_ATTRIBUTE 0x0008 +#define SFL_STARTUP 0x0010 +#define SFL_VM_PRIV 0x0020 +#define SFL_VALIDMASK (SFL_CATALOG | SFL_EXTENTS | SFL_BITMAP | SFL_ATTRIBUTE | SFL_STARTUP | SFL_VM_PRIV) + +/* If a runtime corruption is detected, mark the volume inconsistent + * bit in the volume attributes. + */ +typedef enum { + HFS_INCONSISTENCY_DETECTED, + // Used when unable to rollback an operation that failed + HFS_ROLLBACK_FAILED, + // Used when the latter part of an operation failed, but we chose not to roll back + HFS_OP_INCOMPLETE, + // Used when someone told us to force an fsck on next mount + HFS_FSCK_FORCED, +} hfs_inconsistency_reason_t; + +#define HFS_ERESERVEDNAME (-8) + +typedef enum hfs_sync_mode { + HFS_FSYNC, + HFS_FSYNC_FULL, + HFS_FSYNC_BARRIER +} hfs_fsync_mode_t; + +typedef enum hfs_flush_mode { + HFS_FLUSH_JOURNAL, // Flush journal + HFS_FLUSH_JOURNAL_META, // Flush journal and metadata blocks + HFS_FLUSH_FULL, // Flush journal and does a cache flush + HFS_FLUSH_CACHE, // Flush track cache to media + HFS_FLUSH_BARRIER, // Barrier-only flush to ensure write order + HFS_FLUSH_JOURNAL_BARRIER // Flush journal with barrier +} hfs_flush_mode_t; + +/* Number of bits used to represent maximum extended attribute size */ +#define HFS_XATTR_SIZE_BITS 31 + +#define HFS_LINK_MAX 32767 + +typedef enum { + // Push all modifications to disk (including minor ones) + HFS_UPDATE_FORCE = 0x01, +} hfs_update_options_t; + +/* + * Maximum extended attribute size supported for all extended attributes except + * resource fork and finder info. 
+ */ +#define HFS_XATTR_MAXSIZE INT32_MAX + +#if DEBUG + #define HFS_CRASH_TEST 1 +#else + #define HFS_CRASH_TEST 0 +#endif + +#if HFS_CRASH_TEST +typedef enum { + CRASH_ABORT_NONE, + CRASH_ABORT_MAKE_DIR, + CRASH_ABORT_JOURNAL_BEFORE_FINISH, // Crash driver before journal update starts + CRASH_ABORT_JOURNAL_AFTER_JOURNAL_DATA, // Crash driver after the journal data has been written but before the journal header has been updated + CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, // Crash driver after the journal header has been updated but before blocks were written to destination + CRASH_ABORT_JOURNAL_IN_BLOCK_DATA, // Crash driver while writing data blocks + CRASH_ABORT_JOURNAL_AFTER_BLOCK_DATA, // Crash the driver after the data blocks were written + CRASH_ABORT_ON_UNMOUNT, // Crash on unmount + CRASH_ABORT_RANDOM, // Crach at random time (introduced by tester) + CRASH_ABORT_LAST +} CrashAbort_E; + +typedef int (*CrashAbortFunction_FP)(CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, pthread_t pSyncerThread); + +extern CrashAbortFunction_FP gpsCrashAbortFunctionArray[]; + +#define CRASH_ABORT(CrashAbortCondition, psHfsmount, Vnode) \ + { \ + if (gpsCrashAbortFunctionArray[(CrashAbortCondition)]) { \ + \ + pthread_t pSyncerThread = 0; \ + if ( ((psHfsmount)->hfs_syncer_thread) && \ + ((psHfsmount)->hfs_syncer_thread != (void*)1) ) { \ + pSyncerThread = (psHfsmount)->hfs_syncer_thread; \ + } \ + gpsCrashAbortFunctionArray[(CrashAbortCondition)]( \ + (CrashAbortCondition), \ + (psHfsmount)->hfs_devvp->psFSRecord->iFD, \ + (Vnode), pSyncerThread ); \ + } \ + } + +#endif // HFS_CRASH_TEST + + +#endif /* lf_hfs_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_attrlist.c b/livefiles_hfs_plugin/lf_hfs_attrlist.c new file mode 100644 index 0000000..7f91fdf --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_attrlist.c @@ -0,0 +1,682 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_attrlist.c + * livefiles_hfs + * + * Created by Or Haimovich on 25/3/18. 
+ */ + +#include "lf_hfs_attrlist.h" +#include "lf_hfs_locks.h" +#include "lf_hfs.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_fileops_handler.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_chash.h" + +static void SetAttrIntoStruct(UVFSDirEntryAttr* psAttrEntry, struct cat_attr* pAttr, struct cat_desc* psDesc, struct hfsmount* psHfsm, struct cat_fork* pDataFork) +{ + psAttrEntry->dea_attrs.fa_validmask = VALID_OUT_ATTR_MASK; + + psAttrEntry->dea_attrs.fa_atime.tv_sec = pAttr->ca_atime; + psAttrEntry->dea_attrs.fa_ctime.tv_sec = pAttr->ca_ctime; + psAttrEntry->dea_attrs.fa_mtime.tv_sec = pAttr->ca_mtime; + psAttrEntry->dea_attrs.fa_birthtime.tv_sec = pAttr->ca_btime; + psAttrEntry->dea_attrs.fa_atime.tv_nsec = psAttrEntry->dea_attrs.fa_mtime.tv_nsec = psAttrEntry->dea_attrs.fa_ctime.tv_nsec = psAttrEntry->dea_attrs.fa_birthtime.tv_nsec = 0; + psAttrEntry->dea_attrs.fa_fileid = psDesc->cd_cnid; + psAttrEntry->dea_attrs.fa_parentid = psDesc->cd_parentcnid; + psAttrEntry->dea_attrs.fa_mode = pAttr->ca_mode & ALL_UVFS_MODES; + psAttrEntry->dea_attrs.fa_bsd_flags = pAttr->ca_bsdflags; + + if (VTTOUVFS(IFTOVT(pAttr->ca_mode)) == UVFS_FA_TYPE_DIR) + { + //If its a directory need to add . and .. 
to the direntries count + psAttrEntry->dea_attrs.fa_nlink = 2 + pAttr->ca_entries; + + //If this is the root folder need to hide the journal files */ + if (psHfsm->jnl && psDesc->cd_cnid == kHFSRootFolderID) + { + psAttrEntry->dea_attrs.fa_nlink -= 2; + } + psAttrEntry->dea_attrs.fa_size = (pAttr->ca_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE ; + } else { + psAttrEntry->dea_attrs.fa_nlink = pAttr->ca_linkcount; + psAttrEntry->dea_attrs.fa_size = pDataFork->cf_size; + } + + psAttrEntry->dea_attrs.fa_allocsize = pDataFork->cf_blocks * HFSTOVCB(psHfsm)->blockSize; + psAttrEntry->dea_attrs.fa_type = VTTOUVFS(IFTOVT(pAttr->ca_mode)); + psAttrEntry->dea_attrs.fa_gid = pAttr->ca_gid; + psAttrEntry->dea_attrs.fa_uid = pAttr->ca_uid ; +} + + +static void AddNewAttrEntry(ReadDirBuff_s* psReadDirBuffer, struct hfsmount* psHfsm, struct cat_desc* psDesc, struct cat_attr* pAttr, struct cat_fork* pDataFork, uint32_t* puUioSize, int iIndex, UVFSDirEntryAttr** ppsPrevAttrEntry) +{ + UVFSDirEntryAttr* psAttrEntry = (UVFSDirEntryAttr*) (psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(psReadDirBuffer)); + + SetAttrIntoStruct(psAttrEntry, pAttr, psDesc, psHfsm, pDataFork); + + psAttrEntry->dea_namelen = psDesc->cd_namelen; + psAttrEntry->dea_nextrec = _UVFS_DIRENTRYATTR_RECLEN(UVFS_DIRENTRYATTR_NAMEOFF, psDesc->cd_namelen); + psAttrEntry->dea_spare0 = 0; + psAttrEntry->dea_nameoff = UVFS_DIRENTRYATTR_NAMEOFF; + const u_int8_t * name = psDesc->cd_nameptr; + memcpy( UVFS_DIRENTRYATTR_NAMEPTR(psAttrEntry), name, psDesc->cd_namelen ); + UVFS_DIRENTRYATTR_NAMEPTR(psAttrEntry)[psAttrEntry->dea_namelen] = 0; + + *puUioSize = psAttrEntry->dea_nextrec; + + //Update prevEntry with cookie + if (*ppsPrevAttrEntry != NULL) + { + (*ppsPrevAttrEntry)->dea_nextcookie = iIndex | ((u_int64_t)psDesc->cd_cnid << 32); + } + + *ppsPrevAttrEntry = psAttrEntry; + psReadDirBuffer->uBufferResid -= *puUioSize; + + return; +} + +static bool CompareTimes(const struct timespec* psTimeA, const struct timespec* psTimeB) 
+{ + //Returns true if a happened at or after b. + if (psTimeA->tv_sec == psTimeB->tv_sec) + return psTimeA->tv_nsec == psTimeB->tv_nsec; + else + return psTimeA->tv_sec > psTimeB->tv_sec; +} + +static bool DirScanIsMatch(ScanDirRequest_s* psScanDirRequest, struct cat_desc* psDesc, struct cat_attr* pAttr, struct hfsmount* psHfsm, struct cat_fork* pDataFork) +{ + bool bIsMatch = true; + UVFSDirEntryAttr* psDirEntry = psScanDirRequest->psMatchingResult->smr_entry; + const char* pcName = (const char *) psDesc->cd_nameptr; + + if (pcName && ((psDesc->cd_namelen == (sizeof(HFSPLUSMETADATAFOLDER) - 1) && + memcmp(pcName, HFSPLUSMETADATAFOLDER, sizeof(HFSPLUSMETADATAFOLDER))) || + (psDesc->cd_namelen == (sizeof(HFSPLUS_DIR_METADATA_FOLDER) - 1) && + memcmp(pcName, HFSPLUS_DIR_METADATA_FOLDER, sizeof(HFSPLUS_DIR_METADATA_FOLDER))))) + { + // Skip over special dirs + return false; + } else if (pcName == NULL) + { + //XXXab: Should not happen anymore + LFHFS_LOG(LEVEL_ERROR, "got NULL name during scandir: %#x!", psDesc->cd_cnid); + return false; + } + + bool bAllowHiddenFiles = (psScanDirRequest->psMatchingCriteria->smr_attribute_filter->fa_validmask & LI_FA_VALID_BSD_FLAGS) && + (psScanDirRequest->psMatchingCriteria->smr_attribute_filter->fa_bsd_flags & UF_HIDDEN); + + + // filter out hidden files + if (bAllowHiddenFiles == false) { + // Filter out files with BSD UF_HIDDEN flag set or filenames that begins with a dot + if ( (pAttr->ca_flags & UF_HIDDEN) || (pcName[0] == '.') ) { + return false; + } + // Filter out directories and files with kFinderInvisibleMask flag set + if ( S_ISDIR(pAttr->ca_mode)) { + if (pAttr->ca_finderdirinfo.frFlags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) { + return false; + } + } else { // file + if (pAttr->ca_finderfileinfo.fdFlags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) { + return false; + } + } + } + + // If need to verify name contains + if (psScanDirRequest->psMatchingCriteria->smr_filename_contains != NULL) + { + //For 
each name in smr_filename_contains + bool bAtLeastOneNameContainsMatched = false; + char** ppcNameContainsStr = psScanDirRequest->psMatchingCriteria->smr_filename_contains; + while ( (*ppcNameContainsStr) && (strlen(*ppcNameContainsStr) != 0) && !bAtLeastOneNameContainsMatched) + { + uint64_t uNameContainsLength = strlen(*ppcNameContainsStr); + if (uNameContainsLength <= strlen(pcName)) + { + if(!hfs_strstr((const u_int8_t*) pcName, strlen(pcName), (const u_int8_t*) *ppcNameContainsStr, uNameContainsLength)) + { + bAtLeastOneNameContainsMatched |= true; + } + } + ppcNameContainsStr++; + } + bIsMatch = bAtLeastOneNameContainsMatched; + } + + if (!bIsMatch) goto check_if_directory; + + // If need to verify name appendix + if (psScanDirRequest->psMatchingCriteria->smr_filename_ends_with != NULL) + { + //For each name in smr_filename_contains + bool bAtLeastOneNameEndWithMatched = false; + char** ppcNameEndsWithStr = psScanDirRequest->psMatchingCriteria->smr_filename_ends_with; + while ( (*ppcNameEndsWithStr) && (strlen(*ppcNameEndsWithStr) != 0) && !bAtLeastOneNameEndWithMatched) + { + uint64_t uNameEndsWithLength = strlen(*ppcNameEndsWithStr); + if (uNameEndsWithLength <= strlen(pcName)) + { + if ( !hfs_apendixcmp((const u_int8_t*) pcName, strlen(pcName),(const u_int8_t*) *ppcNameEndsWithStr, uNameEndsWithLength) ) + { + bAtLeastOneNameEndWithMatched |= true; + } + } + ppcNameEndsWithStr++; + } + bIsMatch = bAtLeastOneNameEndWithMatched; + } + + if (!bIsMatch) goto check_if_directory; + + //If need to validate any other param + if (psScanDirRequest->psMatchingCriteria->smr_attribute_filter->fa_validmask != 0) + { + // If need to verify the file type + if (psScanDirRequest->psMatchingCriteria->smr_attribute_filter->fa_validmask & UVFS_FA_VALID_TYPE) + { + uint32_t uEntryType = VTTOUVFS(IFTOVT(pAttr->ca_mode)); + if ((psScanDirRequest->psMatchingCriteria->smr_attribute_filter->fa_type & uEntryType) != uEntryType) + { + bIsMatch = false; + } + } + + if (!bIsMatch) goto 
check_if_directory:
    // In case one of the requested criteria wasn't fulfilled we need to check if this is a folder and return
    if (VTTOUVFS(IFTOVT(pAttr->ca_mode)) == UVFS_FA_TYPE_DIR)
    {
        psScanDirRequest->psMatchingResult->smr_result_type |= SEARCH_RESULT_PUSH;
        bIsMatch = true;
    }

    if (bIsMatch)
    {
        UVFSDirEntryAttr* psAttrEntry = psScanDirRequest->psMatchingResult->smr_entry;
        SetAttrIntoStruct(psAttrEntry, pAttr, psDesc, psHfsm, pDataFork);

        psDirEntry->dea_namelen = strlen( pcName );
        psDirEntry->dea_spare0 = 0;
        psDirEntry->dea_nameoff = UVFS_DIRENTRYATTR_NAMEOFF;
        memcpy( UVFS_DIRENTRYATTR_NAMEPTR(psDirEntry), pcName, psDesc->cd_namelen);
        UVFS_DIRENTRYATTR_NAMEPTR(psDirEntry)[psDirEntry->dea_namelen] = 0;
    }
    return bIsMatch;
}

/*
 * Iterate a directory's catalog entries, one batch of two at a time, until an
 * entry matching the caller's scandir criteria is found (or EOF is reached).
 *
 * dvp              - directory vnode to scan (locked exclusive for the duration,
 *                    since directory hints are manipulated)
 * psScanDirRequest - in: matching criteria + start cookie; out: matching result,
 *                    including the next-cookie to resume from
 *
 * Returns 0 on success (including "no match, hit EOF" — reported via the
 * result cookie UVFS_DIRCOOKIE_EOF), or an errno-style error.
 */
int
hfs_scandir(struct vnode *dvp, ScanDirRequest_s* psScanDirRequest)
{
    int error = 0;
    struct cat_entrylist *ce_list = NULL;
    struct cnode* dcp = VTOC(dvp);
    struct hfsmount* hfsmp = VTOHFS(dvp);
    uint64_t uCookie = psScanDirRequest->psMatchingCriteria->smr_start_cookie;
    int reachedeof = 0;

    /*
     * Take an exclusive directory lock since we manipulate the directory hints
     */
    if ((error = hfs_lock(dcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
    {
        return (error);
    }

    /* Initialize a catalog entry list - allocating 2 for eof reference.
     * Entry [0] is the candidate examined this iteration; entry [1] supplies
     * the cnid used to build the resume cookie. */
    ce_list = hfs_mallocz(CE_LIST_SIZE(2));
    ce_list->maxentries = 2;

    bool bContinueIterating = true;
    while (bContinueIterating)
    {
        /*
         * Populate the ce_list from the catalog file.
         */
        int lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

        /* Extract directory index and tag (sequence number) from uio_offset */
        int index = uCookie & HFS_INDEX_MASK;
        unsigned int tag = (unsigned int) uCookie & ~HFS_INDEX_MASK;

        /* Get a detached directory hint (cnode must be locked exclusive) */
        directoryhint_t* dirhint = hfs_getdirhint(dcp, ((index - 1) & HFS_INDEX_MASK) | tag, TRUE);

        /* Hide tag from catalog layer. */
        dirhint->dh_index &= HFS_INDEX_MASK;
        if (dirhint->dh_index == HFS_INDEX_MASK)
        {
            dirhint->dh_index = -1;
        }

        error = cat_getentriesattr(hfsmp, dirhint, ce_list, &reachedeof);
        /* Don't forget to release the descriptors later! */

        hfs_systemfile_unlock(hfsmp, lockflags);

        if (error == ENOENT)
        {
            // In case of an empty directory need to set EOF and go to exit
            // We can get ENOENT with partial results so check if really no file was found
            error = 0;
            if (ce_list->realentries == 0)
            {
                psScanDirRequest->psMatchingResult->smr_entry->dea_nextcookie = UVFS_DIRCOOKIE_EOF;
                hfs_reldirhint(dcp, dirhint);
                goto exit;
            }
        }
        else if (error)
        {
            hfs_reldirhint(dcp, dirhint);
            goto exit;
        }

        dcp->c_touch_acctime = true;

        /*
         * Check for a FS corruption in the valence. We're holding the cnode lock
         * exclusive since we need to serialize the directory hints, so if we found
         * that the valence reported 0, but we actually found some items here, then
         * silently minimally self-heal and bump the valence to 1.
         */
        if ((dcp->c_entries == 0) && (ce_list->realentries > 0))
        {
            dcp->c_entries++;
            dcp->c_flag |= C_MODIFIED;
            LFHFS_LOG(LEVEL_DEBUG, "hfs_scandir: repairing valence to non-zero! \n");

            /* force an update on dcp while we're still holding the lock. */
            hfs_update(dvp, 0);
        }

        struct cnode *cp = NULL;
        struct cat_desc* psDesc = &ce_list->entry[0].ce_desc;
        struct cat_attr* psAttr = &ce_list->entry[0].ce_attr;
        struct hfsmount* psHfsmp = VTOHFS(dvp);
        struct cat_fork sDataFork;

        bzero(&sDataFork, sizeof(sDataFork));
        sDataFork.cf_size = ce_list->entry[0].ce_datasize;
        sDataFork.cf_blocks = ce_list->entry[0].ce_datablks;

        /* Prefer live (in-hash) cnode state over the catalog snapshot, if the
         * entry currently has a vnode. */
        struct vnode *vp = hfs_chash_getvnode(hfsmp, psAttr->ca_fileid, false, false, false);

        if (vp != NULL)
        {
            cp = VTOC(vp);
            /* Only use cnode's descriptor for non-hardlinks */
            if (!(cp->c_flag & C_HARDLINK) && cp->c_desc.cd_nameptr != NULL)
                psDesc = &cp->c_desc;
            psAttr = &cp->c_attr;
            if (cp->c_datafork)
            {
                sDataFork.cf_size = cp->c_datafork->ff_size;
                sDataFork.cf_blocks = cp->c_datafork->ff_blocks;
            }
        }

        bool bIsAMatch = DirScanIsMatch(psScanDirRequest, psDesc, psAttr, psHfsmp, &sDataFork);

        if (vp != NULL)
        {
            /* All done with cnode. */
            hfs_unlock(cp);
            cp = NULL;

            hfs_vnop_reclaim(vp);
        }

        /* If we skipped catalog entries for reserved files that should
         * not be listed in namespace, update the index accordingly.
         */
        index++;
        if (ce_list->skipentries)
        {
            index += ce_list->skipentries;
            ce_list->skipentries = 0;
        }

        /* Resume cookie: low bits are the directory index, high 32 bits the
         * cnid of the *next* entry (entry[1]); EOF is a sentinel. */
        uCookie = reachedeof ? UVFS_DIRCOOKIE_EOF : (index | ((u_int64_t)ce_list->entry[1].ce_desc.cd_cnid << 32));

        if (bIsAMatch)
        {
            bContinueIterating = false;
            psScanDirRequest->psMatchingResult->smr_entry->dea_nextcookie = uCookie;
            psScanDirRequest->psMatchingResult->smr_entry->dea_nextrec = 0;
        }

        if (reachedeof)
            hfs_reldirhint(dcp, dirhint);
        else
            hfs_insertdirhint(dcp, dirhint);

        // **** check if can move to exit *******
        /* All done with the catalog descriptors. */
        for (uint32_t i =0; i < ce_list->realentries; i++)
        {
            cat_releasedesc(&ce_list->entry[i].ce_desc);
        }
        ce_list->realentries = 0;
    }

exit:
    // Drop the directory lock
    hfs_unlock(dcp);
    dcp = NULL;

    if (ce_list)
    {
        for (int i = 0; i < (int)ce_list->realentries; ++i)
        {
            cat_releasedesc(&ce_list->entry[i].ce_desc);
        }
        hfs_free(ce_list);
    }

    return (error);
}

/*
 * Common function for both hfs_vnop_readdirattr and hfs_vnop_getattrlistbulk.
 * This either fills in a vnode_attr structure or fills in an attrbute buffer
 * Currently the difference in behaviour required for the two vnops is keyed
 * on whether the passed in vnode_attr pointer is null or not. If the pointer
 * is null we fill in buffer passed and if it is not null we fill in the fields
 * of the vnode_attr structure.
 */
/*
 * Bulk-read directory entries with attributes into psReadDirBuffer.
 *
 * dvp             - directory vnode (exclusive-locked while hints are touched)
 * psReadDirBuffer - output buffer; entries are packed until it is full
 * maxcount        - max number of entries to pack (EINVAL if < 1)
 * newstate        - out (optional): directory change count observed at entry
 * eofflag         - out: nonzero if end of directory was reached
 * actualcount     - out: number of entries actually packed
 * uCookie         - resume cookie (low bits: index; high bits: hint tag)
 *
 * Returns 0 or an errno-style error (EINVAL, ENOMEM, catalog errors).
 */
int
hfs_readdirattr_internal(struct vnode *dvp, ReadDirBuff_s* psReadDirBuffer, int maxcount, uint32_t *newstate, int *eofflag, int *actualcount, uint64_t uCookie)
{
    int error = 0;
    struct cat_desc *lastdescp = NULL;
    struct cat_entrylist *ce_list = NULL;
    UVFSDirEntryAttr* psPrevAttrEntry = NULL;

    int reachedeof = 0;
    *(actualcount) = *(eofflag) = 0;

    /*
     * Take an exclusive directory lock since we manipulate the directory hints
     */
    if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
    {
        return (error);
    }
    struct cnode* dcp = VTOC(dvp);
    struct hfsmount* hfsmp = VTOHFS(dvp);

    u_int32_t dirchg = dcp->c_dirchangecnt;

    /* Extract directory index and tag (sequence number) from uio_offset */
    int index = uCookie & HFS_INDEX_MASK;
    unsigned int tag = (unsigned int) uCookie & ~HFS_INDEX_MASK;

    /* Get a detached directory hint (cnode must be locked exclusive) */
    directoryhint_t* dirhint = hfs_getdirhint(dcp, ((index - 1) & HFS_INDEX_MASK) | tag, TRUE);

    /* Hide tag from catalog layer. */
    dirhint->dh_index &= HFS_INDEX_MASK;
    if (dirhint->dh_index == HFS_INDEX_MASK)
    {
        dirhint->dh_index = -1;
    }

    /*
     * Obtain a list of catalog entries and pack their attributes until
     * the output buffer is full or maxcount entries have been packed.
     */
    if (maxcount < 1)
    {
        error = EINVAL;
        goto exit2;
    }

    /* Initialize a catalog entry list. */
    ce_list = hfs_mallocz(CE_LIST_SIZE(maxcount));
    ce_list->maxentries = maxcount;

    /*
     * Populate the ce_list from the catalog file.
     */
    int lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    error = cat_getentriesattr(hfsmp, dirhint, ce_list, &reachedeof);
    /* Don't forget to release the descriptors later! */

    hfs_systemfile_unlock(hfsmp, lockflags);

    if ((error == ENOENT) || (reachedeof != 0))
    {
        *(eofflag) = true;
        error = 0;
    }
    if (error)
    {
        goto exit1;
    }

    dcp->c_touch_acctime = true;

    /*
     * Check for a FS corruption in the valence. We're holding the cnode lock
     * exclusive since we need to serialize the directory hints, so if we found
     * that the valence reported 0, but we actually found some items here, then
     * silently minimally self-heal and bump the valence to 1.
     */
    if ((dcp->c_entries == 0) && (ce_list->realentries > 0))
    {
        dcp->c_entries++;
        dcp->c_flag |= C_MODIFIED;
        LFHFS_LOG(LEVEL_DEBUG, "hfs_readdirattr_internal: repairing valence to non-zero! \n");

        /* force an update on dcp while we're still holding the lock. */
        hfs_update(dvp, 0);
    }

    /*
     * Drop the directory lock
     */
    hfs_unlock(dcp);
    dcp = NULL;

    /* Process the catalog entries. */
    for (int i = 0; i < (int)ce_list->realentries; ++i)
    {
        struct cnode *cp = NULL;
        struct vnode *vp = NULL;
        struct cat_desc * cdescp;
        struct cat_attr * cattrp;
        struct cat_fork c_datafork;
        struct cat_fork c_rsrcfork;

        bzero(&c_datafork, sizeof(c_datafork));
        bzero(&c_rsrcfork, sizeof(c_rsrcfork));
        cdescp = &ce_list->entry[i].ce_desc;
        cattrp = &ce_list->entry[i].ce_attr;
        c_datafork.cf_size = ce_list->entry[i].ce_datasize;
        c_datafork.cf_blocks = ce_list->entry[i].ce_datablks;
        c_rsrcfork.cf_size = ce_list->entry[i].ce_rsrcsize;
        c_rsrcfork.cf_blocks = ce_list->entry[i].ce_rsrcblks;

        /* Prefer live cnode state over the catalog snapshot, if present. */
        vp = hfs_chash_getvnode(hfsmp, cattrp->ca_fileid, false, false, false);

        if (vp != NULL)
        {
            cp = VTOC(vp);
            /* Only use cnode's descriptor for non-hardlinks */
            if (!(cp->c_flag & C_HARDLINK))
                cdescp = &cp->c_desc;
            cattrp = &cp->c_attr;
            if (cp->c_datafork)
            {
                c_datafork.cf_size = cp->c_datafork->ff_size;
                c_datafork.cf_blocks = cp->c_datafork->ff_blocks;
            }
            if (cp->c_rsrcfork)
            {
                c_rsrcfork.cf_size = cp->c_rsrcfork->ff_size;
                c_rsrcfork.cf_blocks = cp->c_rsrcfork->ff_blocks;
            }
            /* All done with cnode. */
            hfs_unlock(cp);
            cp = NULL;

            hfs_vnop_reclaim(vp);
        }

        u_int32_t currattrbufsize;
        AddNewAttrEntry(psReadDirBuffer, hfsmp, cdescp, cattrp, &c_datafork, &currattrbufsize, index, &psPrevAttrEntry);
        // Check if there was enough space to add the new entry
        if (currattrbufsize == 0)
        {
            break;
        }

        /* Save the last valid catalog entry */
        lastdescp = &ce_list->entry[i].ce_desc;
        index++;
        *actualcount += 1;

        /* Termination checks: count exhausted, or not enough room left for a
         * worst-case (128-byte-name) record. */
        if ((--maxcount <= 0) || (psReadDirBuffer->uBufferResid < _UVFS_DIRENTRYATTR_RECLEN(UVFS_DIRENTRYATTR_NAMEOFF, 128)))
        {
            break;
        }

    } /* for each catalog entry */

    /*
     * If we couldn't fit all the entries requested in the user's buffer,
     * it's not EOF.
     */
    if (*eofflag && (*actualcount < (int)ce_list->realentries))
        *eofflag = 0;

    /* If we skipped catalog entries for reserved files that should
     * not be listed in namespace, update the index accordingly.
     */
    if (ce_list->skipentries)
    {
        index += ce_list->skipentries;
        ce_list->skipentries = 0;
    }

    /*
     * If there are more entries then save the last name.
     * Key this behavior based on whether or not we observed EOFFLAG.
     *
     * Do not use the valence as a way to determine if we hit EOF, since
     * it can be wrong. Use the catalog's output only.
     */
    if ((*(eofflag) == 0) && (lastdescp != NULL))
    {
        /* Remember last entry */
        if ((dirhint->dh_desc.cd_flags & CD_HASBUF) && (dirhint->dh_desc.cd_nameptr != NULL))
        {
            dirhint->dh_desc.cd_flags &= ~CD_HASBUF;
            if (dirhint->dh_desc.cd_nameptr != NULL)
                hfs_free((void *) dirhint->dh_desc.cd_nameptr);
            dirhint->dh_desc.cd_nameptr = NULL;
        }
        if (lastdescp->cd_nameptr != NULL)
        {
            /* NOTE(review): allocates cd_namelen bytes and copies without a NUL
             * terminator — presumably cd_nameptr is treated as a counted string
             * here; confirm against cat_releasedesc/consumers. */
            dirhint->dh_desc.cd_namelen = lastdescp->cd_namelen;
            dirhint->dh_desc.cd_nameptr = hfs_malloc(sizeof(char)*lastdescp->cd_namelen);
            if (dirhint->dh_desc.cd_nameptr == NULL)
            {
                /* NOTE(review): at this point the directory lock was dropped and
                 * dcp == NULL; the exit2 path calls hfs_reldirhint(dcp, ...) and
                 * hfs_unlock(dcp), which would dereference NULL — confirm this
                 * error path and whether dcp must be re-acquired first. */
                error = ENOMEM;
                goto exit2;
            }
            memcpy((void *) dirhint->dh_desc.cd_nameptr,(void *) lastdescp->cd_nameptr,lastdescp->cd_namelen);
            dirhint->dh_desc.cd_flags |= CD_HASBUF;
        }
        else
        {
            dirhint->dh_desc.cd_namelen = 0;
            dirhint->dh_desc.cd_nameptr = NULL;
        }
        dirhint->dh_index = index - 1;
        dirhint->dh_desc.cd_cnid = lastdescp->cd_cnid;
        dirhint->dh_desc.cd_hint = lastdescp->cd_hint;
        dirhint->dh_desc.cd_encoding = lastdescp->cd_encoding;
    }

    /* All done with the catalog descriptors. */
    for (int i = 0; i < (int)ce_list->realentries; ++i)
    {
        cat_releasedesc(&ce_list->entry[i].ce_desc);
    }
    ce_list->realentries = 0;

    /* Re-take the directory lock for hint bookkeeping below. */
    (void) hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
    dcp = VTOC(dvp);

exit1:
    /* Pack directory index and tag into uio_offset. */
    while (tag == 0){
        tag = (++dcp->c_dirhinttag) << HFS_INDEX_BITS;
    }

    if (psPrevAttrEntry != NULL)
    {
        // Need to update next cookie in the last entry
        psPrevAttrEntry->dea_nextcookie = *eofflag ? UVFS_DIRCOOKIE_EOF : index | tag;
        // Last entry in the buffer should always have dea_nextrec = 0
        psPrevAttrEntry->dea_nextrec = 0;
    }
    dirhint->dh_index |= tag;

exit2:
    if (newstate)
        *newstate = dirchg;

    /*
     * Drop directory hint on error or if there are no more entries,
     * only if EOF was seen.
     */
    if (dirhint)
    {
        if ((error != 0) || *(eofflag))
        {
            hfs_reldirhint(dcp, dirhint);
        }
        else
        {
            hfs_insertdirhint(dcp, dirhint);
        }
    }

    if (ce_list)
        hfs_free(ce_list);

    hfs_unlock(dcp);
    return (error);
}
diff --git a/livefiles_hfs_plugin/lf_hfs_attrlist.h b/livefiles_hfs_plugin/lf_hfs_attrlist.h
new file mode 100644
index 0000000..d997c7a
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_attrlist.h
@@ -0,0 +1,61 @@
/* Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * lf_hfs_attrlist.h
 * livefiles_hfs
 *
 * Created by Or Haimovich on 25/3/18.
+ */ + +#ifndef lf_hfs_attrlist_h +#define lf_hfs_attrlist_h + +#include + +#include "lf_hfs_catalog.h" +#include "lf_hfs_vnode.h" +/* + * The following define the attributes that HFS supports: + */ + +typedef struct +{ + scandir_matching_request_t* psMatchingCriteria; + scandir_matching_reply_t* psMatchingResult; +} ScanDirRequest_s; + +#define HFS_ATTR_CMN_VALID (ATTR_CMN_NAME | ATTR_CMN_DEVID | \ + ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | \ + ATTR_CMN_OBJPERMANENTID | ATTR_CMN_PAROBJID | \ + ATTR_CMN_SCRIPT | ATTR_CMN_CRTIME | \ + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ + ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME | \ + ATTR_CMN_FNDRINFO |ATTR_CMN_OWNERID | \ + ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | \ + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | \ + ATTR_CMN_FILEID | ATTR_CMN_PARENTID ) + +#define HFS_ATTR_CMN_SEARCH_VALID (ATTR_CMN_NAME | ATTR_CMN_OBJID | \ + ATTR_CMN_PAROBJID | ATTR_CMN_CRTIME | \ + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ + ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME | \ + ATTR_CMN_FNDRINFO | ATTR_CMN_OWNERID | \ + ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | \ + ATTR_CMN_FILEID | ATTR_CMN_PARENTID ) + +#define HFS_ATTR_DIR_VALID (ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT | ATTR_DIR_MOUNTSTATUS) + +#define HFS_ATTR_DIR_SEARCH_VALID (ATTR_DIR_ENTRYCOUNT) + +#define HFS_ATTR_FILE_VALID (ATTR_FILE_LINKCOUNT |ATTR_FILE_TOTALSIZE | \ + ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | \ + ATTR_FILE_CLUMPSIZE | ATTR_FILE_DEVTYPE | \ + ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE | \ + ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE) + +#define HFS_ATTR_FILE_SEARCH_VALID (ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE | \ + ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE ) + +int hfs_readdirattr_internal(struct vnode *dvp, ReadDirBuff_s* psReadDirBuffer, int maxcount, uint32_t *newstate, int *eofflag, int *actualcount, uint64_t uCookie); +int hfs_scandir(struct vnode *dvp, ScanDirRequest_s* psScanDirRequest); +#endif /* lf_hfs_attrlist_h */ diff 
--git a/livefiles_hfs_plugin/lf_hfs_btree.c b/livefiles_hfs_plugin/lf_hfs_btree.c
new file mode 100644
index 0000000..dea9394
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_btree.c
@@ -0,0 +1,1952 @@

#include "lf_hfs.h"
#include "lf_hfs_defs.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_btrees_private.h"
#include "lf_hfs_btrees_internal.h"
#include "lf_hfs_btrees_io.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_utils.h"

//////////////////////////////////// Globals ////////////////////////////////////


/////////////////////////// BTree Module Entry Points ///////////////////////////



/*-------------------------------------------------------------------------------
 Routine:    BTOpenPath    -    Open a file for access as a B*Tree.

 Function:    Create BTree control block for a file, if necessary. Validates the
             file to be sure it looks like a BTree file.


 Input:        filePtr            - pointer to file to open as a B-tree
             keyCompareProc    - pointer to client's KeyCompare function

 Result:        noErr                - success
             paramErr            - required ptr was nil
             fsBTInvalidFileErr    -
             memFullErr            -
             != noErr            - failure
 -------------------------------------------------------------------------------*/

OSStatus BTOpenPath(FCB *filePtr, KeyCompareProcPtr keyCompareProc)
{
    OSStatus                err;
    BTreeControlBlockPtr    btreePtr;
    BTHeaderRec             *header;
    NodeRec                 nodeRec;

    ////////////////////// Preliminary Error Checking ///////////////////////////

    if ( filePtr == nil )
    {
        return  paramErr;
    }

    /*
     * Subsequent opens allow key compare proc to be changed.
     */
    if ( filePtr->fcbBTCBPtr != nil && keyCompareProc != nil) {
        btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
        btreePtr->keyCompareProc = keyCompareProc;
        return noErr;
    }

    /* File must be able to hold at least one minimum-size node. */
    if ( filePtr->fcbEOF < kMinNodeSize )
        return fsBTInvalidFileErr;


    //////////////////////// Allocate Control Block /////////////////////////////

    btreePtr = hfs_mallocz(sizeof(BTreeControlBlock));

    btreePtr->getBlockProc      = GetBTreeBlock;
    btreePtr->releaseBlockProc  = ReleaseBTreeBlock;
    btreePtr->setEndOfForkProc  = ExtendBTreeFile;
    btreePtr->keyCompareProc    = keyCompareProc;

    /////////////////////////// Read Header Node ////////////////////////////////

    nodeRec.buffer              = nil;    // so we can call ReleaseNode
    btreePtr->fileRefNum        = GetFileRefNumFromFCB(filePtr);
    filePtr->fcbBTCBPtr         = (Ptr) btreePtr;    // attach btree cb to file

    /* Prefer doing I/O a physical block at a time */
    nodeRec.blockSize = VTOHFS(btreePtr->fileRefNum)->hfs_physical_block_size;

    /* Start with the allocation block size for regular files. */
    if (FTOC(filePtr)->c_fileid >= kHFSFirstUserCatalogNodeID)
    {
        nodeRec.blockSize = FCBTOVCB(filePtr)->blockSize;
    }
    REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);

    // it is now safe to call M_ExitOnError (err)

    err = SetBTreeBlockSize (btreePtr->fileRefNum, nodeRec.blockSize, 1);
    M_ExitOnError (err);


    err = GetBTreeBlock(btreePtr->fileRefNum,
                        kHeaderNodeNum,
                        kGetBlock,
                        &nodeRec );
    if (err != noErr)
    {
        nodeRec.buffer = nil;
        nodeRec.blockHeader = nil;

        LFHFS_LOG(LEVEL_ERROR, "BTOpen: getNodeProc returned error getting header node.");
        goto ErrorExit;
    }
    ++btreePtr->numGetNodes;
    /* The BTHeaderRec immediately follows the node descriptor in the header node. */
    header = (BTHeaderRec*) ((uintptr_t)nodeRec.buffer + sizeof(BTNodeDescriptor));


    ///////////////////////////// verify header /////////////////////////////////

    err = VerifyHeader (filePtr, header);
    M_ExitOnError (err);


    ///////////////////// Initalize fields from header //////////////////////////

    /* 0x4244 is the HFS (plain) volume signature 'BD'; a 512-byte node on a
     * non-HFS (i.e. HFS+) volume is rejected. */
    if ( (FCBTOVCB(filePtr)->vcbSigWord != 0x4244) && (header->nodeSize == 512))
    {
        LFHFS_LOG(LEVEL_ERROR, "BTOpenPath: wrong node size for HFS+ volume!");
        hfs_assert(0);
    }

    btreePtr->treeDepth         = header->treeDepth;
    btreePtr->rootNode          = header->rootNode;
    btreePtr->leafRecords       = header->leafRecords;
    btreePtr->firstLeafNode     = header->firstLeafNode;
    btreePtr->lastLeafNode      = header->lastLeafNode;
    btreePtr->nodeSize          = header->nodeSize;
    btreePtr->maxKeyLength      = header->maxKeyLength;
    btreePtr->totalNodes        = header->totalNodes;
    btreePtr->freeNodes         = header->freeNodes;
    if (FTOC(filePtr)->c_fileid >= kHFSFirstUserCatalogNodeID)
        filePtr->ff_clumpsize = header->clumpSize;
    btreePtr->btreeType         = header->btreeType;

    btreePtr->keyCompareType    = header->keyCompareType;

    btreePtr->attributes        = header->attributes;

    if ( btreePtr->maxKeyLength > 40 )
        btreePtr->attributes |= (kBTBigKeysMask + kBTVariableIndexKeysMask);    //we need a way to save these attributes

    /////////////////////// Initialize dynamic fields ///////////////////////////

    btreePtr->version           = kBTreeVersion;
    btreePtr->flags             = 0;
    btreePtr->writeCount        = 1;

    /////////////////////////// Check Header Node ///////////////////////////////

    // set kBadClose attribute bit, and UpdateNode

    /* b-tree node size must be at least as big as the logical block size */
    if (btreePtr->nodeSize < VTOHFS(btreePtr->fileRefNum)->hfs_logical_block_size)
    {
        /*
         * If this tree has any records or the media is writeable then
         * we cannot mount using the current physical block size.
         */
        if (btreePtr->leafRecords > 0 ||
            VTOHFS(btreePtr->fileRefNum)->hfs_flags & HFS_WRITEABLE_MEDIA)
        {
            err = fsBTBadNodeSize;
            goto ErrorExit;
        }
    }

    /*
     * If the actual node size is different than the amount we read,
     * then release and trash this block, and re-read with the correct
     * node size.
     */
    if ( btreePtr->nodeSize != nodeRec.blockSize )
    {
        err = SetBTreeBlockSize (btreePtr->fileRefNum, btreePtr->nodeSize, 32);
        M_ExitOnError (err);

        /*
         * Need to use kTrashBlock option to force the
         * buffer cache to read the entire node
         */
        err = ReleaseBTreeBlock(btreePtr->fileRefNum, &nodeRec, kTrashBlock);
        ++btreePtr->numReleaseNodes;
        M_ExitOnError (err);

        err = GetNode (btreePtr, kHeaderNodeNum, 0, &nodeRec );
        M_ExitOnError (err);
    }

    //total nodes * node size <= LEOF?


    err = ReleaseNode (btreePtr, &nodeRec);
    M_ExitOnError (err);

    /*
     * Under Mac OS, b-tree nodes can be non-contiguous on disk when the
     * allocation block size is smaller than the b-tree node size.
     *
     * If journaling is turned on for this volume we can't deal with this
     * situation and so we bail out.  If journaling isn't on it's ok as
     * hfs_strategy_fragmented() deals with it.  Journaling can't support
     * this because it assumes that if you give it a block that it's
     * contiguous on disk.
     */
    /* NOTE(review): this early return leaves fcbBTCBPtr attached and btreePtr
     * allocated, unlike the ErrorExit path — confirm the caller tears down the
     * FCB on this error. */
    if ( FCBTOHFS(filePtr)->jnl && !NodesAreContiguous(FCBTOVCB(filePtr), filePtr, btreePtr->nodeSize) ) {
        return fsBTInvalidNodeErr;
    }

    //////////////////////////////// Success ////////////////////////////////////

    //align LEOF to multiple of node size?    - just on close

    return noErr;


    /////////////////////// Error - Clean up and Exit ///////////////////////////

ErrorExit:

    filePtr->fcbBTCBPtr = nil;
    (void) ReleaseNode (btreePtr, &nodeRec);
    hfs_free(btreePtr);

    return err;
}



/*-------------------------------------------------------------------------------
 Routine:    BTClosePath    -    Flush BTree Header and Deallocate Memory for BTree.

 Function:    Flush the BTreeControlBlock fields to header node, and delete BTree control
             block and key descriptor associated with the file if filePtr is last
             path of type kBTreeType ('btre').


 Input:        filePtr        - pointer to file to delete BTree control block for.

 Result:        noErr            - success
             fsBTInvalidFileErr    -
             != noErr        - failure
 -------------------------------------------------------------------------------*/

OSStatus    BTClosePath    (FCB                    *filePtr)
{
    OSStatus                err;
    BTreeControlBlockPtr    btreePtr;

    btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;

    if (btreePtr == nil)
        return fsBTInvalidFileErr;

    REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);

    ////////////////////// Check for other BTree Paths //////////////////////////

    btreePtr->attributes &= ~kBTBadCloseMask;        // clear "bad close" attribute bit
    err = UpdateHeader (btreePtr, true);
    M_ExitOnError (err);

    hfs_free(btreePtr);
    filePtr->fcbBTCBPtr = nil;

    return    noErr;

    /////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

    return    err;
}



/*-------------------------------------------------------------------------------
 Routine:    BTSearchRecord    -    Search BTree for a record with a matching key.
 Function:    Search for position in B*Tree indicated by searchKey. If a valid node hint
             is provided, it will be searched first, then SearchTree will be called.
             If a BTreeIterator is provided, it will be set to the position found as
             a result of the search. If a record exists at that position, and a BufferDescriptor
             is supplied, the record will be copied to the buffer (as much as will fit),
             and recordLen will be set to the length of the record.

             If an error other than fsBTRecordNotFoundErr occurs, the BTreeIterator, if any,
             is invalidated, and recordLen is set to 0.


 Input:        pathPtr            - pointer to path for BTree file.
             searchKey        - pointer to search key to match.
             hintPtr            - pointer to hint (may be nil)

 Output:        record            - pointer to BufferDescriptor containing record
             recordLen        - length of data at recordPtr
             iterator        - pointer to BTreeIterator indicating position result of search

 Result:        noErr            - success, record contains copy of record found
             fsBTRecordNotFoundErr    - record was not found, no data copied
             fsBTInvalidFileErr    - no BTreeControlBlock is allocated for the fork
             fsBTInvalidKeyLengthErr    -
             != noErr        - failure
 -------------------------------------------------------------------------------*/

OSStatus    BTSearchRecord        (FCB                        *filePtr,
                                 BTreeIterator                *searchIterator,
                                 FSBufferDescriptor            *record,
                                 u_int16_t                    *recordLen,
                                 BTreeIterator                *resultIterator )
{
    OSStatus                err;
    BTreeControlBlockPtr    btreePtr;
    TreePathTable           treePathTable;
    u_int32_t               nodeNum = 0;
    BlockDescriptor         node;
    u_int16_t               index = 0;
    BTreeKeyPtr             keyPtr = NULL;
    RecordPtr               recordPtr;
    u_int16_t               len;
    Boolean                 foundRecord;
    Boolean                 validHint;

    if (filePtr == nil)
    {
        return    paramErr;
    }

    if (searchIterator == nil)
    {
        return    paramErr;
    }

    node.buffer = nil;
    node.blockHeader = nil;

    btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
    if (btreePtr == nil)
    {
        return    fsBTInvalidFileErr;
    }

    REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);

    foundRecord = false;

    ////////////////////////////// Take A Hint //////////////////////////////////

    /* Fast path: if the iterator carries a still-valid node hint, probe that
     * leaf node directly before falling back to a full tree search. */
    err = IsItAHint (btreePtr, searchIterator, &validHint);
    M_ExitOnError (err);

    if (validHint)
    {
        nodeNum = searchIterator->hint.nodeNum;

        err = GetNode (btreePtr, nodeNum, kGetNodeHint, &node);
        if( err == noErr )
        {
            if ( ((BTNodeDescriptor*) node.buffer)->kind == kBTLeafNode &&
                ((BTNodeDescriptor*) node.buffer)->numRecords > 0 )
            {
                foundRecord = SearchNode (btreePtr, node.buffer, &searchIterator->key, &index);

                //if !foundRecord, we could still skip tree search if ( 0 < index < numRecords )
            }

            if (foundRecord == false)
            {
                err = ReleaseNode (btreePtr, &node);
                M_ExitOnError (err);
            }
            else
            {
                ++btreePtr->numValidHints;
            }
        }

        if( foundRecord == false )
            (void) BTInvalidateHint( searchIterator );
    }


    //////////////////////////// Search The Tree ////////////////////////////////

    if (foundRecord == false)
    {
        err = SearchTree ( btreePtr, &searchIterator->key, treePathTable, &nodeNum, &node, &index);
        switch (err)
        {
            case noErr:
                foundRecord = true;
                break;
            case fsBTRecordNotFoundErr:
                break;
            default:
                goto ErrorExit;
        }
    }


    //////////////////////////// Get the Record /////////////////////////////////

    if (foundRecord == true)
    {
        //XXX Should check for errors! Or BlockMove could choke on recordPtr!!!
        GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len);

        if (recordLen != nil)            *recordLen = len;

        if (record != nil)
        {
            ByteCount recordSize;

            recordSize = record->itemCount * record->itemSize;

            /* Copy at most what fits in the caller's buffer. */
            if (len > recordSize) len = recordSize;

            BlockMoveData (recordPtr, record->bufferAddress, len);
        }
    }


    /////////////////////// Success - Update Iterator ///////////////////////////

    if (resultIterator != nil)
    {
        if (foundRecord) {
            resultIterator->hint.writeCount    = btreePtr->writeCount;
            resultIterator->hint.nodeNum    = nodeNum;
            resultIterator->hint.index        = index;
        }
#if DEBUG
        resultIterator->hint.reserved1    = 0;
        resultIterator->hint.reserved2    = 0;
        resultIterator->version            = 0;
        resultIterator->reserved        = 0;
#endif
        // copy the key in the BTree when found rather than searchIterator->key to get proper case/diacriticals
        if (foundRecord == true) {
            BlockMoveData ((Ptr)keyPtr, (Ptr)&resultIterator->key, CalcKeySize(btreePtr, keyPtr));
        } else {
            BlockMoveData ((Ptr)&searchIterator->key, (Ptr)&resultIterator->key, CalcKeySize(btreePtr, &searchIterator->key));
        }
    }

    err = ReleaseNode (btreePtr, &node);
    M_ExitOnError (err);

    if (foundRecord == false)    return    fsBTRecordNotFoundErr;
    else                        return    noErr;


    /////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

    if (recordLen != nil)
        *recordLen = 0;

    if (resultIterator != nil)
    {
        resultIterator->hint.writeCount    = 0;
        resultIterator->hint.nodeNum    = 0;
        resultIterator->hint.index        = 0;
        resultIterator->hint.reserved1    = 0;
        resultIterator->hint.reserved2    = 0;

        resultIterator->version            = 0;
        resultIterator->reserved        = 0;
        resultIterator->key.length16    = 0;    // zero out two bytes to cover both types of keys
    }

    if ( err == fsBTEmptyErr )
        err = fsBTRecordNotFoundErr;

    return err;
}



/*-------------------------------------------------------------------------------
 Routine:    BTIterateRecord    -
Find the first, next, previous, or last record.

 Function:    Find the first, next, previous, or last record in the BTree

 Input:        pathPtr            - pointer to path iterate records for.
             operation        - iteration operation (first,next,prev,last)
             iterator        - pointer to iterator indicating start position

 Output:        iterator        - iterator is updated to indicate new position
             newKeyPtr        - pointer to buffer to copy key found by iteration
             record            - pointer to buffer to copy record found by iteration
             recordLen        - length of record

 Result:        noErr            - success
             != noErr        - failure
 -------------------------------------------------------------------------------*/

OSStatus    BTIterateRecord        (FCB                        *filePtr,
                                 BTreeIterationOperation    operation,
                                 BTreeIterator                *iterator,
                                 FSBufferDescriptor            *record,
                                 u_int16_t                    *recordLen )
{
    OSStatus                err;
    BTreeControlBlockPtr    btreePtr;
    BTreeKeyPtr             keyPtr;
    RecordPtr               recordPtr;
    u_int16_t               len;

    Boolean                 foundRecord;
    u_int32_t               nodeNum;

    BlockDescriptor         left,        node,        right;
    u_int16_t               index;


    ////////////////////////// Priliminary Checks ///////////////////////////////

    left.buffer        = nil;
    left.blockHeader   = nil;
    right.buffer       = nil;
    right.blockHeader  = nil;
    node.buffer        = nil;
    node.blockHeader   = nil;


    if (filePtr == nil)
    {
        return    paramErr;
    }

    btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
    if (btreePtr == nil)
    {
        return    fsBTInvalidFileErr;            //handle properly
    }

    REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);

    if ((operation != kBTreeFirstRecord)    &&
        (operation != kBTreeNextRecord)        &&
        (operation != kBTreeCurrentRecord)    &&
        (operation != kBTreePrevRecord)        &&
        (operation != kBTreeLastRecord))
    {
        err = fsInvalidIterationMovmentErr;
        goto ErrorExit;
    }

    /////////////////////// Find First or Last Record ///////////////////////////

    if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord))
    {
        if (operation == kBTreeFirstRecord)        nodeNum = btreePtr->firstLeafNode;
        else                                    nodeNum = btreePtr->lastLeafNode;

        if (nodeNum == 0)
        {
            err = fsBTEmptyErr;
            goto ErrorExit;
        }

        err = GetNode (btreePtr, nodeNum, 0, &node);
        M_ExitOnError (err);

        if ( ((NodeDescPtr) node.buffer)->kind            != kBTLeafNode ||
            ((NodeDescPtr) node.buffer)->numRecords    <=  0 )
        {
            err = ReleaseNode (btreePtr, &node);
            M_ExitOnError (err);

            err = fsBTInvalidNodeErr;
            LFHFS_LOG(LEVEL_ERROR , "BTIterateRecord() found invalid btree node on volume %s\n", FCBTOVCB(filePtr)->vcbVN);
            hfs_mark_inconsistent(FCBTOVCB(filePtr), HFS_INCONSISTENCY_DETECTED);
            goto ErrorExit;
        }

        if (operation == kBTreeFirstRecord)        index = 0;
        else                                    index = ((BTNodeDescriptor*) node.buffer)->numRecords - 1;

        goto CopyData;                        //is there a cleaner way?
    }


    //////////////////////// Find Iterator Position /////////////////////////////

    // Not called for (operation == kBTreeFirstRecord || operation == kBTreeLastRecord)
    err = FindIteratorPosition (btreePtr, iterator,
                                &left, &node, &right, &nodeNum, &index, &foundRecord);
    M_ExitOnError (err);


    ///////////////////// Find Next Or Previous Record //////////////////////////

    if (operation == kBTreePrevRecord)
    {
        if (index > 0)
        {
            --index;
        }
        else
        {
            if (left.buffer == nil)
            {
                nodeNum = ((NodeDescPtr) node.buffer)->bLink;
                if ( nodeNum > 0)
                {
                    // BTree nodes are always grabbed in left to right order.
                    // Therefore release the current node before looking up the
                    // left node.
                    err = ReleaseNode(btreePtr, &node);
                    M_ExitOnError(err);

                    // Look up the left node
                    err = GetNode (btreePtr, nodeNum, 0, &left);
                    M_ExitOnError (err);

                    // Look up the current node again
                    err = GetRightSiblingNode (btreePtr, left.buffer, &node);
                    M_ExitOnError (err);
                } else {
                    err = fsBTStartOfIterationErr;
                    goto ErrorExit;
                }
            }
            //    Before we stomp on "right", we'd better release it if needed
            if (right.buffer != nil) {
                err = ReleaseNode(btreePtr, &right);
                M_ExitOnError(err);
            }
            right        = node;
            node        = left;
            left.buffer    = nil;
            index        = ((NodeDescPtr) node.buffer)->numRecords -1;
        }
    }
    else if (operation == kBTreeNextRecord)
    {
        if ((foundRecord != true) &&
            (((NodeDescPtr) node.buffer)->fLink == 0) &&
            (index == ((NodeDescPtr) node.buffer)->numRecords))
        {
            err = fsBTEndOfIterationErr;
            goto ErrorExit;
        }

        // we did not find the record but the index is already positioned correctly
        if ((foundRecord == false) && (index != ((NodeDescPtr) node.buffer)->numRecords))
            goto CopyData;

        // we found the record OR we have to look in the next node
        if (index < ((NodeDescPtr) node.buffer)->numRecords -1)
        {
            ++index;
        }
        else
        {
            if (right.buffer == nil)
            {
                nodeNum = ((NodeDescPtr) node.buffer)->fLink;
                if ( nodeNum > 0)
                {
                    err = GetNode (btreePtr, nodeNum, 0, &right);
                    M_ExitOnError (err);
                } else {
                    err = fsBTEndOfIterationErr;
                    goto ErrorExit;
                }
            }
            //    Before we stomp on "left", we'd better release it if needed
            if (left.buffer != nil) {
                err = ReleaseNode(btreePtr, &left);
                M_ExitOnError(err);
            }
            left         = node;
            node         = right;
            right.buffer = nil;
            index        = 0;
        }
    }
    else // operation == kBTreeCurrentRecord
    {
        // make sure we have something...
        if ((foundRecord != true) &&
            (index >= ((NodeDescPtr) node.buffer)->numRecords))
        {
            err = fsBTEndOfIterationErr;
            goto ErrorExit;
        }
    }

    //////////////////// Copy Record And Update Iterator ////////////////////////

CopyData:

    // added check for errors
    err = GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len);
    M_ExitOnError (err);

    if (recordLen != nil)
        *recordLen = len;

    if (record != nil)
    {
        ByteCount recordSize;

        recordSize = record->itemCount * record->itemSize;

        /* Copy at most what fits in the caller's buffer. */
        if (len > recordSize) len = recordSize;

        BlockMoveData (recordPtr, record->bufferAddress, len);
    }

    if (iterator != nil)                        // first & last do not require iterator
    {
        iterator->hint.writeCount    = btreePtr->writeCount;
        iterator->hint.nodeNum        = nodeNum;
        iterator->hint.index        = index;
        iterator->hint.reserved1    = 0;
        iterator->hint.reserved2    = 0;

        iterator->version            = 0;
        iterator->reserved            = 0;

        /* SER
         * Check for infinite loops by making sure we do not
         * process more leaf records, than can possibly be (or the BTree header
         * is seriously damaged)....a brute force method.
         */
        if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord))
            iterator->hitCount = 1;
        else if (operation != kBTreeCurrentRecord)
            iterator->hitCount += 1;
        /* Always use the highest max, in case the grows while iterating */
        iterator->maxLeafRecs = max(btreePtr->leafRecords, iterator->maxLeafRecs);

#if 0
        if (iterator->hitCount > iterator->maxLeafRecs + kNumLeafRecSlack)
        {
            err = fsBTInvalidNodeErr;
            LFHFS_LOG(LEVEL_ERROR , "BTIterateRecord() found invalid btree node on volume %s\n", FCBTOVCB(filePtr)->vcbVN);
            hfs_mark_inconsistent(FCBTOVCB(filePtr), HFS_INCONSISTENCY_DETECTED);
            goto ErrorExit;
        }
#endif

        BlockMoveData ((Ptr)keyPtr, (Ptr)&iterator->key, CalcKeySize(btreePtr, keyPtr));
    }


    ///////////////////////////// Release Nodes /////////////////////////////////

    err = ReleaseNode (btreePtr, &node);
    M_ExitOnError (err);

    if (left.buffer != nil)
    {
        err = ReleaseNode (btreePtr, &left);
        M_ExitOnError (err);
    }

    if (right.buffer != nil)
    {
        err = ReleaseNode (btreePtr, &right);
        M_ExitOnError (err);
    }

    return noErr;

    /////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

    (void)    ReleaseNode (btreePtr, &left);
    (void)    ReleaseNode (btreePtr, &node);
    (void)    ReleaseNode (btreePtr, &right);

    if (recordLen != nil)
        *recordLen = 0;

    if (iterator != nil)
    {
        iterator->hint.writeCount    = 0;
        iterator->hint.nodeNum        = 0;
        iterator->hint.index        = 0;
        iterator->hint.reserved1    = 0;
        iterator->hint.reserved2    = 0;

        iterator->version            = 0;
        iterator->reserved            = 0;
        iterator->key.length16        = 0;
    }

    if ( err == fsBTEmptyErr || err == fsBTEndOfIterationErr )
        err = fsBTRecordNotFoundErr;

    return err;
}


/*-------------------------------------------------------------------------------
 Routine:    BTIterateRecords

 Function:    Find a series of records

 Input:        filePtr        - b-tree file
             operation    - iteration operation (first,next,prev,last)
iterator - pointer to iterator indicating start position + callBackProc - pointer to routince to process a record + callBackState - pointer to state data (used by callBackProc) + + Output: iterator - iterator is updated to indicate new position + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus +BTIterateRecords(FCB *filePtr, BTreeIterationOperation operation, BTreeIterator *iterator, + IterateCallBackProcPtr callBackProc, void * callBackState) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + BTreeKeyPtr keyPtr; + RecordPtr recordPtr; + u_int16_t len; + Boolean foundRecord; + u_int32_t nodeNum; + BlockDescriptor left, node, right; + u_int16_t index; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + left.buffer = nil; + left.blockHeader = nil; + right.buffer = nil; + right.blockHeader = nil; + node.buffer = nil; + node.blockHeader = nil; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + if ((operation != kBTreeFirstRecord) && + (operation != kBTreeNextRecord) && + (operation != kBTreeCurrentRecord) && + (operation != kBTreePrevRecord) && + (operation != kBTreeLastRecord)) + { + err = fsInvalidIterationMovmentErr; + goto ErrorExit; + } + + /////////////////////// Find First or Last Record /////////////////////////// + + if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord)) + { + if (operation == kBTreeFirstRecord) + nodeNum = btreePtr->firstLeafNode; + else + nodeNum = btreePtr->lastLeafNode; + + if (nodeNum == 0) + { + err = fsBTEmptyErr; + goto ErrorExit; + } + + err = GetNode(btreePtr, nodeNum, 0, &node); + M_ExitOnError(err); + + if ( ((NodeDescPtr)node.buffer)->kind != kBTLeafNode || + ((NodeDescPtr)node.buffer)->numRecords <= 0 ) + { + err = ReleaseNode(btreePtr, &node); + M_ExitOnError(err); + + err = fsBTInvalidNodeErr; + LFHFS_LOG(LEVEL_ERROR 
, "BTIterateRecords() found invalid btree node on volume %s\n", FCBTOVCB(filePtr)->vcbVN); + hfs_mark_inconsistent(FCBTOVCB(filePtr), HFS_INCONSISTENCY_DETECTED); + goto ErrorExit; + } + + if (operation == kBTreeFirstRecord) + index = 0; + else + index = ((BTNodeDescriptor*) node.buffer)->numRecords - 1; + + goto ProcessData; + } + + //////////////////////// Find Iterator Position ///////////////////////////// + + // Not called for (operation == kBTreeFirstRecord || operation == kBTreeLastRecord) + err = FindIteratorPosition(btreePtr, iterator, &left, &node, &right, + &nodeNum, &index, &foundRecord); + if (err == fsBTRecordNotFoundErr) + err = 0; + M_ExitOnError(err); + + + ///////////////////// Find Next Or Previous Record ////////////////////////// + + if (operation == kBTreePrevRecord) + { + if (index > 0) + { + --index; + } + else + { + if (left.buffer == nil) + { + nodeNum = ((NodeDescPtr) node.buffer)->bLink; + if ( nodeNum > 0) + { + // BTree nodes are always grabbed in left to right order. + // Therefore release the current node before looking up the + // left node. 
+ err = ReleaseNode(btreePtr, &node); + M_ExitOnError(err); + + // Look up the left node + err = GetNode (btreePtr, nodeNum, 0, &left); + M_ExitOnError (err); + + // Look up the current node again + err = GetRightSiblingNode (btreePtr, left.buffer, &node); + M_ExitOnError (err); + } else { + err = fsBTStartOfIterationErr; + goto ErrorExit; + } + } + // Before we stomp on "right", we'd better release it if needed + if (right.buffer != nil) { + err = ReleaseNode(btreePtr, &right); + M_ExitOnError(err); + } + right = node; + node = left; + left.buffer = nil; + index = ((NodeDescPtr) node.buffer)->numRecords -1; + } + } + else if (operation == kBTreeNextRecord) + { + if ((foundRecord != true) && + (((NodeDescPtr)node.buffer)->fLink == 0) && + (index == ((NodeDescPtr)node.buffer)->numRecords)) + { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + + // we did not find the record but the index is already positioned correctly + if ((foundRecord == false) && (index != ((NodeDescPtr)node.buffer)->numRecords)) + goto ProcessData; + + // we found the record OR we have to look in the next node + if (index < ((NodeDescPtr)node.buffer)->numRecords -1) + { + ++index; + } + else + { + if (right.buffer == nil) + { + nodeNum = ((NodeDescPtr)node.buffer)->fLink; + if ( nodeNum > 0) + { + err = GetNode(btreePtr, nodeNum, 0, &right); + M_ExitOnError(err); + } else { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + } + // Before we stomp on "left", we'd better release it if needed + if (left.buffer != nil) { + err = ReleaseNode(btreePtr, &left); + M_ExitOnError(err); + } + left = node; + node = right; + right.buffer = nil; + index = 0; + } + } + else // operation == kBTreeCurrentRecord + { + // make sure we have something... 
+ if ((foundRecord != true) && + (index >= ((NodeDescPtr)node.buffer)->numRecords)) + { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + } + + //////////////////// Process Records Using Callback //////////////////////// + +ProcessData: + err = GetRecordByIndex(btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len); + if (err) { + err = btBadNode; + goto ErrorExit; + } + + while (err == 0) { + if (callBackProc(keyPtr, recordPtr, callBackState) == 0) + break; + + if ((index+1) < ((NodeDescPtr)node.buffer)->numRecords) { + ++index; + } else { + if (right.buffer == nil) + { + nodeNum = ((NodeDescPtr)node.buffer)->fLink; + if ( nodeNum > 0) + { + err = GetNode(btreePtr, nodeNum, 0, &right); + M_ExitOnError(err); + } else { + err = fsBTEndOfIterationErr; + break; + } + } + // Before we stomp on "left", we'd better release it if needed + if (left.buffer != nil) { + err = ReleaseNode(btreePtr, &left); + M_ExitOnError(err); + } + left = node; + node = right; + right.buffer = nil; + index = 0; + } + err = GetRecordByIndex(btreePtr, node.buffer, index, + &keyPtr, &recordPtr, &len); + if (err) { + err = btBadNode; + goto ErrorExit; + } + } + + + ///////////////// Update Iterator to Last Item Processed ///////////////////// + + + if (iterator != nil) // first & last have optional iterator + { + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = nodeNum; + iterator->hint.index = index; + iterator->version = 0; + + BlockMoveData((Ptr)keyPtr, (Ptr)&iterator->key, CalcKeySize(btreePtr, keyPtr)); + } + M_ExitOnError(err); + + + ///////////////////////////// Release Nodes ///////////////////////////////// + + err = ReleaseNode(btreePtr, &node); + M_ExitOnError(err); + + if (left.buffer != nil) + { + err = ReleaseNode(btreePtr, &left); + M_ExitOnError(err); + } + + if (right.buffer != nil) + { + err = ReleaseNode(btreePtr, &right); + M_ExitOnError(err); + } + + return noErr; + + /////////////////////// Error - Clean Up and Exit 
/////////////////////////// + +ErrorExit: + + (void) ReleaseNode(btreePtr, &left); + (void) ReleaseNode(btreePtr, &node); + (void) ReleaseNode(btreePtr, &right); + + if (iterator != nil) + { + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->version = 0; + iterator->key.length16 = 0; + } + + if ( err == fsBTEmptyErr || err == fsBTEndOfIterationErr ) + err = fsBTRecordNotFoundErr; + + return err; +} + + +//////////////////////////////// BTInsertRecord ///////////////////////////////// + +OSStatus BTInsertRecord (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + u_int32_t nodesNeeded; + BlockDescriptor nodeRec; + u_int32_t insertNodeNum; + u_int16_t index; + Boolean recordFit; + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + nodeRec.blockHeader = nil; + + err = CheckInsertParams (filePtr, iterator, record, recordLen); + if (err != noErr) + return err; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + + ///////////////////////// Find Insert Position ////////////////////////////// + + // always call SearchTree for Insert + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + + switch (err) // set/replace/insert decision point + { + case noErr: err = fsBTDuplicateRecordErr; + goto ErrorExit; + + case fsBTRecordNotFoundErr: break; + + case fsBTEmptyErr: // if tree empty add 1st leaf node + + if (btreePtr->freeNodes == 0) + { + err = ExtendBTree (btreePtr, btreePtr->totalNodes + 1); + M_ExitOnError (err); + } + + err = AllocateNode (btreePtr, &insertNodeNum); + M_ExitOnError (err); + + err = GetNewNode (btreePtr, insertNodeNum, &nodeRec); + M_ExitOnError (err); + + // XXXdbg + 
ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + ((NodeDescPtr)nodeRec.buffer)->kind = kBTLeafNode; + ((NodeDescPtr)nodeRec.buffer)->height = 1; + + recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, 0, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen ); + if (recordFit != true) + { + err = fsBTRecordTooLargeErr; + goto ErrorExit; + } + + /* + * Update the B-tree control block. Do this before + * calling UpdateNode since it will compare the node's + * height with treeDepth. + */ + btreePtr->treeDepth = 1; + btreePtr->rootNode = insertNodeNum; + btreePtr->firstLeafNode = insertNodeNum; + btreePtr->lastLeafNode = insertNodeNum; + + err = UpdateNode (btreePtr, &nodeRec, 0, kLockTransaction); + M_ExitOnError (err); + + M_BTreeHeaderDirty (btreePtr); + + goto Success; + + default: goto ErrorExit; + } + + if (index > 0) + { + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, index, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen); + if (recordFit == true) + { + err = UpdateNode (btreePtr, &nodeRec, 0, kLockTransaction); + M_ExitOnError (err); + + goto Success; + } + } + + /////////////////////// Extend File If Necessary //////////////////////////// + + if ((btreePtr->treeDepth + 1UL) > btreePtr->freeNodes) + { + nodesNeeded = btreePtr->treeDepth + 1 + btreePtr->totalNodes - btreePtr->freeNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! 
+ ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + // no need to delete existing record + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, kInsertRecord, &insertNodeNum); + M_ExitOnError (err); + + + //////////////////////////////// Success //////////////////////////////////// + +Success: + ++btreePtr->writeCount; + ++btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + + // create hint + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + if (err == fsBTEmptyErr) + err = fsBTRecordNotFoundErr; + + return err; +} + + +//////////////////////////////// BTReplaceRecord //////////////////////////////// + +OSStatus BTReplaceRecord (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + u_int32_t nodesNeeded; + BlockDescriptor nodeRec; + u_int32_t insertNodeNum; + u_int16_t index; + Boolean recordFit; + Boolean validHint; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + nodeRec.blockHeader = nil; + + err = CheckInsertParams (filePtr, iterator, record, recordLen); + if (err != noErr) + return err; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + ////////////////////////////// Take A Hint ////////////////////////////////// + 
+ err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + insertNodeNum = iterator->hint.nodeNum; + + err = GetNode (btreePtr, insertNodeNum, kGetNodeHint, &nodeRec); + if( err == noErr ) + { + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + + ++btreePtr->numValidHints; + + goto Success; + } + else + { + (void) BTInvalidateHint( iterator ); + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + } + else + { + (void) BTInvalidateHint( iterator ); + } + } + + + ////////////////////////////// Get A Clue /////////////////////////////////// + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + M_ExitOnError (err); // record must exit for Replace + + // optimization - if simple replace will work then don't extend btree + // if we tried this before, and failed because it wouldn't fit then we shouldn't try this again... + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + + goto Success; + } + + + //////////////////////////// Make Some Room ///////////////////////////////// + + if ((btreePtr->treeDepth + 1UL) > btreePtr->freeNodes) + { + nodesNeeded = btreePtr->treeDepth + 1 + btreePtr->totalNodes - btreePtr->freeNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! 
+ ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + DeleteRecord (btreePtr, nodeRec.buffer, index); // delete existing key/record + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, kReplaceRecord, &insertNodeNum); + M_ExitOnError (err); + + ++btreePtr->writeCount; /* writeCount changes only if the tree structure changed */ + +Success: + // create hint + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + return err; +} + + + +//////////////////////////////// BTUpdateRecord //////////////////////////////// + +OSStatus +BTUpdateRecord(FCB *filePtr, BTreeIterator *iterator, + IterateCallBackProcPtr callBackProc, void * callBackState) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + BlockDescriptor nodeRec; + RecordPtr recordPtr; + BTreeKeyPtr keyPtr; + u_int32_t insertNodeNum; + u_int16_t recordLen; + u_int16_t index; + Boolean validHint; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + nodeRec.blockHeader = nil; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + ////////////////////////////// Take A Hint ////////////////////////////////// + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + 
insertNodeNum = iterator->hint.nodeNum; + + err = GetNode (btreePtr, insertNodeNum, kGetNodeHint, &nodeRec); + if (err == noErr) + { + if (((NodeDescPtr)nodeRec.buffer)->kind == kBTLeafNode && + SearchNode (btreePtr, nodeRec.buffer, &iterator->key, &index)) + { + err = GetRecordByIndex(btreePtr, nodeRec.buffer, index, &keyPtr, &recordPtr, &recordLen); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + err = callBackProc(keyPtr, recordPtr, callBackState); + M_ExitOnError (err); + + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + + ++btreePtr->numValidHints; + + goto Success; + } + else + { + (void) BTInvalidateHint( iterator ); + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + } + else + { + (void) BTInvalidateHint( iterator ); + } + } + + ////////////////////////////// Get A Clue /////////////////////////////////// + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + M_ExitOnError (err); + + err = GetRecordByIndex(btreePtr, nodeRec.buffer, index, &keyPtr, &recordPtr, &recordLen); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); + + err = callBackProc(keyPtr, recordPtr, callBackState); + M_ExitOnError (err); + + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + +Success: + // create hint + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + return noErr; + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + return err; +} + + + +//////////////////////////////// BTDeleteRecord 
/////////////////////////////////
+
+OSStatus BTDeleteRecord (FCB *filePtr,
+ BTreeIterator *iterator )
+{
+ OSStatus err;
+ BTreeControlBlockPtr btreePtr;
+ TreePathTable treePathTable;
+ BlockDescriptor nodeRec;
+ u_int32_t nodesNeeded;
+ u_int32_t nodeNum;
+ u_int16_t index;
+
+
+ ////////////////////////// Preliminary Checks ///////////////////////////////
+
+ nodeRec.buffer = nil; // so we can call ReleaseNode
+ nodeRec.blockHeader = nil;
+
+ M_ReturnErrorIf (filePtr == nil, paramErr);
+ M_ReturnErrorIf (iterator == nil, paramErr);
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+ if (btreePtr == nil)
+ {
+ err = fsBTInvalidFileErr;
+ goto ErrorExit;
+ }
+
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);
+
+
+ /////////////////////////////// Find Key ////////////////////////////////////
+
+ // check hint for simple delete case (index > 0, numRecords > 2)
+
+ err = SearchTree (btreePtr, &iterator->key, treePathTable, &nodeNum, &nodeRec, &index);
+ M_ExitOnError (err); // record must exist for Delete
+
+
+ /////////////////////// Extend File If Necessary ////////////////////////////
+
+ /*
+ * Worst case: we delete the first record in the tree and
+ * following key is sufficiently larger to cause all parents to
+ * require splitting and we need a new root node and a new map
+ * node.
+ */ + if (index == 0 && btreePtr->treeDepth + 1 > btreePtr->freeNodes) + { + nodesNeeded = btreePtr->treeDepth + btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) + ++nodesNeeded; + + if (nodesNeeded - btreePtr->totalNodes > btreePtr->freeNodes) { + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + } + + ///////////////////////////// Delete Record ///////////////////////////////// + + err = DeleteTree (btreePtr, treePathTable, &nodeRec, index, 1); + M_ExitOnError (err); + + ++btreePtr->writeCount; + --btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + + iterator->hint.nodeNum = 0; + + return noErr; + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, &nodeRec); + + return err; +} + + + +OSStatus BTGetInformation (FCB *filePtr, + u_int16_t file_version, + BTreeInfoRec *info ) +{ +#pragma unused (file_version) + + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + /* + * XXX SER + * This should not require the whole tree to be locked, just maybe the BTreeControlBlockPtr + * + * REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + */ + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (info == nil, paramErr); + + //check version? 
+
+ info->nodeSize = btreePtr->nodeSize;
+ info->maxKeyLength = btreePtr->maxKeyLength;
+ info->treeDepth = btreePtr->treeDepth;
+ info->numRecords = btreePtr->leafRecords;
+ info->numNodes = btreePtr->totalNodes;
+ info->numFreeNodes = btreePtr->freeNodes;
+ info->lastfsync = btreePtr->lastfsync;
+ info->keyCompareType = btreePtr->keyCompareType;
+ return noErr;
+}
+
+// XXXdbg
+OSStatus
+BTIsDirty(FCB *filePtr)
+{
+ BTreeControlBlockPtr btreePtr;
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+ return TreeIsDirty(btreePtr);
+}
+
+/*-------------------------------------------------------------------------------
+ Routine: BTFlushPath - Flush BTreeControlBlock to Header Node.
+
+ Function: Write the in-memory BTreeControlBlock state out to the on-disk
+ B-tree header node, if it is dirty.
+
+
+ Input: filePtr - pointer to the B*Tree file to flush
+
+ Output: none
+
+ Result: noErr - success
+ != noErr - failure
+ -------------------------------------------------------------------------------*/
+
+OSStatus BTFlushPath (FCB *filePtr)
+{
+ OSStatus err;
+ BTreeControlBlockPtr btreePtr;
+
+
+ M_ReturnErrorIf (filePtr == nil, paramErr);
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+
+ M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr);
+
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);
+
+ err = UpdateHeader (btreePtr, false);
+
+ return err;
+}
+
+
+/*-------------------------------------------------------------------------------
+ Routine: BTReload - Reload B-tree Header Data.
+
+ Function: Reload B-tree header data from disk. This is called after fsck
+ has made repairs to the root filesystem. The filesystem is
+ mounted read-only when BTReload is called.
+
+
+ Input: filePtr - the B*Tree file that needs its header updated
+
+ Output: none
+
+ Result: noErr - success
+ != noErr - failure
+ -------------------------------------------------------------------------------*/
+
+OSStatus
+BTReloadData(FCB *filePtr)
+{
+ OSStatus err;
+ BTreeControlBlockPtr btreePtr;
+ BlockDescriptor node;
+ BTHeaderRec *header;
+
+
+ node.buffer = nil;
+ node.blockHeader = nil;
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+ if (btreePtr == nil)
+ return (fsBTInvalidFileErr);
+
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);
+
+ err = GetNode(btreePtr, kHeaderNodeNum, 0, &node);
+ if (err != noErr)
+ return (err);
+
+ header = (BTHeaderRec*)((char *)node.buffer + sizeof(BTNodeDescriptor));
+ if ((err = VerifyHeader (filePtr, header)) == 0) {
+ btreePtr->treeDepth = header->treeDepth;
+ btreePtr->rootNode = header->rootNode;
+ btreePtr->leafRecords = header->leafRecords;
+ btreePtr->firstLeafNode = header->firstLeafNode;
+ btreePtr->lastLeafNode = header->lastLeafNode;
+ btreePtr->maxKeyLength = header->maxKeyLength;
+ btreePtr->totalNodes = header->totalNodes;
+ btreePtr->freeNodes = header->freeNodes;
+ btreePtr->btreeType = header->btreeType;
+
+ btreePtr->flags &= (~kBTHeaderDirty);
+ }
+
+ (void) ReleaseNode(btreePtr, &node);
+
+ return err;
+}
+
+
+/*-------------------------------------------------------------------------------
+ Routine: BTInvalidateHint - Invalidates the hint within a BTreeIterator.
+
+ Function: Invalidates the hint within a BTreeIterator.
+ + + Input: iterator - pointer to BTreeIterator + + Output: iterator - iterator with the hint.nodeNum cleared + + Result: noErr - success + paramErr - iterator == nil + -------------------------------------------------------------------------------*/ + + +OSStatus BTInvalidateHint (BTreeIterator *iterator ) +{ + if (iterator == nil) + return paramErr; + + iterator->hint.nodeNum = 0; + + return noErr; +} + + + + +/*------------------------------------------------------------------------------- + Routine: BTGetLastSync + + Function: Returns the last time that this btree was flushed, does not include header. + + Input: filePtr - pointer file control block + + Output: lastfsync - time in seconds of last update + + Result: noErr - success + paramErr - iterator == nil + -------------------------------------------------------------------------------*/ + + +OSStatus BTGetLastSync (FCB *filePtr, + u_int32_t *lastsync) +{ + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + /* Maybe instead of requiring a lock..an atomic set might be more appropriate */ + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (lastsync == nil, paramErr); + + *lastsync = btreePtr->lastfsync; + + return noErr; +} + + + + +/*------------------------------------------------------------------------------- + Routine: BTSetLastSync + + Function: Sets the last time that this btree was flushed, does not include header. 
+ + + Input: fcb - pointer file control block + + Output: lastfsync - time in seconds of last update + + Result: noErr - success + paramErr - iterator == nil + -------------------------------------------------------------------------------*/ + + +OSStatus BTSetLastSync (FCB *filePtr, + u_int32_t lastsync) +{ + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + /* Maybe instead of requiring a lock..an atomic set might be more appropriate */ + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (lastsync == 0, paramErr); + + btreePtr->lastfsync = lastsync; + + return noErr; +} + +OSStatus BTHasContiguousNodes (FCB *filePtr) +{ + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + + return NodesAreContiguous(FCBTOVCB(filePtr), filePtr, btreePtr->nodeSize); +} + + +/*------------------------------------------------------------------------------- + Routine: BTGetUserData + + Function: Read the user data area of the b-tree header node. 
+ + -------------------------------------------------------------------------------*/ +OSStatus +BTGetUserData(FCB *filePtr, void * dataPtr, int dataSize) +{ + BTreeControlBlockPtr btreePtr; + BlockDescriptor node; + char * offset; + OSStatus err; + + if (dataSize > kBTreeHeaderUserBytes) + return (EINVAL); + node.buffer = nil; + node.blockHeader = nil; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) + return (fsBTInvalidFileErr); + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + err = GetNode(btreePtr, kHeaderNodeNum, 0, &node); + if (err) + return (err); + + offset = (char *)node.buffer + sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec); + bcopy(offset, dataPtr, dataSize); + + (void) ReleaseNode(btreePtr, &node); + + return (0); +} + + +/*------------------------------------------------------------------------------- + Routine: BTSetUserData + + Function: Write the user data area of the b-tree header node. + -------------------------------------------------------------------------------*/ +OSStatus +BTSetUserData(FCB *filePtr, void * dataPtr, int dataSize) +{ + BTreeControlBlockPtr btreePtr; + BlockDescriptor node; + char * offset; + OSStatus err; + + if (dataSize > kBTreeHeaderUserBytes) + return (EINVAL); + node.buffer = nil; + node.blockHeader = nil; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) + return (fsBTInvalidFileErr); + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + err = GetNode(btreePtr, kHeaderNodeNum, 0, &node); + if (err) + return (err); + + ModifyBlockStart(btreePtr->fileRefNum, &node); + + offset = (char *)node.buffer + sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec); + bcopy(dataPtr, offset, dataSize); + + err = UpdateNode (btreePtr, &node, 0, 0); + + return (err); +} + diff --git a/livefiles_hfs_plugin/lf_hfs_btree.h b/livefiles_hfs_plugin/lf_hfs_btree.h new file mode 100644 index 0000000..f3e6d98 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree.h @@ -0,0 
+1,389 @@ +// +// lf_hfs_btree.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 20/03/2018. +// + +#ifndef lf_hfs_btree_h +#define lf_hfs_btree_h + + +#include "lf_hfs_file_mgr_internal.h" +#include "lf_hfs_btrees_internal.h" + + +/////////////////////////////////// Constants /////////////////////////////////// + +#define kBTreeVersion 1 +#define kMaxTreeDepth 16 + + +#define kHeaderNodeNum 0 +#define kKeyDescRecord 1 + + +// Header Node Record Offsets +enum { + kHeaderRecOffset = 0x000E, + kKeyDescRecOffset = 0x0078, + kHeaderMapRecOffset = 0x00F8 +}; + +#define kMinNodeSize 512 + +#define kMinRecordSize 6 +// where is minimum record size enforced? + +// miscellaneous BTree constants +enum { + kOffsetSize = 2 +}; + +// Insert Operations +typedef enum { + kInsertRecord = 0, + kReplaceRecord = 1 +} InsertType; + +// illegal string attribute bits set in mask +#define kBadStrAttribMask 0xCF + + + +//////////////////////////////////// Macros ///////////////////////////////////// + +#define M_NodesInMap(mapSize) ((mapSize) << 3) + +#define M_ClearBitNum(integer,bitNumber) ((integer) &= (~(1<<(bitNumber)))) +#define M_SetBitNum(integer,bitNumber) ((integer) |= (1<<(bitNumber))) +#define M_IsOdd(integer) (((integer) & 1) != 0) +#define M_IsEven(integer) (((integer) & 1) == 0) + +#define M_MapRecordSize(nodeSize) (nodeSize - sizeof (BTNodeDescriptor) - 6) +#define M_HeaderMapRecordSize(nodeSize) (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) - 128 - 8) + +#define M_SWAP_BE16_ClearBitNum(integer,bitNumber) ((integer) &= SWAP_BE16(~(1<<(bitNumber)))) +#define M_SWAP_BE16_SetBitNum(integer,bitNumber) ((integer) |= SWAP_BE16(1<<(bitNumber))) + +///////////////////////////////////// Types ///////////////////////////////////// + +typedef struct BTreeControlBlock { // fields specific to BTree CBs + + u_int8_t keyCompareType; /* Key string Comparison Type */ + u_int8_t btreeType; + u_int16_t treeDepth; + FileReference fileRefNum; // refNum of btree file + 
KeyCompareProcPtr keyCompareProc; + u_int32_t rootNode; + u_int32_t leafRecords; + u_int32_t firstLeafNode; + u_int32_t lastLeafNode; + u_int16_t nodeSize; + u_int16_t maxKeyLength; + u_int32_t totalNodes; + u_int32_t freeNodes; + + u_int16_t reserved3; // 4-byte alignment + + // new fields + int16_t version; + u_int32_t flags; // dynamic flags + u_int32_t attributes; // persistent flags + u_int32_t writeCount; + u_int32_t lastfsync; /* Last time that this was fsynced */ + + GetBlockProcPtr getBlockProc; + ReleaseBlockProcPtr releaseBlockProc; + SetEndOfForkProcPtr setEndOfForkProc; + + // statistical information + u_int32_t numGetNodes; + u_int32_t numGetNewNodes; + u_int32_t numReleaseNodes; + u_int32_t numUpdateNodes; + u_int32_t numMapNodesRead; // map nodes beyond header node + u_int32_t numHintChecks; + u_int32_t numPossibleHints; // Looks like a formated hint + u_int32_t numValidHints; // Hint used to find correct record. + u_int32_t reservedNodes; + BTreeIterator iterator; // useable when holding exclusive b-tree lock + +#if DEBUG + void *madeDirtyBy[2]; +#endif +} BTreeControlBlock, *BTreeControlBlockPtr; + +u_int32_t CalcKeySize(const BTreeControlBlock *btcb, const BTreeKey *key); +#define CalcKeySize(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? ((key)->length16 + 2) : ((key)->length8 + 1) ) + +u_int32_t KeyLength(const BTreeControlBlock *btcb, const BTreeKey *key); +#define KeyLength(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? (key)->length16 : (key)->length8 ) + + + +typedef enum { + kBTHeaderDirty = 0x00000001 +} BTreeFlags; + +static inline void M_BTreeHeaderDirty(BTreeControlBlock *bt) { +#if DEBUG + bt->madeDirtyBy[0] = __builtin_return_address(0); + bt->madeDirtyBy[1] = __builtin_return_address(1); +#endif + bt->flags |= kBTHeaderDirty; +} + +typedef int8_t *NodeBuffer; +typedef BlockDescriptor NodeRec, *NodePtr; //remove this someday... 
+ + + + +//// Tree Path Table - constructed by SearchTree, used by InsertTree and DeleteTree + +typedef struct { + u_int32_t node; // node number + u_int16_t index; + u_int16_t reserved; // align size to a power of 2 +} TreePathRecord, *TreePathRecordPtr; + +typedef TreePathRecord TreePathTable [kMaxTreeDepth]; + + +//// InsertKey - used by InsertTree, InsertLevel and InsertNode + +struct InsertKey { + BTreeKeyPtr keyPtr; + u_int8_t * recPtr; + u_int16_t keyLength; + u_int16_t recSize; + Boolean replacingKey; + Boolean skipRotate; +}; + +typedef struct InsertKey InsertKey; + + +//// For Notational Convenience + +typedef BTNodeDescriptor* NodeDescPtr; +typedef u_int8_t *RecordPtr; +typedef BTreeKeyPtr KeyPtr; + + +//////////////////////////////////// Globals //////////////////////////////////// + + +//////////////////////////////////// Macros ///////////////////////////////////// +// Exit function on error +#define M_ExitOnError( result ) do { if ( ( result ) != noErr ) goto ErrorExit; } while(0) + +// Test for passed condition and return if true +#define M_ReturnErrorIf( condition, error ) do { if ( condition ) return( error ); } while(0) + +//////////////////////////////// Key Operations ///////////////////////////////// + +int32_t CompareKeys (BTreeControlBlockPtr btreePtr, + KeyPtr searchKey, + KeyPtr trialKey ); + +//////////////////////////////// Map Operations ///////////////////////////////// + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, + u_int32_t *nodeNum); + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum); + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + u_int32_t nodes ); + +u_int32_t CalcMapBits (BTreeControlBlockPtr btreePtr); + + +void BTUpdateReserve (BTreeControlBlockPtr btreePtr, + int nodes); + +//////////////////////////////// Misc Operations //////////////////////////////// + +u_int16_t CalcKeyRecordSize (u_int16_t keySize, + u_int16_t recSize ); + +OSStatus VerifyHeader (FCB *filePtr, + BTHeaderRec 
*header ); + +OSStatus UpdateHeader (BTreeControlBlockPtr btreePtr, + Boolean forceWrite ); + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + u_int32_t *nodeNum, + u_int16_t *index, + Boolean *foundRecord ); + +OSStatus CheckInsertParams (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen ); + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen, + Boolean *recordInserted ); + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, + BTreeIterator *iterator, + Boolean *answer ); + +extern OSStatus TreeIsDirty(BTreeControlBlockPtr btreePtr); + +//////////////////////////////// Node Operations //////////////////////////////// + +//// Node Operations + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + u_int32_t flags, + NodeRec *returnNodePtr ); + +/* Flags for GetNode() */ +#define kGetNodeHint 0x1 /* If set, the node is being looked up using a hint */ + +OSStatus GetLeftSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *left ); + +#define GetLeftSiblingNode(btree,node,left) GetNode ((btree), ((NodeDescPtr)(node))->bLink, 0, (left)) + +OSStatus GetRightSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *right ); + +#define GetRightSiblingNode(btree,node,right) GetNode ((btree), ((NodeDescPtr)(node))->fLink, 0, (right)) + + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + NodeRec *returnNodePtr ); + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr, + u_int32_t transactionID, + u_int32_t flags ); + +//// Node Buffer Operations + +void 
ClearNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +u_int16_t GetNodeDataSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +u_int16_t GetNodeFreeSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + + +//// Record Operations + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + RecordPtr recPtr, + u_int16_t recSize ); + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + KeyPtr keyPtr, + u_int16_t keyLength, + RecordPtr recPtr, + u_int16_t recSize ); + +void DeleteRecord (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + + +Boolean SearchNode (BTreeControlBlockPtr btree, + NodeDescPtr node, + KeyPtr searchKey, + u_int16_t *index ); + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index, + KeyPtr *keyPtr, + u_int8_t * *dataPtr, + u_int16_t *dataSize ); + +u_int8_t * GetRecordAddress (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + +#define GetRecordAddress(btreePtr,node,index) ((u_int8_t *)(node) + (*(short *) ((u_int8_t *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))) + + +u_int16_t GetRecordSize (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + +u_int32_t GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + u_int16_t index ); + +void MoveRecordsLeft (u_int8_t * src, + u_int8_t * dst, + u_int16_t bytesToMove ); + +#define MoveRecordsLeft(src,dst,bytes) bcopy((src),(dst),(bytes)) + +void MoveRecordsRight (u_int8_t * src, + u_int8_t * dst, + u_int16_t bytesToMove ); + +#define MoveRecordsRight(src,dst,bytes) bcopy((src),(dst),(bytes)) + + +//////////////////////////////// Tree Operations //////////////////////////////// + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr keyPtr, + TreePathTable treePathTable, + u_int32_t *nodeNum, + BlockDescriptor *nodePtr, + u_int16_t *index ); + 
+OSStatus InsertTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level, + Boolean replacingKey, + u_int32_t *insertNode ); + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level ); + +OSStatus BTFlushPath (FCB *filePtr); + +OSStatus BTSetLastSync (FCB *filePtr, + u_int32_t lastsync); +#endif /* lf_hfs_btree_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_btree_allocate.c b/livefiles_hfs_plugin/lf_hfs_btree_allocate.c new file mode 100644 index 0000000..41dfd24 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree_allocate.c @@ -0,0 +1,655 @@ +// +// lf_hfs_btree_allocate.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// +#include "lf_hfs_btrees_io.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_generic_buf.h" + +///////////////////// Routines Internal To BTreeAllocate.c ////////////////////// + +static OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + u_int16_t **mapPtr, + u_int16_t *mapSize ); + +///////////////////////////////////////////////////////////////////////////////// + +/*------------------------------------------------------------------------------- + + Routine: AllocateNode - Find Free Node, Mark It Used, and Return Node Number. + + Function: Searches the map records for the first free node, marks it "in use" and + returns the node number found. This routine should really only be called + when we know there are free blocks, otherwise it's just a waste of time. + + Note: We have to examine map nodes a word at a time rather than a long word + because the External BTree Mgr used map records that were not an integral + number of long words. Too bad. 
In our spare time could develop a more + sophisticated algorithm that read map records by long words (and long + word aligned) and handled the spare bytes at the beginning and end + appropriately. + + Input: btreePtr - pointer to control block for BTree file + + Output: nodeNum - number of node allocated + + + Result: noErr - success + fsBTNoMoreMapNodesErr - no free blocks were found + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, u_int32_t *nodeNum) +{ + OSStatus err; + BlockDescriptor node; + u_int16_t *mapPtr, *pos; + u_int16_t mapSize, size; + u_int16_t freeWord; + u_int16_t mask; + u_int16_t bitOffset; + u_int32_t nodeNumber; + + + nodeNumber = 0; // first node number of header map record + node.buffer = nil; // clear node.buffer to get header node + // - and for ErrorExit + node.blockHeader = nil; + + while (true) + { + err = GetMapNode (btreePtr, &node, &mapPtr, &mapSize); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &node); + + //////////////////////// Find Word with Free Bit //////////////////////////// + + pos = mapPtr; + size = mapSize; + size >>= 1; // convert to number of words + //assumes mapRecords contain an integral number of words + + while ( size-- ) + { + if ( *pos++ != 0xFFFF ) // assume test fails, and increment pos + break; + } + + --pos; // whoa! backup + + if (*pos != 0xFFFF) // hey, we got one! + break; + + nodeNumber += mapSize << 3; // covert to number of bits (nodes) + } + + ///////////////////////// Find Free Bit in Word ///////////////////////////// + + freeWord = SWAP_BE16 (*pos); + bitOffset = 15; + mask = 0x8000; + + do { + if ( (freeWord & mask) == 0) + break; + mask >>= 1; + } while (--bitOffset); + + ////////////////////// Calculate Free Node Number /////////////////////////// + + nodeNumber += ((pos - mapPtr) << 4) + (15 - bitOffset); // (pos-mapPtr) = # of words! 
+ + + ///////////////////////// Check for End of Map ////////////////////////////// + + if (nodeNumber >= btreePtr->totalNodes) + { + err = fsBTFullErr; + goto ErrorExit; + } + + /////////////////////////// Allocate the Node /////////////////////////////// + + *pos |= SWAP_BE16 (mask); // set the map bit for the node + + err = UpdateNode (btreePtr, &node, 0, kLockTransaction); + M_ExitOnError (err); + + --btreePtr->freeNodes; + M_BTreeHeaderDirty(btreePtr); + + /* Account for allocations from node reserve */ + BTUpdateReserve(btreePtr, 1); + + *nodeNum = nodeNumber; + + return noErr; + + ////////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &node); + *nodeNum = 0; + + return err; +} + + + +/*------------------------------------------------------------------------------- + + Routine: FreeNode - Clear allocation bit for node. + + Function: Finds the bit representing the node specified by nodeNum in the node + map and clears the bit. 
+ + + Input: btreePtr - pointer to control block for BTree file + nodeNum - number of node to mark free + + Output: none + + Result: noErr - success + fsBTNoMoreMapNodesErr - node number is beyond end of node map + != noErr - GetNode or ReleaseNode encountered some difficulty + -------------------------------------------------------------------------------*/ + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, u_int32_t nodeNum) +{ + OSStatus err; + BlockDescriptor node; + u_int32_t nodeIndex; + u_int16_t mapSize = 0; + u_int16_t *mapPos = NULL; + u_int16_t bitOffset; + + + //////////////////////////// Find Map Record //////////////////////////////// + nodeIndex = 0; // first node number of header map record + node.buffer = nil; // invalidate node.buffer to get header node + node.blockHeader = nil; + + while (nodeNum >= nodeIndex) + { + err = GetMapNode (btreePtr, &node, &mapPos, &mapSize); + M_ExitOnError (err); + + nodeIndex += mapSize << 3; // covert to number of bits (nodes) + } + + //////////////////////////// Mark Node Free ///////////////////////////////// + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &node); + + nodeNum -= (nodeIndex - (mapSize << 3)); // relative to this map record + bitOffset = 15 - (nodeNum & 0x0000000F); // last 4 bits are bit offset + mapPos += nodeNum >> 4; // point to word containing map bit + + M_SWAP_BE16_ClearBitNum (*mapPos, bitOffset); // clear it + + err = UpdateNode (btreePtr, &node, 0, kLockTransaction); + M_ExitOnError (err); + + ++btreePtr->freeNodes; + M_BTreeHeaderDirty(btreePtr); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, &node); + + return err; +} + + + +/*------------------------------------------------------------------------------- + + Routine: ExtendBTree - Call FSAgent to extend file, and allocate necessary map nodes. + + Function: This routine calls the the FSAgent to extend the end of fork, if necessary, + to accomodate the number of nodes requested. 
It then allocates as many + map nodes as are necessary to account for all the nodes in the B*Tree. + If newTotalNodes is less than the current number of nodes, no action is + taken. + + Note: Internal HFS File Manager BTree Module counts on an integral number of + long words in map records, although they are not long word aligned. + + Input: btreePtr - pointer to control block for BTree file + newTotalNodes - total number of nodes the B*Tree is to extended to + + Output: none + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + u_int32_t newTotalNodes ) +{ + OSStatus err; + FCB *filePtr; + FSSize minEOF, maxEOF; + u_int16_t nodeSize; + u_int32_t oldTotalNodes; + u_int32_t newMapNodes; + u_int32_t mapBits, totalMapBits; + u_int32_t recStartBit; + u_int32_t nodeNum, nextNodeNum; + u_int32_t firstNewMapNodeNum, lastNewMapNodeNum; + BlockDescriptor mapNode, newNode; + u_int16_t *mapPos; + u_int16_t *mapStart; + u_int16_t mapSize; + u_int16_t mapNodeRecSize; + u_int32_t bitInWord, bitInRecord; + u_int16_t mapIndex; + + + oldTotalNodes = btreePtr->totalNodes; + if (newTotalNodes <= oldTotalNodes) // we're done! 
+ return noErr; + + nodeSize = btreePtr->nodeSize; + filePtr = GetFileControlBlock(btreePtr->fileRefNum); + + mapNode.buffer = nil; + mapNode.blockHeader = nil; + newNode.buffer = nil; + newNode.blockHeader = nil; + + mapNodeRecSize = nodeSize - sizeof(BTNodeDescriptor) - 6; // 2 bytes of free space (see note) + + + //////////////////////// Count Bits In Node Map ///////////////////////////// + + totalMapBits = 0; + do { + err = GetMapNode (btreePtr, &mapNode, &mapStart, &mapSize); + M_ExitOnError (err); + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + } while ( ((BTNodeDescriptor*)mapNode.buffer)->fLink != 0 ); + +#if DEBUG + if (totalMapBits != CalcMapBits (btreePtr)) + LFHFS_LOG(LEVEL_ERROR, "ExtendBTree: totalMapBits != CalcMapBits"); +#endif + + /////////////////////// Extend LEOF If Necessary //////////////////////////// + + minEOF = (u_int64_t)newTotalNodes * (u_int64_t)nodeSize; + if ( (u_int64_t)filePtr->fcbEOF < minEOF ) + { + maxEOF = (u_int64_t)0x7fffffffLL * (u_int64_t)nodeSize; + + err = btreePtr->setEndOfForkProc (btreePtr->fileRefNum, minEOF, maxEOF); + M_ExitOnError (err); + } + + + //////////////////// Calc New Total Number Of Nodes ///////////////////////// + + newTotalNodes = (uint32_t)(filePtr->fcbEOF / nodeSize); // hack! + // do we wish to perform any verification of newTotalNodes at this point? + + btreePtr->totalNodes = newTotalNodes; // do we need to update freeNodes here too? 
+ + + ////////////// Calculate Number Of New Map Nodes Required /////////////////// + + newMapNodes = 0; + if (newTotalNodes > totalMapBits) + { + newMapNodes = (((newTotalNodes - totalMapBits) >> 3) / mapNodeRecSize) + 1; + firstNewMapNodeNum = oldTotalNodes; + lastNewMapNodeNum = firstNewMapNodeNum + newMapNodes - 1; + } + else + { + err = ReleaseNode (btreePtr, &mapNode); + M_ExitOnError (err); + + goto Success; + } + + + /////////////////////// Initialize New Map Nodes //////////////////////////// + // XXXdbg - this is the correct place for this: + ModifyBlockStart(btreePtr->fileRefNum, &mapNode); + + ((BTNodeDescriptor*)mapNode.buffer)->fLink = firstNewMapNodeNum; + + nodeNum = firstNewMapNodeNum; + while (true) + { + err = GetNewNode (btreePtr, nodeNum, &newNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &newNode); + + ((NodeDescPtr)newNode.buffer)->numRecords = 1; + ((NodeDescPtr)newNode.buffer)->kind = kBTMapNode; + + // set free space offset + *(u_int16_t *)((Ptr)newNode.buffer + nodeSize - 4) = nodeSize - 6; + + if (nodeNum++ == lastNewMapNodeNum) + break; + + ((BTNodeDescriptor*)newNode.buffer)->fLink = nodeNum; // point to next map node + + err = UpdateNode (btreePtr, &newNode, 0, kLockTransaction); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, &newNode, 0, kLockTransaction); + M_ExitOnError (err); + + + ///////////////////// Mark New Map Nodes Allocated ////////////////////////// + + nodeNum = firstNewMapNodeNum; + do { + bitInRecord = nodeNum - recStartBit; + + while (bitInRecord >= mapBits) + { + nextNodeNum = ((NodeDescPtr)mapNode.buffer)->fLink; + if ( nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + err = UpdateNode (btreePtr, &mapNode, 0, kLockTransaction); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, 0, &mapNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &mapNode); + + mapIndex = 0; + + mapStart = (u_int16_t *) 
GetRecordAddress (btreePtr, mapNode.buffer, mapIndex); + mapSize = GetRecordSize (btreePtr, mapNode.buffer, mapIndex); + +#if DEBUG + if (mapSize != M_MapRecordSize (btreePtr->nodeSize) ) + { + LFHFS_LOG(LEVEL_ERROR, "ExtendBTree: mapSize != M_MapRecordSize"); + } +#endif + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + bitInRecord = nodeNum - recStartBit; + } + + mapPos = mapStart + ((nodeNum - recStartBit) >> 4); + bitInWord = 15 - ((nodeNum - recStartBit) & 0x0000000F); + + M_SWAP_BE16_SetBitNum (*mapPos, bitInWord); + + ++nodeNum; + + } while (nodeNum <= lastNewMapNodeNum); + + err = UpdateNode (btreePtr, &mapNode, 0, kLockTransaction); + M_ExitOnError (err); + + + //////////////////////////////// Success //////////////////////////////////// + +Success: + + btreePtr->totalNodes = newTotalNodes; + btreePtr->freeNodes += (newTotalNodes - oldTotalNodes) - newMapNodes; + + M_BTreeHeaderDirty(btreePtr); + + /* Force the b-tree header changes to disk */ + (void) UpdateHeader (btreePtr, true); + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &mapNode); + (void) ReleaseNode (btreePtr, &newNode); + + return err; +} + + + +/*------------------------------------------------------------------------------- + + Routine: GetMapNode - Get the next map node and pointer to the map record. + + Function: Given a BlockDescriptor to a map node in nodePtr, GetMapNode releases + it and gets the next node. If nodePtr->buffer is nil, then the header + node is retrieved. 
+ + + Input: btreePtr - pointer to control block for BTree file + nodePtr - pointer to a BlockDescriptor of a map node + + Output: nodePtr - pointer to the BlockDescriptor for the next map node + mapPtr - pointer to the map record within the map node + mapSize - number of bytes in the map record + + Result: noErr - success + fsBTNoMoreMapNodesErr - we've run out of map nodes + fsBTInvalidNodeErr - bad node, or not node type kMapNode + != noErr - failure + -------------------------------------------------------------------------------*/ + +static +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + u_int16_t **mapPtr, + u_int16_t *mapSize ) +{ + OSStatus err; + u_int16_t mapIndex; + u_int32_t nextNodeNum; + + if (nodePtr->buffer != nil) // if iterator is valid... + { + nextNodeNum = ((NodeDescPtr)nodePtr->buffer)->fLink; + if (nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + err = ReleaseNode (btreePtr, nodePtr); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, 0, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTMapNode) + { + err = fsBTBadNodeType; + goto ErrorExit; + } + + ++btreePtr->numMapNodesRead; + mapIndex = 0; + } else { + err = GetNode (btreePtr, kHeaderNodeNum, 0, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTHeaderNode) + { + err = fsBTInvalidHeaderErr; //or fsBTBadNodeType + goto ErrorExit; + } + + mapIndex = 2; + } + + + *mapPtr = (u_int16_t *) GetRecordAddress (btreePtr, nodePtr->buffer, mapIndex); + *mapSize = GetRecordSize (btreePtr, nodePtr->buffer, mapIndex); + + return noErr; + + +ErrorExit: + + (void) ReleaseNode (btreePtr, nodePtr); + + *mapPtr = nil; + *mapSize = 0; + + return err; +} + + + +////////////////////////////////// CalcMapBits ////////////////////////////////// + +u_int32_t CalcMapBits (BTreeControlBlockPtr btreePtr) +{ + u_int32_t mapBits; + + mapBits = 
(u_int32_t)(M_HeaderMapRecordSize (btreePtr->nodeSize) << 3); + + while (mapBits < btreePtr->totalNodes) + mapBits += M_MapRecordSize (btreePtr->nodeSize) << 3; + + return mapBits; +} + +/*------------------------------------------------------------------------------- + Routine: BTZeroUnusedNodes + + Function: Write zeros to all nodes in the B-tree that are not currently in use. + -------------------------------------------------------------------------------*/ +int +BTZeroUnusedNodes(FCB *filePtr) +{ + int err=0; + u_int16_t *mapPtr, *pos; + u_int16_t mapSize, size; + u_int16_t mask; + u_int16_t bitNumber; + u_int16_t word; + + vnode_t vp = FTOV(filePtr); + BTreeControlBlockPtr btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + GenericLFBufPtr bp = NULL; + u_int32_t nodeNumber = 0; + BlockDescriptor mapNode = {0}; + mapNode.buffer = nil; + mapNode.blockHeader = nil; + + /* Iterate over map nodes. */ + while (true) + { + err = GetMapNode (btreePtr, &mapNode, &mapPtr, &mapSize); + if (err) + { + err = MacToVFSError(err); + goto ErrorExit; + } + + pos = mapPtr; + size = mapSize; + size >>= 1; /* convert to number of 16-bit words */ + + /* Iterate over 16-bit words in the map record. */ + while (size--) + { + if (*pos != 0xFFFF) /* Anything free in this word? */ + { + word = SWAP_BE16(*pos); + + /* Iterate over bits in the word. */ + for (bitNumber = 0, mask = 0x8000; + bitNumber < 16; + ++bitNumber, mask >>= 1) + { + if (word & mask) + continue; /* This node is in use. */ + + if (nodeNumber + bitNumber >= btreePtr->totalNodes) + { + /* We've processed all of the nodes. */ + goto done; + } + + /* + * Get a buffer full of zeros and write it to the unused + * node. Since we'll probably be writing a lot of nodes, + * bypass the journal (to avoid a transaction that's too + * big). Instead, this behaves more like clearing out + * nodes when extending a B-tree (eg., ClearBTNodes). 
+ */ + bp = lf_hfs_generic_buf_allocate(vp, nodeNumber + bitNumber, btreePtr->nodeSize, 0); // buf_getblk(vp, nodeNumber + bitNumber, btreePtr->nodeSize, 0, 0, BLK_META); + if (bp == NULL) + { + LFHFS_LOG(LEVEL_ERROR , "BTZeroUnusedNodes: unable to read node %u\n", nodeNumber + bitNumber); + err = EIO; + goto ErrorExit; + } + + if (bp->uCacheFlags & GEN_BUF_WRITE_LOCK) { + /* + * This node is already part of a transaction and will be written when + * the transaction is committed, so don't write it here. If we did, then + * we'd hit a panic in hfs_vnop_bwrite because the B_LOCKED bit is still set. + */ + lf_hfs_generic_buf_release(bp); + continue; + } + + lf_hfs_generic_buf_clear(bp); + + err = lf_hfs_generic_buf_write(bp); + if (err) { + goto ErrorExit; + } + + lf_hfs_generic_buf_release(bp); + } + } + + /* Go to the next word in the bitmap */ + ++pos; + nodeNumber += 16; + } + } + +ErrorExit: +done: + (void) ReleaseNode(btreePtr, &mapNode); + + return err; +} + diff --git a/livefiles_hfs_plugin/lf_hfs_btree_misc_ops.c b/livefiles_hfs_plugin/lf_hfs_btree_misc_ops.c new file mode 100644 index 0000000..cd2d1a3 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree_misc_ops.c @@ -0,0 +1,568 @@ +// +// lf_hfs_btree_misc_ops.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_btrees_io.h" +#include "lf_hfs_utils.h" + +////////////////////////////// Routine Definitions ////////////////////////////// + +/*------------------------------------------------------------------------------- + Routine: CalcKeyRecordSize - Return size of combined key/record structure. + + Function: Rounds keySize and recSize so they will end on word boundaries. + Does NOT add size of offset. 
+ + Input: keySize - length of key (including length field) + recSize - length of record data + + Output: none + + Result: u_int16_t - size of combined key/record that will be inserted in btree + -------------------------------------------------------------------------------*/ + +u_int16_t CalcKeyRecordSize (u_int16_t keySize, + u_int16_t recSize ) +{ + if ( M_IsOdd (keySize) ) keySize += 1; // pad byte + + if (M_IsOdd (recSize) ) recSize += 1; // pad byte + + return (keySize + recSize); +} + + + +/*------------------------------------------------------------------------------- + Routine: VerifyHeader - Validate fields of the BTree header record. + + Function: Examines the fields of the BTree header record to determine if the + fork appears to contain a valid BTree. + + Input: forkPtr - pointer to fork control block + header - pointer to BTree header + + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus VerifyHeader (FCB *filePtr, + BTHeaderRec *header ) +{ + u_int64_t forkSize; + u_int32_t totalNodes; + + + switch (header->nodeSize) // node size == 512*2^n + { + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + case 32768: break; + default: return fsBTInvalidHeaderErr; //E_BadNodeType + } + + totalNodes = header->totalNodes; + + forkSize = (u_int64_t)totalNodes * (u_int64_t)header->nodeSize; + + if ( forkSize > (u_int64_t)filePtr->fcbEOF ) + return fsBTInvalidHeaderErr; + + if ( header->freeNodes >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->rootNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->firstLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->lastLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->treeDepth > kMaxTreeDepth ) + return fsBTInvalidHeaderErr; + + + /////////////////////////// Check BTree Type //////////////////////////////// + + switch 
(header->btreeType) + { + case 0: // HFS Type - no Key Descriptor + case kUserBTreeType: // with Key Descriptors etc. + case kReservedBTreeType: // Desktop Mgr BTree ? + break; + + default: return fsBTUnknownVersionErr; + } + + return noErr; +} + + + +OSStatus TreeIsDirty(BTreeControlBlockPtr btreePtr) +{ + return (btreePtr->flags & kBTHeaderDirty); +} + + + +/*------------------------------------------------------------------------------- + Routine: UpdateHeader - Write BTreeInfoRec fields to Header node. + + Function: Checks the kBTHeaderDirty flag in the BTreeInfoRec and updates the + header node if necessary. + + Input: btreePtr - pointer to BTreeInfoRec + + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus UpdateHeader(BTreeControlBlockPtr btreePtr, Boolean forceWrite) +{ + OSStatus err; + BlockDescriptor node; + BTHeaderRec *header; + u_int32_t options; + + if ((btreePtr->flags & kBTHeaderDirty) == 0) // btree info already flushed + return noErr; + + err = GetNode (btreePtr, kHeaderNodeNum, 0, &node ); + if (err != noErr) { + return err; + } + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &node); + + header = (BTHeaderRec*) ((char *)node.buffer + sizeof(BTNodeDescriptor)); + + header->treeDepth = btreePtr->treeDepth; + header->rootNode = btreePtr->rootNode; + header->leafRecords = btreePtr->leafRecords; + header->firstLeafNode = btreePtr->firstLeafNode; + header->lastLeafNode = btreePtr->lastLeafNode; + header->nodeSize = btreePtr->nodeSize; // this shouldn't change + header->maxKeyLength = btreePtr->maxKeyLength; // neither should this + header->totalNodes = btreePtr->totalNodes; + header->freeNodes = btreePtr->freeNodes; + header->btreeType = btreePtr->btreeType; + + // ignore header->clumpSize; // rename this field? 
+ + if (forceWrite) + options = kForceWriteBlock; + else + options = kLockTransaction; + + err = UpdateNode (btreePtr, &node, 0, options); + + btreePtr->flags &= (~kBTHeaderDirty); + + return err; +} + + + +/*------------------------------------------------------------------------------- + Routine: FindIteratorPosition - One_line_description. + + Function: Brief_description_of_the_function_and_any_side_effects + + Algorithm: see FSC.BT.BTIterateRecord.PICT + + Note: // document side-effects of bad node hints + + Input: btreePtr - description + iterator - description + + + Output: iterator - description + left - description + middle - description + right - description + nodeNum - description + returnIndex - description + foundRecord - description + + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + u_int32_t *returnNodeNum, + u_int16_t *returnIndex, + Boolean *foundRecord ) +{ + OSStatus err; + Boolean foundIt; + u_int32_t nodeNum; + u_int16_t leftIndex, index, rightIndex; + Boolean validHint; + + // assume btreePtr valid + // assume left, middle, right point to BlockDescriptors + // assume nodeNum points to u_int32_t + // assume index points to u_int16_t + // assume foundRecord points to Boolean + + left->buffer = nil; + left->blockHeader = nil; + middle->buffer = nil; + middle->blockHeader = nil; + right->buffer = nil; + right->blockHeader = nil; + + foundIt = false; + + if (iterator == nil) // do we have an iterator? + { + err = fsBTInvalidIteratorErr; + goto ErrorExit; + } + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + nodeNum = iterator->hint.nodeNum; + if (! validHint) // does the hint appear to be valid? 
+ { + goto SearchTheTree; + } + + err = GetNode (btreePtr, nodeNum, kGetNodeHint, middle); + if( err == fsBTInvalidNodeErr ) // returned if nodeNum is out of range + goto SearchTheTree; + + M_ExitOnError (err); + + if ( ((NodeDescPtr) middle->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) middle->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, middle->buffer, &iterator->key, &index); + if (foundIt == true) + { + ++btreePtr->numValidHints; + goto SuccessfulExit; + } + iterator->hint.nodeNum = 0; + + if (index == 0) + { + if (((NodeDescPtr) middle->buffer)->bLink == 0) // before 1st btree record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->bLink; + + // BTree nodes are always grabbed in left to right order. + // Therefore release the current node before looking up the + // left node. + err = ReleaseNode(btreePtr, middle); + M_ExitOnError(err); + + // Look up the left node + err = GetNode (btreePtr, nodeNum, 0, left); + M_ExitOnError (err); + + // Look up the current node again + err = GetRightSiblingNode (btreePtr, left->buffer, middle); + M_ExitOnError (err); + + if ( ((NodeDescPtr) left->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) left->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, left->buffer, &iterator->key, &leftIndex); + if (foundIt == true) + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + + if (leftIndex == 0) // we're lost! 
+ { + goto SearchTheTree; + } + else if (leftIndex >= ((NodeDescPtr) left->buffer)->numRecords) + { + nodeNum = ((NodeDescPtr) left->buffer)->fLink; + if (index != 0) + { + LFHFS_LOG(LEVEL_ERROR, "FindIteratorPosition: index != 0\n"); + hfs_assert(0); + } + goto SuccessfulExit; + } + else + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + } + else if (index >= ((NodeDescPtr) middle->buffer)->numRecords) + { + if (((NodeDescPtr) middle->buffer)->fLink == 0) // beyond last record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->fLink; + + err = GetRightSiblingNode (btreePtr, middle->buffer, right); + M_ExitOnError (err); + + if ( ((NodeDescPtr) right->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) right->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, right->buffer, &iterator->key, &rightIndex); + if (rightIndex >= ((NodeDescPtr) right->buffer)->numRecords) // we're lost + { + goto SearchTheTree; + } + else // we found it, or rightIndex==0, or rightIndexbuffer = nil; + index = rightIndex; + + goto SuccessfulExit; + } + } + + + //////////////////////////// Search The Tree //////////////////////////////// + +SearchTheTree: + { + TreePathTable treePathTable; // so we only use stack space if we need to + + err = ReleaseNode (btreePtr, left); M_ExitOnError (err); + err = ReleaseNode (btreePtr, middle); M_ExitOnError (err); + err = ReleaseNode (btreePtr, right); M_ExitOnError (err); + + err = SearchTree ( btreePtr, &iterator->key, treePathTable, &nodeNum, middle, &index); + switch (err) // separate find condition from exceptions + { + case noErr: foundIt = true; break; + case fsBTRecordNotFoundErr: break; + default: goto ErrorExit; + } + } + + /////////////////////////////// Success! 
//////////////////////////////////// + +SuccessfulExit: + + *returnNodeNum = nodeNum; + *returnIndex = index; + *foundRecord = foundIt; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, left); + (void) ReleaseNode (btreePtr, middle); + (void) ReleaseNode (btreePtr, right); + + *returnNodeNum = 0; + *returnIndex = 0; + *foundRecord = false; + + return err; +} + + + +/////////////////////////////// CheckInsertParams /////////////////////////////// + +OSStatus CheckInsertParams (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen ) +{ + BTreeControlBlockPtr btreePtr; + + if (filePtr == nil) return paramErr; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) return fsBTInvalidFileErr; + if (iterator == nil) return paramErr; + if (record == nil) return paramErr; + + // check total key/record size limit + if ( CalcKeyRecordSize (CalcKeySize(btreePtr, &iterator->key), recordLen) > (btreePtr->nodeSize >> 1)) + return fsBTRecordTooLargeErr; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + Routine: TrySimpleReplace - Attempts a simple insert, set, or replace. + + Function: If a hint exitst for the iterator, attempt to find the key in the hint + node. If the key is found, an insert operation fails. If the is not + found, a replace operation fails. If the key was not found, and the + insert position is greater than 0 and less than numRecords, the record + is inserted, provided there is enough freeSpace. If the key was found, + and there is more freeSpace than the difference between the new record + and the old record, the old record is deleted and the new record is + inserted. 
+ + Assumptions: iterator key has already been checked by CheckKey + + + Input: btreePtr - description + iterator - description + record - description + recordLen - description + operation - description + + + Output: recordInserted - description + + + Result: noErr - success + E_RecordExits - insert operation failure + != noErr - GetNode, ReleaseNode, UpdateNode returned an error + -------------------------------------------------------------------------------*/ + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen, + Boolean *recordInserted ) +{ + u_int32_t oldSpace; + u_int32_t spaceNeeded; + u_int16_t index; + u_int16_t keySize; + Boolean foundIt; + Boolean didItFit; + + + *recordInserted = false; // we'll assume this won't work... + + if ( nodePtr->kind != kBTLeafNode ) + return noErr; // we're in the weeds! + + foundIt = SearchNode (btreePtr, nodePtr, &iterator->key, &index); + + if ( foundIt == false ) + return noErr; // we might be lost... + + keySize = CalcKeySize(btreePtr, &iterator->key); // includes length field + + spaceNeeded = CalcKeyRecordSize (keySize, recordLen); + + oldSpace = GetRecordSize (btreePtr, nodePtr, index); + + if ( spaceNeeded == oldSpace ) + { + u_int8_t * dst; + + dst = GetRecordAddress (btreePtr, nodePtr, index); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + dst += keySize; // skip over key to point at record + + BlockMoveData(record->bufferAddress, dst, recordLen); // blast away... 
+ + *recordInserted = true; + } + else if ( (GetNodeFreeSize(btreePtr, nodePtr) + oldSpace) >= spaceNeeded) + { + DeleteRecord (btreePtr, nodePtr, index); + + didItFit = InsertKeyRecord (btreePtr, nodePtr, index, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen); + if (didItFit == false) + { + LFHFS_LOG(LEVEL_ERROR, "TrySimpleInsert: InsertKeyRecord returned false!"); + hfs_assert(0); + } + *recordInserted = true; + } + // else not enough space... + + return noErr; +} + + +/*------------------------------------------------------------------------------- + Routine: IsItAHint - checks the hint within a BTreeInterator. + + Function: checks the hint within a BTreeInterator. If it is non-zero, it may + possibly be valid. + + Input: btreePtr - pointer to control block for BTree file + iterator - pointer to BTreeIterator + + Output: answer - true if the hint looks reasonable + - false if the hint is 0 + + Result: noErr - success + -------------------------------------------------------------------------------*/ + + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, BTreeIterator *iterator, Boolean *answer) +{ + ++btreePtr->numHintChecks; + if (iterator->hint.nodeNum == 0) + { + *answer = false; + } + else + { + *answer = true; + ++btreePtr->numPossibleHints; + } + + return noErr; +} diff --git a/livefiles_hfs_plugin/lf_hfs_btree_node_ops.c b/livefiles_hfs_plugin/lf_hfs_btree_node_ops.c new file mode 100644 index 0000000..e0df514 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree_node_ops.c @@ -0,0 +1,941 @@ +// +// lf_hfs_btree_node_ops.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_generic_buf.h" + +///////////////////////// BTree Module Node Operations ////////////////////////// +// +// GetNode - Call FS Agent to get node +// GetNewNode - Call FS Agent to get a new node +// ReleaseNode - Call FS Agent to release node obtained by GetNode. +// UpdateNode - Mark a node as dirty and call FS Agent to release it. +// +// ClearNode - Clear a node to all zeroes. +// +// InsertRecord - Inserts a record into a BTree node. +// InsertKeyRecord - Inserts a key and record pair into a BTree node. +// DeleteRecord - Deletes a record from a BTree node. +// +// SearchNode - Return index for record that matches key. +// LocateRecord - Return pointer to key and data, and size of data. +// +// GetNodeDataSize - Return the amount of space used for data in the node. +// GetNodeFreeSize - Return the amount of free space in the node. +// +// GetRecordOffset - Return the offset for record "index". +// GetRecordAddress - Return address of record "index". +// GetOffsetAddress - Return address of offset for record "index". +// +// InsertOffset - Inserts a new offset into a node. +// DeleteOffset - Deletes an offset from a node. 
+// +///////////////////////////////////////////////////////////////////////////////// + + + +////////////////////// Routines Internal To BTreeNodeOps.c ////////////////////// + +u_int16_t GetRecordOffset (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + +u_int16_t *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ); + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + u_int16_t delta ); + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ); + + +///////////////////////////////////////////////////////////////////////////////// + +#define GetRecordOffset(btreePtr,node,index) (*(short *) ((u_int8_t *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize)) + + +/*------------------------------------------------------------------------------- + + Routine: GetNode - Call FS Agent to get node + + Function: Gets an existing BTree node from FS Agent and verifies it. + + Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + + Output: nodePtr - pointer to beginning of node (nil if error) + + Result: + noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + u_int32_t flags, + NodeRec *nodePtr ) +{ + OSStatus err; + GetBlockProcPtr getNodeProc; + u_int32_t options; + + + // is nodeNum within proper range? 
+ if( nodeNum >= btreePtr->totalNodes ) + { + LFHFS_LOG(LEVEL_ERROR, "GetNode:nodeNum [%u] >= totalNodes [%u]",nodeNum, btreePtr->totalNodes); + err = fsBTInvalidNodeErr; + goto ErrorExit; + } + + nodePtr->blockSize = btreePtr->nodeSize; // indicate the size of a node + + options = kGetBlock; + if ( flags & kGetNodeHint ) + { + options |= kGetBlockHint; + } + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fileRefNum, + nodeNum, + options, + nodePtr ); + + if (err != noErr) + { + LFHFS_LOG(LEVEL_ERROR, "GetNode: getNodeProc returned error."); + goto ErrorExit; + } + ++btreePtr->numGetNodes; + + return noErr; + +ErrorExit: + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return err; +} + + + +/*------------------------------------------------------------------------------- + + Routine: GetNewNode - Call FS Agent to get a new node + + Function: Gets a new BTree node from FS Agent and initializes it to an empty + state. + + Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + + Output: returnNodePtr - pointer to beginning of node (nil if error) + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + NodeRec *returnNodePtr ) +{ + OSStatus err; + NodeDescPtr node; + void *pos; + GetBlockProcPtr getNodeProc; + + + //////////////////////// get buffer for new node //////////////////////////// + + returnNodePtr->blockSize = btreePtr->nodeSize; // indicate the size of a node + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fileRefNum, + nodeNum, + kGetBlock+kGetEmptyBlock, + returnNodePtr ); + + if (err != noErr) + { + LFHFS_LOG(LEVEL_ERROR, "GetNewNode: getNodeProc returned error."); + // returnNodePtr->buffer = nil; + return err; + } + ++btreePtr->numGetNewNodes; + + + ////////////////////////// initialize the node 
////////////////////////////// + + node = returnNodePtr->buffer; + + GenericLFBuf *psBuf = returnNodePtr->blockHeader; + ClearNode (btreePtr, node); // clear the node + lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_LITTLE_ENDIAN); + + pos = (char *)node + btreePtr->nodeSize - 2; // find address of last offset + *(u_int16_t *)pos = sizeof (BTNodeDescriptor); // set offset to beginning of free space + + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + + Routine: ReleaseNode - Call FS Agent to release node obtained by GetNode. + + Function: Informs the FS Agent that a BTree node may be released. + + Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + err = noErr; + + if (nodePtr->buffer != nil) + { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + kReleaseBlock ); + if (err) + { + LFHFS_LOG(LEVEL_ERROR, "ReleaseNode: releaseNodeProc returned error."); + hfs_assert(0); + } + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return err; +} + + + + +/*------------------------------------------------------------------------------- + + Routine: TrashNode - Call FS Agent to release node obtained by GetNode, and + not store it...mark it as bad. + + Function: Informs the FS Agent that a BTree node may be released and thrown away. 
+ + Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + err = noErr; + + if (nodePtr->buffer != nil) + { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + kReleaseBlock | kTrashBlock ); + if (err) + { + LFHFS_LOG(LEVEL_ERROR, "TrashNode: releaseNodeProc returned error."); + hfs_assert(0); + } + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return err; +} + + + +/*------------------------------------------------------------------------------- + + Routine: UpdateNode - Mark a node as dirty and call FS Agent to release it. + + Function: Marks a BTree node dirty and informs the FS Agent that it may be released. + + Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + transactionID - ID of transaction this node update is a part of + flags - special flags to pass to ReleaseNodeProc + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr, + u_int32_t transactionID, + u_int32_t flags ) +{ +#pragma unused(transactionID) + + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + err = noErr; + + if (nodePtr->buffer != nil) // Why call UpdateNode if nil ?!? 
+ { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + flags | kMarkBlockDirty ); + ++btreePtr->numUpdateNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return err; +} + +/*------------------------------------------------------------------------------- + + Routine: ClearNode - Clear a node to all zeroes. + + Function: Writes zeroes from beginning of node for nodeSize bytes. + + Input: btreePtr - pointer to BTree control block + node - pointer to node to clear + + Result: none + -------------------------------------------------------------------------------*/ + +void ClearNode (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + ClearMemory( node, btreePtr->nodeSize ); +} + +/*------------------------------------------------------------------------------- + + Routine: InsertRecord - Inserts a record into a BTree node. + + Function: + + Note: Record size must be even! + + Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + recPtr - pointer to record to insert + + Result: noErr - success + fsBTFullErr - record larger than remaining free space. + -------------------------------------------------------------------------------*/ + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + RecordPtr recPtr, + u_int16_t recSize ) +{ + u_int16_t freeSpace; + u_int16_t indexOffset; + u_int16_t freeOffset; + u_int16_t bytesToMove; + void *src; + void *dst; + + //// will new record fit in node? 
+ + freeSpace = GetNodeFreeSize (btreePtr, node); + //we could get freeOffset & calc freeSpace + if ( freeSpace < recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + indexOffset; + dst = ((Ptr) src) + recSize; + bytesToMove = freeOffset - indexOffset; + if (bytesToMove) + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, recSize); + + + //// move in the new record + + dst = ((Ptr) node) + indexOffset; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + + Routine: InsertKeyRecord - Inserts a record into a BTree node. + + Function: + + Note: Record size must be even! + + Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + keyPtr - pointer to key for record to insert + keyLength - length of key (or maxKeyLength) + recPtr - pointer to record to insert + recSize - number of bytes to copy for record + + Result: noErr - success + fsBTFullErr - record larger than remaining free space. 
+ -------------------------------------------------------------------------------*/ + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + KeyPtr keyPtr, + u_int16_t keyLength, + RecordPtr recPtr, + u_int16_t recSize ) +{ + u_int16_t freeSpace; + u_int16_t indexOffset; + u_int16_t freeOffset; + u_int16_t bytesToMove; + u_int8_t * src; + u_int8_t * dst; + u_int16_t keySize; + u_int16_t rawKeyLength; + u_int16_t sizeOfLength; + + //// calculate actual key size + + if ( btreePtr->attributes & kBTBigKeysMask ) + keySize = keyLength + sizeof(u_int16_t); + else + keySize = keyLength + sizeof(u_int8_t); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + + //// will new record fit in node? + + freeSpace = GetNodeFreeSize (btreePtr, node); + //we could get freeOffset & calc freeSpace + if ( freeSpace < keySize + recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((u_int8_t *) node) + indexOffset; + dst = ((u_int8_t *) src) + keySize + recSize; + bytesToMove = freeOffset - indexOffset; + if (bytesToMove) + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, keySize + recSize); + + + //// copy record key + + dst = ((u_int8_t *) node) + indexOffset; + + if ( btreePtr->attributes & kBTBigKeysMask ) + { + *((u_int16_t *)dst) = keyLength; // use keyLength rather than key.length + dst = (u_int8_t *) (((u_int16_t *)dst) + 1); + rawKeyLength = keyPtr->length16; + sizeOfLength = 2; + } + else + { + *dst++ = keyLength; // use keyLength rather than key.length + rawKeyLength = keyPtr->length8; + sizeOfLength = 1; + } + + MoveRecordsLeft ( ((u_int8_t *) keyPtr) + sizeOfLength, dst, rawKeyLength); // copy key + + // any pad bytes? 
+ bytesToMove = keySize - rawKeyLength; + if (bytesToMove) { + ClearMemory (dst + rawKeyLength, bytesToMove); // clear pad bytes in index key + } + + + //// copy record data + + dst = ((u_int8_t *) node) + indexOffset + keySize; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + + Routine: DeleteRecord - Deletes a record from a BTree node. + + Function: + + Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + + Result: none + -------------------------------------------------------------------------------*/ + +void DeleteRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + int16_t indexOffset; + int16_t nextOffset; + int16_t freeOffset; + int16_t bytesToMove; + void *src; + void *dst; + + //// compress records + indexOffset = GetRecordOffset (btreePtr, node, index); + nextOffset = GetRecordOffset (btreePtr, node, index + 1); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + nextOffset; + dst = ((Ptr) node) + indexOffset; + bytesToMove = freeOffset - nextOffset; + if (bytesToMove) + MoveRecordsLeft (src, dst, bytesToMove); + + //// Adjust the offsets + DeleteOffset (btreePtr, node, index); + + /* clear out new free space */ + bytesToMove = nextOffset - indexOffset; + ClearMemory(GetRecordAddress(btreePtr, node, node->numRecords), bytesToMove); + +} + + + +/*------------------------------------------------------------------------------- + + Routine: SearchNode - Return index for record that matches key. + + Function: Returns the record index for the record that matches the search key. + If no record was found that matches the search key, the "insert index" + of where the record should go is returned instead. + + Algorithm: A binary search algorithm is used to find the specified key. 
+ + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + searchKey - pointer to the key to match + + Output: index - pointer to beginning of key for record + + Result: true - success (index = record index) + false - key did not match anything in node (index = insert index) + -------------------------------------------------------------------------------*/ +Boolean +SearchNode( BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + KeyPtr searchKey, + u_int16_t *returnIndex ) +{ + int32_t lowerBound; + int32_t upperBound; + int32_t index; + int32_t result; + KeyPtr trialKey; + u_int16_t *offset; + KeyCompareProcPtr compareProc = btreePtr->keyCompareProc; + + lowerBound = 0; + upperBound = node->numRecords - 1; + offset = (u_int16_t *) ((u_int8_t *)(node) + (btreePtr)->nodeSize - kOffsetSize); + + while (lowerBound <= upperBound) { + index = (lowerBound + upperBound) >> 1; + + trialKey = (KeyPtr) ((u_int8_t *)node + *(offset - index)); + + result = compareProc(searchKey, trialKey); + + if (result < 0) { + upperBound = index - 1; /* search < trial */ + } else if (result > 0) { + lowerBound = index + 1; /* search > trial */ + } else { + *returnIndex = index; /* search == trial */ + return true; + } + } + + *returnIndex = lowerBound; /* lowerBound is insert index */ + return false; +} + + +/*------------------------------------------------------------------------------- + + Routine: GetRecordByIndex - Return pointer to key and data, and size of data. + + Function: Returns a pointer to beginning of key for record, a pointer to the + beginning of the data for the record, and the size of the record data + (does not include the size of the key). 
+ + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - index of record to get + + Output: keyPtr - pointer to beginning of key for record + dataPtr - pointer to beginning of data for record + dataSize - size of the data portion of the record + + Result: none + -------------------------------------------------------------------------------*/ + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + KeyPtr *keyPtr, + u_int8_t * *dataPtr, + u_int16_t *dataSize ) +{ + u_int16_t offset; + u_int16_t nextOffset; + u_int16_t keySize; + + // + // Make sure index is valid (in range 0..numRecords-1) + // + if (index >= node->numRecords) + return fsBTRecordNotFoundErr; + + //// find keyPtr + offset = GetRecordOffset (btreePtr, node, index); + *keyPtr = (KeyPtr) ((Ptr)node + offset); + + //// find dataPtr + keySize = CalcKeySize(btreePtr, *keyPtr); + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + offset += keySize; // add the key length to find data offset + *dataPtr = (u_int8_t *) node + offset; + + //// find dataSize + nextOffset = GetRecordOffset (btreePtr, node, index + 1); + *dataSize = nextOffset - offset; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + + Routine: GetNodeDataSize - Return the amount of space used for data in the node. + + Function: Gets the size of the data currently contained in a node, excluding + the node header. (record data + offset overhead) + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + + Result: - number of bytes used for data and offsets in the node. 
+ -------------------------------------------------------------------------------*/ + +u_int16_t GetNodeDataSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + u_int16_t freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + return freeOffset + (node->numRecords << 1) - sizeof (BTNodeDescriptor); +} + + + +/*------------------------------------------------------------------------------- + + Routine: GetNodeFreeSize - Return the amount of free space in the node. + + Function: + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + + Result: - number of bytes of free space in the node. + -------------------------------------------------------------------------------*/ + +u_int16_t GetNodeFreeSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + u_int16_t freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); //inline? + + return btreePtr->nodeSize - freeOffset - (node->numRecords << 1) - kOffsetSize; +} + + + +/*------------------------------------------------------------------------------- + + Routine: GetRecordOffset - Return the offset for record "index". + + Function: + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset for + + Result: - offset (in bytes) from beginning of node of record specified by index + -------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +u_int16_t GetRecordOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + void *pos; + + + pos = (u_int8_t *)node + btreePtr->nodeSize - (index << 1) - kOffsetSize; + + return *(short *)pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + + Routine: GetRecordAddress - Return address of record "index". 
+ + Function: + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + + Result: - pointer to record "index". + -------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +u_int8_t * GetRecordAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + u_int8_t * pos; + + pos = (u_int8_t *)node + GetRecordOffset (btreePtr, node, index); + + return pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + + Routine: GetRecordSize - Return size of record "index". + + Function: + + Note: This does not work on the FreeSpace index! + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain record size for + + Result: - size of record "index". + -------------------------------------------------------------------------------*/ + +u_int16_t GetRecordSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + u_int16_t *pos; + + pos = (u_int16_t *) ((Ptr)node + btreePtr->nodeSize - (index << 1) - kOffsetSize); + + return *(pos-1) - *pos; +} + + + +/*------------------------------------------------------------------------------- + Routine: GetOffsetAddress - Return address of offset for record "index". + + Function: + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + + Result: - pointer to offset for record "index". 
+ -------------------------------------------------------------------------------*/ + +u_int16_t *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + void *pos; + + pos = (Ptr)node + btreePtr->nodeSize - (index << 1) -2; + + return (u_int16_t *)pos; +} + + + +/*------------------------------------------------------------------------------- + Routine: GetChildNodeNum - Return child node number from index record "index". + + Function: Returns the first u_int32_t stored after the key for record "index". + + Assumes: The node is an Index Node. + The key.length stored at record "index" is ODD. //change for variable length index keys + + Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain child node number from + + Result: - child node number from record "index". + -------------------------------------------------------------------------------*/ + +u_int32_t GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + u_int16_t index ) +{ + u_int8_t * pos; + + pos = GetRecordAddress (btreePtr, nodePtr, index); + pos += CalcKeySize(btreePtr, (BTreeKey *) pos); // key.length + size of length field + + return *(u_int32_t *)pos; +} + + + +/*------------------------------------------------------------------------------- + Routine: InsertOffset - Add an offset and adjust existing offsets by delta. + + Function: Add an offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by 'delta', the size of the record to be inserted. + The number of records contained in the node is also incremented. 
+ + Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to insert record + delta - size of record to be inserted + + Result: none + -------------------------------------------------------------------------------*/ + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + u_int16_t delta ) +{ + u_int16_t *src, *dst; + u_int16_t numOffsets; + + src = GetOffsetAddress (btreePtr, node, node->numRecords); // point to free offset + dst = src - 1; // point to new offset + numOffsets = node->numRecords++ - index; // subtract index & postincrement + + do { + *dst++ = *src++ + delta; // to tricky? + } while (numOffsets--); +} + + + +/*------------------------------------------------------------------------------- + + Routine: DeleteOffset - Delete an offset. + + Function: Delete the offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by the size of the record 'index'. + The number of records contained in the node is also decremented. 
+ + Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to delete record + + Result: none + -------------------------------------------------------------------------------*/ + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index ) +{ + u_int16_t *src, *dst; + u_int16_t numOffsets; + u_int16_t delta; + + dst = GetOffsetAddress (btreePtr, node, index); + src = dst - 1; + delta = *src - *dst; + numOffsets = --node->numRecords - index; // predecrement numRecords & subtract index + + while (numOffsets--) + { + *--dst = *--src - delta; // work our way left + } +} + + diff --git a/livefiles_hfs_plugin/lf_hfs_btree_node_reserve.c b/livefiles_hfs_plugin/lf_hfs_btree_node_reserve.c new file mode 100644 index 0000000..c429697 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree_node_reserve.c @@ -0,0 +1,313 @@ +// +// lf_hfs_btree_node_reserve.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_vfsutils.h" + +/* + * B-tree Node Reserve + * + * BTReserveSpace + * BTReleaseReserve + * BTUpdateReserve + * + * Each kernel thread can have it's own reserve of b-tree + * nodes. This reserve info is kept in a hash table. + * + * Don't forget to call BTReleaseReserve when you're finished + * or you will leave stale node reserves in the hash. + */ + + +/* + * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT! + * + * It must remain equal in size to the opaque cat_cookie_t + * struct (in hfs_catalog.h). 
+ */ +struct nreserve { + LIST_ENTRY(nreserve) nr_hash; /* hash chain */ + int nr_nodecnt; /* count of nodes held in reserve */ + int nr_newnodes; /* nodes that were allocated */ + struct vnode *nr_btvp; /* b-tree file vnode */ + void *nr_tag; /* unique tag (per thread) */ +}; + +#define NR_GET_TAG() (pthread_self()) + +#define NR_CACHE 17 + +#define NR_HASH(btvp, tag) \ +(&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask]) + +LIST_HEAD(nodereserve, nreserve) *nr_hashtbl; + +u_long nr_hashmask; + +pthread_mutex_t nr_mutex; + +/* Internal Node Reserve Hash Routines (private) */ +static void nr_insert (struct vnode *, struct nreserve *nrp, int); +static void nr_delete (struct vnode *, struct nreserve *nrp, int *); +static void nr_update (struct vnode *, int); + + +/* + * BTReserveSetup - initialize the node reserve hash table + */ +void BTReserveSetup(void) +{ + if (sizeof(struct nreserve) != sizeof(cat_cookie_t)) + { + LFHFS_LOG(LEVEL_ERROR,"BTReserveSetup: nreserve size != opaque struct size"); + hfs_assert(0); + } + + nr_hashtbl = hashinit(NR_CACHE, &nr_hashmask); + + lf_lck_mtx_init(&nr_mutex); +} + + +/* + * BTReserveSpace - obtain a node reserve (for current thread) + * + * Used by the Catalog Layer (hfs_catalog.c) to reserve space. + * + * When data is NULL, we only insure that there's enough space + * but it is not reserved (assumes you keep the b-tree lock). + */ +int +BTReserveSpace(FCB *file, int operations, void* data) +{ + BTreeControlBlock *btree; + int rsrvNodes, availNodes, totalNodes; + int height; + int inserts, deletes; + u_int32_t clumpsize; + int err = 0; + + btree = (BTreeControlBlockPtr)file->fcbBTCBPtr; + clumpsize = file->ff_clumpsize; + + REQUIRE_FILE_LOCK(btree->fileRefNum, true); + + /* + * The node reserve is based on the number of b-tree + * operations (insert/deletes) and the height of the + * tree. 
+ */ + height = btree->treeDepth; + if (height < 2) + height = 2; /* prevent underflow in rsrvNodes calculation */ + inserts = operations & 0xffff; + deletes = operations >> 16; + + /* + * Allow for at least one root split. + * + * Each delete operation can propogate a big key up the + * index. This can cause a split at each level up. + * + * Each insert operation can cause a local split and a + * split at each level up. + */ + rsrvNodes = 1 + (deletes * (height - 2)) + (inserts * (height - 1)); + + availNodes = btree->freeNodes - btree->reservedNodes; + + if (rsrvNodes > availNodes) { + u_int32_t reqblks, freeblks, rsrvblks; + uint32_t bt_rsrv; + struct hfsmount *hfsmp; + + /* + * For UNIX conformance, we try and reserve the MIN of either 5% of + * total file blocks or 10MB worth of blocks, for growing existing + * files. On non-HFS filesystems, creating a new directory entry may + * not cause additional disk space to be allocated, but on HFS, creating + * a new entry could cause the b-tree to grow. As a result, we take + * some precautions here to prevent that on configurations that try to + * satisfy conformance. + */ + hfsmp = VTOVCB(btree->fileRefNum); + rsrvblks = (uint32_t)(((u_int64_t)hfsmp->allocLimit * 5) / 100); + if (hfsmp->blockSize > HFS_BT_MAXRESERVE) { + bt_rsrv = 1; + } + else { + bt_rsrv = (HFS_BT_MAXRESERVE / hfsmp->blockSize); + } + rsrvblks = MIN(rsrvblks, bt_rsrv); + + freeblks = hfs_freeblks(hfsmp, 0); + if (freeblks <= rsrvblks) { + /* When running low, disallow adding new items. */ + if ((inserts > 0) && (deletes == 0)) { + return (ENOSPC); + } + freeblks = 0; + } else { + freeblks -= rsrvblks; + } + reqblks = clumpsize / hfsmp->blockSize; + + if (reqblks > freeblks) { + reqblks = ((rsrvNodes - availNodes) * btree->nodeSize) / hfsmp->blockSize; + /* When running low, disallow adding new items. 
*/ + if ((reqblks > freeblks) && (inserts > 0) && (deletes == 0)) { + return (ENOSPC); + } + file->ff_clumpsize = freeblks * hfsmp->blockSize; + } + totalNodes = rsrvNodes + btree->totalNodes - availNodes; + + /* See if we also need a map node */ + if (totalNodes > (int)CalcMapBits(btree)) { + ++totalNodes; + } + if ((err = ExtendBTree(btree, totalNodes))) { + goto out; + } + } + /* Save this reserve if this is a persistent request. */ + if (data) { + btree->reservedNodes += rsrvNodes; + nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes); + } +out: + /* Put clump size back if it was changed. */ + if (file->ff_clumpsize != clumpsize) + file->ff_clumpsize = clumpsize; + + return (err); +} + + +/* + * BTReleaseReserve - release the node reserve held by current thread + * + * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space. + */ +int +BTReleaseReserve(FCB *file, void* data) +{ + BTreeControlBlock *btree; + int nodecnt; + + btree = (BTreeControlBlockPtr)file->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btree->fileRefNum, true); + + nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt); + + if (nodecnt) + btree->reservedNodes -= nodecnt; + + return (0); +} + +/* + * BTUpdateReserve - update a node reserve for allocations that occurred. + */ +void +BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes) +{ + nr_update(btreePtr->fileRefNum, nodes); +} + + +/*----------------------------------------------------------------------------*/ +/* Node Reserve Hash Functions (private) */ + + +int nrinserts = 0; +int nrdeletes = 0; + +/* + * Insert a new node reserve. 
+ */ +static void +nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt) +{ + struct nodereserve *nrhead; + struct nreserve *tmp_nrp; + void * tag = NR_GET_TAG(); + + /* + * Check the cache - there may already be a reserve + */ + lf_lck_mtx_lock(&nr_mutex); + nrhead = NR_HASH(btvp, tag); + for (tmp_nrp = nrhead->lh_first; tmp_nrp; + tmp_nrp = tmp_nrp->nr_hash.le_next) { + if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) { + nrp->nr_tag = 0; + tmp_nrp->nr_nodecnt += nodecnt; + lf_lck_mtx_unlock(&nr_mutex); + return; + } + } + + nrp->nr_nodecnt = nodecnt; + nrp->nr_newnodes = 0; + nrp->nr_btvp = btvp; + nrp->nr_tag = tag; + LIST_INSERT_HEAD(nrhead, nrp, nr_hash); + ++nrinserts; + lf_lck_mtx_unlock(&nr_mutex); +} + +/* + * Delete a node reserve. + */ +static void +nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt) +{ + void * tag = NR_GET_TAG(); + + lf_lck_mtx_lock(&nr_mutex); + if (nrp->nr_tag) { + if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp)) + { + LFHFS_LOG(LEVEL_ERROR,"nr_delete: invalid NR (%p)", nrp); + hfs_assert(0); + } + LIST_REMOVE(nrp, nr_hash); + *nodecnt = nrp->nr_nodecnt; + bzero(nrp, sizeof(struct nreserve)); + ++nrdeletes; + } else { + *nodecnt = 0; + } + lf_lck_mtx_unlock(&nr_mutex); +} + + +/* + * Update a node reserve for any allocations that occurred. 
+ */ +static void +nr_update(struct vnode * btvp, int nodecnt) +{ + struct nodereserve *nrhead; + struct nreserve *nrp; + void* tag = NR_GET_TAG(); + + lf_lck_mtx_lock(&nr_mutex); + + nrhead = NR_HASH(btvp, tag); + for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) { + if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) { + nrp->nr_newnodes += nodecnt; + break; + } + } + lf_lck_mtx_unlock(&nr_mutex); +} diff --git a/livefiles_hfs_plugin/lf_hfs_btree_tree_ops.c b/livefiles_hfs_plugin/lf_hfs_btree_tree_ops.c new file mode 100644 index 0000000..ecbe3fe --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btree_tree_ops.c @@ -0,0 +1,1298 @@ +// +// lf_hfs_btree_tree_ops.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_btrees_io.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_generic_buf.h" +// +/////////////////////// Routines Internal To BTree Module /////////////////////// +// +// SearchTree +// InsertTree +// +////////////////////// Routines Internal To BTreeTreeOps.c ////////////////////// + +static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ); + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + u_int16_t rightInsertIndex, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + u_int16_t *insertIndex, + u_int32_t *insertNodeNum, + Boolean *recordFit, + u_int16_t *recsRotated ); + +static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + u_int32_t rightNodeNum, + u_int16_t index, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + u_int16_t *insertIndex, + u_int32_t 
*insertNodeNum, + u_int16_t *recsRotated ); + + + +static OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level, + u_int32_t *insertNode ); + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + BlockDescriptor *rightNode, + u_int32_t node, + u_int16_t index, + u_int32_t *newNode, + u_int16_t *newIndex, + BlockDescriptor *leftNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ); + +static u_int16_t GetKeyLength (const BTreeControlBlock *btreePtr, + const BTreeKey *key, + Boolean forLeafNode ); + + + +//////////////////////// BTree Multi-node Tree Operations /////////////////////// + + +/*------------------------------------------------------------------------------- + + Routine: SearchTree - Search BTree for key and set up Tree Path Table. + + Function: Searches BTree for specified key, setting up the Tree Path Table to + reflect the search path. 
+ + + Input: btreePtr - pointer to control block of BTree to search + keyPtr - pointer to the key to search for + treePathTable - pointer to the tree path table to construct + + Output: nodeNum - number of the node containing the key position + iterator - BTreeIterator specifying record or insert position + + Result: noErr - key found, index is record index + fsBTRecordNotFoundErr - key not found, index is insert index + fsBTEmptyErr - key not found, return params are nil + otherwise - catastrophic failure (GetNode/ReleaseNode failed) + -------------------------------------------------------------------------------*/ + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr searchKey, + TreePathTable treePathTable, + u_int32_t *nodeNum, + BlockDescriptor *nodePtr, + u_int16_t *returnIndex ) +{ + OSStatus err; + int16_t level; // Expected depth of current node + u_int32_t curNodeNum; // Current node we're searching + NodeRec nodeRec; + u_int16_t index; + Boolean keyFound; + int8_t nodeKind; // Kind of current node (index/leaf) + KeyPtr keyPtr; + u_int8_t * dataPtr; + u_int16_t dataSize; + + + curNodeNum = btreePtr->rootNode; + level = btreePtr->treeDepth; + + if (level == 0) // is the tree empty? + { + err = fsBTEmptyErr; + goto ErrorExit; + } + + //for debugging... + treePathTable [0].node = 0; + treePathTable [0].index = 0; + + while (true) + { + // + // [2550929] Node number 0 is the header node. It is never a valid + // index or leaf node. If we're ever asked to search through node 0, + // something has gone wrong (typically a bad child node number, or + // we found a node full of zeroes that we thought was an index node). 
+ // + if (curNodeNum == 0) + { + LFHFS_LOG(LEVEL_ERROR, "SearchTree: curNodeNum is zero!"); + err = btBadNode; + goto ErrorExit; + } + + err = GetNode (btreePtr, curNodeNum, 0, &nodeRec); + if (err != noErr) + { + LFHFS_LOG(LEVEL_ERROR, "SearchTree: GetNode returned with error %d!",err); + goto ErrorExit; + } + + // + // [2550929] Sanity check the node height and node type. We expect + // particular values at each iteration in the search. This checking + // quickly finds bad pointers, loops, and other damage to the + // hierarchy of the B-tree. + // + if (((BTNodeDescriptor*)nodeRec.buffer)->height != level) + { + LFHFS_LOG(LEVEL_ERROR, "Incorrect node height"); + err = btBadNode; + goto ReleaseAndExit; + } + nodeKind = ((BTNodeDescriptor*)nodeRec.buffer)->kind; + if (level == 1) + { + // Nodes at level 1 must be leaves, by definition + if (nodeKind != kBTLeafNode) + { + LFHFS_LOG(LEVEL_ERROR, "Incorrect node type: expected leaf"); + err = btBadNode; + goto ReleaseAndExit; + } + } + else + { + // A node at any other depth must be an index node + if (nodeKind != kBTIndexNode) + { + LFHFS_LOG(LEVEL_ERROR, "Incorrect node type: expected index"); + err = btBadNode; + goto ReleaseAndExit; + } + } + + keyFound = SearchNode (btreePtr, nodeRec.buffer, searchKey, &index); + + treePathTable [level].node = curNodeNum; + + if (nodeKind == kBTLeafNode) + { + treePathTable [level].index = index; + break; // were done... + } + + if ( (keyFound != true) && (index != 0)) + --index; + + treePathTable [level].index = index; + + err = GetRecordByIndex (btreePtr, nodeRec.buffer, index, &keyPtr, &dataPtr, &dataSize); + if (err != noErr) + { + // [2550929] If we got an error, it is probably because the index was bad + // (typically a corrupt node that confused SearchNode). Invalidate the node + // so we won't accidentally use the corrupted contents. NOTE: the Mac OS 9 + // sources call this InvalidateNode. 
+ LFHFS_LOG(LEVEL_ERROR, "SearchTree: GetRecordByIndex returned with errpr %d!",err); + + (void) TrashNode(btreePtr, &nodeRec); + goto ErrorExit; + } + + // Get the child pointer out of this index node. We're now done with the current + // node and can continue the search with the child node. + curNodeNum = *(u_int32_t *)dataPtr; + err = ReleaseNode (btreePtr, &nodeRec); + if (err != noErr) + { + LFHFS_LOG(LEVEL_ERROR, "SearchTree: ReleaseNode returned with errpr %d!",err); + goto ErrorExit; + } + + // The child node should be at a level one less than the parent. + --level; + } + + *nodeNum = curNodeNum; + *nodePtr = nodeRec; + *returnIndex = index; + + if (keyFound) + return noErr; // searchKey found, index identifies record in node + else + return fsBTRecordNotFoundErr; // searchKey not found, index identifies insert point + +ReleaseAndExit: + (void) ReleaseNode(btreePtr, &nodeRec); + // fall into ErrorExit + +ErrorExit: + + *nodeNum = 0; + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + *returnIndex = 0; + return err; +} + +////////////////////////////////// InsertTree /////////////////////////////////// + +OSStatus InsertTree ( BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level, + Boolean replacingKey, + u_int32_t *insertNode ) +{ + InsertKey primaryKey; + OSStatus err; + + primaryKey.keyPtr = keyPtr; + primaryKey.keyLength = GetKeyLength(btreePtr, primaryKey.keyPtr, (level == 1)); + primaryKey.recPtr = recPtr; + primaryKey.recSize = recSize; + primaryKey.replacingKey = replacingKey; + primaryKey.skipRotate = false; + + err = InsertLevel (btreePtr, treePathTable, &primaryKey, nil, + targetNode, index, level, insertNode ); + + return err; + +} // End of InsertTree + + +////////////////////////////////// InsertLevel ////////////////////////////////// + +OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable 
treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level, + u_int32_t *insertNode ) +{ + OSStatus err; + BlockDescriptor leftNode; + u_int32_t targetNodeNum; + u_int32_t newNodeNum; + u_int16_t newIndex; + Boolean insertParent; + Boolean updateParent; + Boolean newRoot; + InsertKey insertKey; + +#if defined(applec) && !defined(__SC__) + if ((level == 1) && (((NodeDescPtr)targetNode->buffer)->kind != kBTLeafNode)) + { + LFHFS_LOG(LEVEL_ERROR, " InsertLevel: non-leaf at level 1! "); + hfs_assert(0); + } +#endif + leftNode.buffer = nil; + leftNode.blockHeader = nil; + targetNodeNum = treePathTable [level].node; + + insertParent = false; + updateParent = false; + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, targetNode); + + ////// process first insert ////// + + err = InsertNode (btreePtr, primaryKey, targetNode, targetNodeNum, index, + &newNodeNum, &newIndex, &leftNode, &updateParent, &insertParent, &newRoot ); + M_ExitOnError (err); + + if ( newRoot ) + { + // Extend the treePathTable by adding an entry for the new + // root node that references the current targetNode. + // + // If inserting the secondaryKey changes the first key of + // the target node, then we'll have to update the second + // key in the new root node. 
+ + treePathTable [level + 1].node = btreePtr->rootNode; + treePathTable [level + 1].index = 1; // 1 since we always split/rotate left + } + + if ( level == 1 ) + *insertNode = newNodeNum; + + ////// process second insert (if any) ////// + + if ( secondaryKey != nil ) + { + Boolean temp; + + err = InsertNode (btreePtr, secondaryKey, targetNode, newNodeNum, newIndex, + &newNodeNum, &newIndex, &leftNode, &updateParent, &insertParent, &temp); + M_ExitOnError (err); + } + + //////////////////////// Update Parent(s) /////////////////////////////// + + if ( insertParent || updateParent ) + { + BlockDescriptor parentNode; + u_int32_t parentNodeNum; + KeyPtr keyPtr; + u_int8_t * recPtr; + u_int16_t recSize; + + parentNode.buffer = nil; + parentNode.blockHeader = nil; + + secondaryKey = nil; + + if (level == btreePtr->treeDepth) + { + LFHFS_LOG(LEVEL_ERROR, " InsertLevel: unfinished insert!?"); + hfs_assert(0); + } + ++level; + + // Get Parent Node data... + index = treePathTable [level].index; + parentNodeNum = treePathTable [level].node; + + if (parentNodeNum == 0) + { + LFHFS_LOG(LEVEL_ERROR, " InsertLevel: parent node is zero!?"); + hfs_assert(0); + } + + err = GetNode (btreePtr, parentNodeNum, 0, &parentNode); // released as target node in next level up + M_ExitOnError (err); + ////////////////////////// Update Parent Index ////////////////////////////// + + if ( updateParent ) + { + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &parentNode); + + // debug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + if ((*(u_int32_t *) recPtr) != targetNodeNum) + { + LFHFS_LOG(LEVEL_ERROR, " InsertLevel: parent ptr doesn't match target node!"); + hfs_assert(0); + } + + // need to delete and re-insert this parent key/ptr + // we delete it here and it gets re-inserted in the + // InsertLevel call below. 
+ DeleteRecord (btreePtr, parentNode.buffer, index); + + primaryKey->keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + primaryKey->keyLength = GetKeyLength(btreePtr, primaryKey->keyPtr, false); + primaryKey->recPtr = (u_int8_t *) &targetNodeNum; + primaryKey->recSize = sizeof(targetNodeNum); + primaryKey->replacingKey = kReplaceRecord; + primaryKey->skipRotate = insertParent; // don't rotate left if we have two inserts occuring + } + + ////////////////////////// Add New Parent Index ///////////////////////////// + + if ( insertParent ) + { + InsertKey *insertKeyPtr; + + if ( updateParent ) + { + insertKeyPtr = &insertKey; + secondaryKey = &insertKey; + } + else + { + insertKeyPtr = primaryKey; + } + + insertKeyPtr->keyPtr = (KeyPtr) GetRecordAddress (btreePtr, leftNode.buffer, 0); + insertKeyPtr->keyLength = GetKeyLength(btreePtr, insertKeyPtr->keyPtr, false); + insertKeyPtr->recPtr = (u_int8_t *) &((NodeDescPtr)targetNode->buffer)->bLink; + insertKeyPtr->recSize = sizeof(u_int32_t); + insertKeyPtr->replacingKey = kInsertRecord; + insertKeyPtr->skipRotate = false; // a rotate is OK during second insert + } + + err = InsertLevel (btreePtr, treePathTable, primaryKey, secondaryKey, + &parentNode, index, level, insertNode ); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); // all done with target + M_ExitOnError (err); + + err = UpdateNode (btreePtr, &leftNode, 0, kLockTransaction); // all done with left sibling + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &leftNode); + + LFHFS_LOG(LEVEL_ERROR, " InsertLevel: an error occurred!"); + + return err; + +} // End of InsertLevel + + + +////////////////////////////////// InsertNode /////////////////////////////////// + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + + BlockDescriptor *rightNode, + u_int32_t node, + u_int16_t index, + + 
u_int32_t *newNode, + u_int16_t *newIndex, + + BlockDescriptor *leftNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ) +{ + BlockDescriptor *targetNode = NULL; + u_int32_t leftNodeNum; + u_int16_t recsRotated; + OSErr err; + Boolean recordFit; + + *rootSplit = false; + + if (rightNode->buffer == leftNode->buffer) + { + LFHFS_LOG(LEVEL_ERROR, " InsertNode: rightNode == leftNode"); + hfs_assert(0); + } + + leftNodeNum = ((NodeDescPtr) rightNode->buffer)->bLink; + + + /////////////////////// Try Simple Insert /////////////////////////////// + + /* sanity check our left and right nodes here. */ + if (node == leftNodeNum) { + if (leftNode->buffer == NULL) { + err = fsBTInvalidNodeErr; + M_ExitOnError(err); + } + else{ + targetNode = leftNode; + } + } + else { + // we can assume right node is initialized. + targetNode = rightNode; + } + + + recordFit = InsertKeyRecord (btreePtr, targetNode->buffer, index, key->keyPtr, key->keyLength, key->recPtr, key->recSize); + + if ( recordFit ) + { + *newNode = node; + *newIndex = index; + + if ( (index == 0) && (((NodeDescPtr) targetNode->buffer)->height != btreePtr->treeDepth) ) + *updateParent = true; // the first record changed so we need to update the parent + } + + + //////////////////////// Try Rotate Left //////////////////////////////// + + if ( !recordFit && leftNodeNum > 0 ) + { + if (leftNode->buffer != nil) + { + LFHFS_LOG(LEVEL_ERROR, " InsertNode: leftNode already acquired!"); + hfs_assert(0); + } + + if ( leftNode->buffer == nil ) + { + err = GetNode (btreePtr, leftNodeNum, 0, leftNode); // will be released by caller or a split below + M_ExitOnError (err); + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, leftNode); + } + + if (((NodeDescPtr) leftNode->buffer)->fLink != node) + { + LFHFS_LOG(LEVEL_ERROR, " InsertNode, RotateLeft: invalid sibling link!"); + hfs_assert(0); + } + + if ( !key->skipRotate ) // are rotates allowed? 
+ { + err = RotateLeft (btreePtr, leftNode->buffer, rightNode->buffer, index, key->keyPtr, key->recPtr, + key->recSize, newIndex, newNode, &recordFit, &recsRotated ); + M_ExitOnError (err); + + if ( recordFit ) + { + if ( key->replacingKey || (recsRotated > 1) || (index > 0) ) + *updateParent = true; + } + } + } + + + //////////////////////// Try Split Left ///////////////////////////////// + + if ( !recordFit ) + { + // might not have left node... + err = SplitLeft (btreePtr, leftNode, rightNode, node, index, key->keyPtr, + key->recPtr, key->recSize, newIndex, newNode, &recsRotated); + M_ExitOnError (err); + + // if we split root node - add new root + + if ( ((NodeDescPtr) rightNode->buffer)->height == btreePtr->treeDepth ) + { + err = AddNewRootNode (btreePtr, leftNode->buffer, rightNode->buffer); // Note: does not update TPT + M_ExitOnError (err); + *rootSplit = true; + } + else + { + *insertParent = true; + + if ( key->replacingKey || (recsRotated > 1) || (index > 0) ) + *updateParent = true; + } + } + + return noErr; + +ErrorExit: + (void) ReleaseNode (btreePtr, leftNode); + return err; + +} // End of InsertNode + + +/*------------------------------------------------------------------------------- + Routine: DeleteTree - One_line_description. 
+ + Function: Brief_description_of_the_function_and_any_side_effects + + ToDo: + + Input: btreePtr - description + treePathTable - description + targetNode - description + index - description + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level ) +{ + OSStatus err; + BlockDescriptor parentNode; + BTNodeDescriptor *targetNodePtr; + u_int32_t targetNodeNum; + Boolean deleteRequired; + Boolean updateRequired; + + // XXXdbg - initialize these to null in case we get an + // error and try to exit before it's initialized + parentNode.buffer = nil; + parentNode.blockHeader = nil; + + deleteRequired = false; + updateRequired = false; + + targetNodeNum = treePathTable[level].node; + targetNodePtr = targetNode->buffer; + if (targetNodePtr == nil) + { + LFHFS_LOG(LEVEL_ERROR, "DeleteTree: targetNode has nil buffer!"); + hfs_assert(0); + } + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, targetNode); + + DeleteRecord (btreePtr, targetNodePtr, index); + + //coalesce remaining records? + + if ( targetNodePtr->numRecords == 0 ) // did we delete the last record? 
+ { + BlockDescriptor siblingNode; + u_int32_t siblingNodeNum; + + deleteRequired = true; + + siblingNode.buffer = nil; + siblingNode.blockHeader = nil; + + ////////////////// Get Siblings & Update Links ////////////////////////// + + siblingNodeNum = targetNodePtr->bLink; // Left Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, 0, &siblingNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &siblingNode); + + ((NodeDescPtr)siblingNode.buffer)->fLink = targetNodePtr->fLink; + err = UpdateNode (btreePtr, &siblingNode, 0, kLockTransaction); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update firstLeafNode + { + btreePtr->firstLeafNode = targetNodePtr->fLink; + } + + siblingNodeNum = targetNodePtr->fLink; // Right Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, 0, &siblingNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &siblingNode); + + ((NodeDescPtr)siblingNode.buffer)->bLink = targetNodePtr->bLink; + err = UpdateNode (btreePtr, &siblingNode, 0, kLockTransaction); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update lastLeafNode + { + btreePtr->lastLeafNode = targetNodePtr->bLink; + } + + //////////////////////// Free Empty Node //////////////////////////////// + + GenericLFBuf *psBuf = targetNode->blockHeader; + lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_LITTLE_ENDIAN); + ClearNode (btreePtr, targetNodePtr); + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); + M_ExitOnError (err); + + err = FreeNode (btreePtr, targetNodeNum); + M_ExitOnError (err); + } + else if ( index == 0 ) // did we delete the first record? 
+ { + updateRequired = true; // yes, so we need to update parent + } + + + if ( level == btreePtr->treeDepth ) // then targetNode->buffer is the root node + { + deleteRequired = false; + updateRequired = false; + + if ( targetNode->buffer == nil ) // then root was freed and the btree is empty + { + btreePtr->rootNode = 0; + btreePtr->treeDepth = 0; + } + else if ( ((NodeDescPtr)targetNode->buffer)->numRecords == 1 ) + { + err = CollapseTree (btreePtr, targetNode); + M_ExitOnError (err); + } + } + + + if ( updateRequired || deleteRequired ) + { + ++level; // next level + + //// Get Parent Node and index + index = treePathTable [level].index; + err = GetNode (btreePtr, treePathTable[level].node, 0, &parentNode); + M_ExitOnError (err); + + if ( updateRequired ) + { + KeyPtr keyPtr; + u_int8_t * recPtr; + u_int16_t recSize; + u_int32_t insertNode; + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &parentNode); + + //debug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + if ((*(u_int32_t *) recPtr) != targetNodeNum) + { + LFHFS_LOG(LEVEL_ERROR, " DeleteTree: parent ptr doesn't match targetNodeNum!!"); + hfs_assert(0); + } + + // need to delete and re-insert this parent key/ptr + DeleteRecord (btreePtr, parentNode.buffer, index); + + keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + recPtr = (u_int8_t *) &targetNodeNum; + recSize = sizeof(targetNodeNum); + + err = InsertTree (btreePtr, treePathTable, keyPtr, recPtr, recSize, + &parentNode, index, level, kReplaceRecord, &insertNode); + M_ExitOnError (err); + } + else // deleteRequired + { + err = DeleteTree (btreePtr, treePathTable, &parentNode, index, level); + M_ExitOnError (err); + } + } + + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &parentNode); + + return err; + +} // 
end DeleteTree + + + +///////////////////////////////// CollapseTree ////////////////////////////////// + +static OSStatus CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ) +{ + OSStatus err; + u_int32_t originalRoot; + u_int32_t nodeNum; + + originalRoot = btreePtr->rootNode; + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, blockPtr); + + while (true) + { + if ( ((NodeDescPtr)blockPtr->buffer)->numRecords > 1) + break; // this will make a fine root node + + if ( ((NodeDescPtr)blockPtr->buffer)->kind == kBTLeafNode) + break; // we've hit bottom + + nodeNum = btreePtr->rootNode; + btreePtr->rootNode = GetChildNodeNum (btreePtr, blockPtr->buffer, 0); + --btreePtr->treeDepth; + + //// Clear and Free Current Old Root Node //// + GenericLFBuf *psBuf = blockPtr->blockHeader; + lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_LITTLE_ENDIAN); + ClearNode (btreePtr, blockPtr->buffer); + err = UpdateNode (btreePtr, blockPtr, 0, kLockTransaction); + M_ExitOnError (err); + err = FreeNode (btreePtr, nodeNum); + M_ExitOnError (err); + + //// Get New Root Node + err = GetNode (btreePtr, btreePtr->rootNode, 0, blockPtr); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, blockPtr); + } + + if (btreePtr->rootNode != originalRoot) + M_BTreeHeaderDirty (btreePtr); + + err = UpdateNode (btreePtr, blockPtr, 0, kLockTransaction); // always update! + M_ExitOnError (err); + + return noErr; + + + /////////////////////////////////// ErrorExit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, blockPtr); + return err; +} + + + +////////////////////////////////// RotateLeft /////////////////////////////////// + +/*------------------------------------------------------------------------------- + + Routine: RotateLeft - One_line_description. 
+ + Function: Brief_description_of_the_function_and_any_side_effects + + Algorithm: if rightIndex > insertIndex, subtract 1 for actual rightIndex + + Input: btreePtr - description + leftNode - description + rightNode - description + rightInsertIndex - description + keyPtr - description + recPtr - description + recSize - description + + Output: insertIndex + insertNodeNum - description + recordFit - description + recsRotated + + Result: noErr - success + != noErr - failure + -------------------------------------------------------------------------------*/ + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + u_int16_t rightInsertIndex, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + u_int16_t *insertIndex, + u_int32_t *insertNodeNum, + Boolean *recordFit, + u_int16_t *recsRotated ) +{ + OSStatus err; + int32_t insertSize; + int32_t nodeSize; + int32_t leftSize, rightSize; + int32_t moveSize = 0; + u_int16_t keyLength; + u_int16_t lengthFieldSize; + u_int16_t index, moveIndex; + Boolean didItFit; + + ///////////////////// Determine If Record Will Fit ////////////////////////// + + keyLength = GetKeyLength(btreePtr, keyPtr, (rightNode->kind == kBTLeafNode)); + + // the key's length field is 8-bits in HFS and 16-bits in HFS+ + if ( btreePtr->attributes & kBTBigKeysMask ) + lengthFieldSize = sizeof(u_int16_t); + else + lengthFieldSize = sizeof(u_int8_t); + + insertSize = keyLength + lengthFieldSize + recSize + sizeof(u_int16_t); + + if ( M_IsOdd (insertSize) ) + ++insertSize; // add pad byte; + + nodeSize = btreePtr->nodeSize; + + // add size of insert record to right node + rightSize = nodeSize - GetNodeFreeSize (btreePtr, rightNode) + insertSize; + leftSize = nodeSize - GetNodeFreeSize (btreePtr, leftNode); + + moveIndex = 0; + + while ( leftSize < rightSize ) + { + if ( moveIndex < rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex) + 2; + } + else if ( moveIndex == 
rightInsertIndex ) + { + moveSize = insertSize; + } + else // ( moveIndex > rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex - 1) + 2; + } + + leftSize += moveSize; + rightSize -= moveSize; + ++moveIndex; + } + + if ( leftSize > nodeSize ) // undo last move + { + rightSize += moveSize; + --moveIndex; + } + + if ( rightSize > nodeSize ) // record won't fit - failure, but not error + { + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return noErr; + } + + // we've found balance point, moveIndex == number of records moved into leftNode + + + //////////////////////////// Rotate Records ///////////////////////////////// + + *recsRotated = moveIndex; + *recordFit = true; + index = 0; + + while ( index < moveIndex ) + { + if ( index == rightInsertIndex ) // insert new record in left node + { + u_int16_t leftInsertIndex; + + leftInsertIndex = leftNode->numRecords; + + didItFit = InsertKeyRecord (btreePtr, leftNode, leftInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + LFHFS_LOG(LEVEL_ERROR, "RotateLeft: InsertKeyRecord (left) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = leftInsertIndex; + *insertNodeNum = rightNode->bLink; + } + else + { + didItFit = RotateRecordLeft (btreePtr, leftNode, rightNode); + if ( !didItFit ) + { + LFHFS_LOG(LEVEL_ERROR, "RotateLeft: RotateRecordLeft returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + } + + ++index; + } + + if ( moveIndex <= rightInsertIndex ) // then insert new record in right node + { + rightInsertIndex -= index; // adjust for records already rotated + + didItFit = InsertKeyRecord (btreePtr, rightNode, rightInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + LFHFS_LOG(LEVEL_ERROR, "RotateLeft: InsertKeyRecord (right) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = rightInsertIndex; + *insertNodeNum = 
leftNode->fLink; + } + + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return err; +} + + + +/////////////////////////////////// SplitLeft /////////////////////////////////// + +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + u_int32_t rightNodeNum, + u_int16_t index, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + u_int16_t *insertIndex, + u_int32_t *insertNodeNum, + u_int16_t *recsRotated ) +{ + OSStatus err; + NodeDescPtr left, right; + u_int32_t newNodeNum; + Boolean recordFit; + + + ///////////////////////////// Compare Nodes ///////////////////////////////// + + right = rightNode->buffer; + left = leftNode->buffer; + + if (right->bLink != 0 && left == 0) + { + LFHFS_LOG(LEVEL_ERROR, " SplitLeft: left sibling missing!?" ); + hfs_assert(0); + } + /* type should be kBTLeafNode or kBTIndexNode */ + + if ( (right->height == 1) && (right->kind != kBTLeafNode) ) + return fsBTInvalidNodeErr; + + if ( left != nil ) + { + if ( left->fLink != rightNodeNum ) + return fsBTInvalidNodeErr; //E_BadSibling ? + + if ( left->height != right->height ) + return fsBTInvalidNodeErr; //E_BadNodeHeight ? + + if ( left->kind != right->kind ) + return fsBTInvalidNodeErr; //E_BadNodeType ? 
+ } + + + ///////////////////////////// Allocate Node ///////////////////////////////// + + err = AllocateNode (btreePtr, &newNodeNum); + M_ExitOnError (err); + + + /////////////// Update Forward Link In Original Left Node /////////////////// + + if ( left != nil ) + { + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, leftNode); + + left->fLink = newNodeNum; + err = UpdateNode (btreePtr, leftNode, 0, kLockTransaction); + M_ExitOnError (err); + } + + + /////////////////////// Initialize New Left Node //////////////////////////// + + err = GetNewNode (btreePtr, newNodeNum, leftNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, leftNode); + + left = leftNode->buffer; + left->fLink = rightNodeNum; + + + // Steal Info From Right Node + + left->bLink = right->bLink; + left->kind = right->kind; + left->height = right->height; + + right->bLink = newNodeNum; // update Right bLink + + if ( (left->kind == kBTLeafNode) && (left->bLink == 0) ) + { + // if we're adding a new first leaf node - update BTreeInfoRec + + btreePtr->firstLeafNode = newNodeNum; + M_BTreeHeaderDirty (btreePtr); //AllocateNode should have set the bit already... + } + + ////////////////////////////// Rotate Left ////////////////////////////////// + + err = RotateLeft (btreePtr, left, right, index, keyPtr, recPtr, recSize, + insertIndex, insertNodeNum, &recordFit, recsRotated); + + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, leftNode); + (void) ReleaseNode (btreePtr, rightNode); + + //Free new node if allocated? 
+
+    *insertIndex = 0;
+    *insertNodeNum = 0;
+    *recsRotated = 0;
+
+    return err;
+}
+
+
+
+/////////////////////////////// RotateRecordLeft ////////////////////////////////
+
+/*
+ * RotateRecordLeft
+ *
+ * Move the first record of rightNode to the end of leftNode.  Returns false
+ * (leaving rightNode untouched) when the record does not fit in leftNode;
+ * on success the record is removed from rightNode and true is returned.
+ */
+static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr,
+                                 NodeDescPtr          leftNode,
+                                 NodeDescPtr          rightNode )
+{
+    u_int16_t    size;
+    u_int8_t *   recPtr;
+    Boolean      recordFit;
+
+    size   = GetRecordSize    (btreePtr, rightNode, 0);
+    recPtr = GetRecordAddress (btreePtr, rightNode, 0);
+
+    // Insert into the left node first; only delete from the right node once
+    // the insert is known to have succeeded.
+    recordFit = InsertRecord (btreePtr, leftNode, leftNode->numRecords, recPtr, size);
+
+    if ( !recordFit )
+        return false;
+
+    DeleteRecord (btreePtr, rightNode, 0);
+
+    return true;
+}
+
+
+//////////////////////////////// AddNewRootNode /////////////////////////////////
+
+/*
+ * AddNewRootNode
+ *
+ * Allocate a fresh index node and make it the tree's new root, containing one
+ * pointer record for leftNode and one for rightNode.  Tree depth grows by
+ * one.  Both child node pointers must be non-nil (asserted below).
+ */
+static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr,
+                                NodeDescPtr          leftNode,
+                                NodeDescPtr          rightNode )
+{
+    OSStatus        err;
+    BlockDescriptor rootNode;
+    u_int32_t       rootNum;
+    KeyPtr          keyPtr;
+    Boolean         didItFit;
+    u_int16_t       keyLength;
+
+    rootNode.buffer      = nil;
+    rootNode.blockHeader = nil;
+
+    if (leftNode == nil || rightNode == nil)
+    {
+        LFHFS_LOG(LEVEL_ERROR, "AddNewRootNode: %s nil", (leftNode == nil && rightNode == nil)? "left and right node are" : ((leftNode == nil) ?
"left node is" : "right node is")); + hfs_assert(0); + } + + /////////////////////// Initialize New Root Node //////////////////////////// + + err = AllocateNode (btreePtr, &rootNum); + M_ExitOnError (err); + + err = GetNewNode (btreePtr, rootNum, &rootNode); + M_ExitOnError (err); + + // XXXdbg + ModifyBlockStart(btreePtr->fileRefNum, &rootNode); + + ((NodeDescPtr)rootNode.buffer)->kind = kBTIndexNode; + ((NodeDescPtr)rootNode.buffer)->height = ++btreePtr->treeDepth; + + + ///////////////////// Insert Left Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, leftNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, rootNode.buffer, 0, keyPtr, keyLength, + (u_int8_t *) &rightNode->bLink, 4 ); + if (!didItFit) + { + LFHFS_LOG(LEVEL_ERROR, "AddNewRootNode:InsertKeyRecord failed for left index record\n"); + hfs_assert(0); + } + + //////////////////// Insert Right Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, rightNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, rootNode.buffer, 1, keyPtr, keyLength, + (u_int8_t *) &leftNode->fLink, 4 ); + + if (!didItFit) + { + LFHFS_LOG(LEVEL_ERROR, "AddNewRootNode:InsertKeyRecord failed for right index record\n"); + hfs_assert(0); + } + + /////////////////////////// Release Root Node /////////////////////////////// + + err = UpdateNode (btreePtr, &rootNode, 0, kLockTransaction); + M_ExitOnError (err); + + // update BTreeInfoRec + + btreePtr->rootNode = rootNum; + M_BTreeHeaderDirty(btreePtr); + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + return err; +} + + +static u_int16_t GetKeyLength ( const BTreeControlBlock *btreePtr, const BTreeKey *key, Boolean forLeafNode ) +{ + u_int16_t length; + + if ( forLeafNode || btreePtr->attributes & kBTVariableIndexKeysMask ) + 
length = KeyLength (btreePtr, key); // just use actual key length + else + length = btreePtr->maxKeyLength; // fixed sized index key (i.e. HFS) //shouldn't we clear the pad bytes? + + return length; +} + + diff --git a/livefiles_hfs_plugin/lf_hfs_btrees_internal.h b/livefiles_hfs_plugin/lf_hfs_btrees_internal.h new file mode 100644 index 0000000..6c66fab --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btrees_internal.h @@ -0,0 +1,336 @@ +// +// lf_hfs_btrees_internal.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#ifndef lf_hfs_btrees_internal_h +#define lf_hfs_btrees_internal_h + +#include "lf_hfs_file_mgr_internal.h" + +enum { + fsBTInvalidHeaderErr = btBadHdr, + fsBTBadRotateErr = dsBadRotate, + fsBTInvalidNodeErr = btBadNode, + fsBTRecordTooLargeErr = btNoFit, + fsBTRecordNotFoundErr = btNotFound, + fsBTDuplicateRecordErr = btExists, + fsBTFullErr = btNoSpaceAvail, + + fsBTInvalidFileErr = ERR_BASE + 0x0302, /* no BTreeCB has been allocated for fork*/ + fsBTrFileAlreadyOpenErr = ERR_BASE + 0x0303, + fsBTInvalidIteratorErr = ERR_BASE + 0x0308, + fsBTEmptyErr = ERR_BASE + 0x030A, + fsBTNoMoreMapNodesErr = ERR_BASE + 0x030B, + fsBTBadNodeSize = ERR_BASE + 0x030C, + fsBTBadNodeType = ERR_BASE + 0x030D, + fsBTInvalidKeyLengthErr = ERR_BASE + 0x030E, + fsBTStartOfIterationErr = ERR_BASE + 0x0353, + fsBTEndOfIterationErr = ERR_BASE + 0x0354, + fsBTUnknownVersionErr = ERR_BASE + 0x0355, + fsBTTreeTooDeepErr = ERR_BASE + 0x0357, + fsIteratorExitedScopeErr = ERR_BASE + 0x0A02, /* iterator exited the scope*/ + fsIteratorScopeExceptionErr = ERR_BASE + 0x0A03, /* iterator is undefined due to error or movement of scope locality*/ + fsUnknownIteratorMovementErr = ERR_BASE + 0x0A04, /* iterator movement is not defined*/ + fsInvalidIterationMovmentErr = ERR_BASE + 0x0A05, /* iterator movement is invalid in current context*/ + fsClientIDMismatchErr = ERR_BASE + 0x0A06, /* wrong client process ID*/ + fsEndOfIterationErr = ERR_BASE + 0x0A07, /* there 
were no objects left to return on iteration*/ + fsBTTimeOutErr = ERR_BASE + 0x0A08 /* BTree scan interrupted -- no time left for physical I/O */ +}; + +typedef struct { + void *buffer; + void *blockHeader; + daddr64_t blockNum; /* logical block number (used by hfs_swap_BTNode) */ + ByteCount blockSize; + Boolean blockReadFromDisk; + Byte isModified; // XXXdbg - for journaling + Byte reserved[2]; + +} BlockDescriptor, *BlockDescPtr; + + +typedef struct { + void * bufferAddress; + ByteCount itemSize; + ItemCount itemCount; + +} FSBufferDescriptor, *FSBufferDescriptorPtr; + + +/* + Fork Level Access Method Block get options + */ +enum { + kGetBlock = 0x00000000, + kGetBlockHint = 0x00000001, // if set, the block is being looked up using hint + kForceReadBlock = 0x00000002, // how does this relate to Read/Verify? Do we need this? + kGetEmptyBlock = 0x00000008 +}; +typedef u_int32_t GetBlockOptions; + +/* + Fork Level Access Method Block release options + */ +enum { + kReleaseBlock = 0x00000000, + kForceWriteBlock = 0x00000001, + kMarkBlockDirty = 0x00000002, + kTrashBlock = 0x00000004, + kLockTransaction = 0x00000100 +}; +typedef u_int32_t ReleaseBlockOptions; + +typedef u_int64_t FSSize; +typedef u_int32_t ForkBlockNumber; + +/*============================================================================ + Fork Level Buffered I/O Access Method + ============================================================================*/ + +typedef OSStatus (* GetBlockProcPtr) (FileReference fileRefNum, + uint64_t blockNum, + GetBlockOptions options, + BlockDescriptor *block ); + + +typedef OSStatus (* ReleaseBlockProcPtr) (FileReference fileRefNum, + BlockDescPtr blockPtr, + ReleaseBlockOptions options ); + +typedef OSStatus (* SetEndOfForkProcPtr) (FileReference fileRefNum, + FSSize minEOF, + FSSize maxEOF ); + +typedef OSStatus (* SetBlockSizeProcPtr) (FileReference fileRefNum, + ByteCount blockSize, + ItemCount minBlockCount ); + +OSStatus SetEndOfForkProc ( FileReference 
fileRefNum, FSSize minEOF, FSSize maxEOF ); + + +/* + B*Tree Information Version + */ +enum BTreeInformationVersion{ + kBTreeInfoVersion = 0 +}; + +/* + B*Tree Iteration Operation Constants + */ +enum BTreeIterationOperations{ + kBTreeFirstRecord, + kBTreeNextRecord, + kBTreePrevRecord, + kBTreeLastRecord, + kBTreeCurrentRecord +}; +typedef u_int16_t BTreeIterationOperation; + + +/* + Btree types: 0 is HFS CAT/EXT file, 1~127 are AppleShare B*Tree files, 128~254 unused + hfsBtreeType EQU 0 ; control file + validBTType EQU $80 ; user btree type starts from 128 + userBT1Type EQU $FF ; 255 is our Btree type. Used by BTInit and BTPatch + */ +enum BTreeTypes{ + kHFSBTreeType = 0, // control file + kUserBTreeType = 128, // user btree type starts from 128 + kReservedBTreeType = 255 // +}; + +#define kBTreeHeaderUserBytes 128 + +/* B-tree structures */ + +enum { + kMaxKeyLength = 520 +}; + +typedef union { + u_int8_t length8; + u_int16_t length16; + u_int8_t rawData [kMaxKeyLength+2]; + +} BTreeKey, *BTreeKeyPtr; + +/* BTNodeDescriptor -- Every B-tree node starts with these fields. 
*/ +typedef struct { + u_int32_t fLink; /* next node at this level*/ + u_int32_t bLink; /* previous node at this level*/ + int8_t kind; /* kind of node (leaf, index, header, map)*/ + u_int8_t height; /* zero for header, map; child is one more than parent*/ + u_int16_t numRecords; /* number of records in this node*/ + u_int16_t reserved; /* reserved - initialized as zero */ + +} __attribute__((aligned(2), packed)) BTNodeDescriptor; + +/* Constants for BTNodeDescriptor kind */ +enum { + kBTLeafNode = -1, + kBTIndexNode = 0, + kBTHeaderNode = 1, + kBTMapNode = 2 +}; + +/* BTHeaderRec -- The first record of a B-tree header node */ +typedef struct { + u_int16_t treeDepth; /* maximum height (usually leaf nodes) */ + u_int32_t rootNode; /* node number of root node */ + u_int32_t leafRecords; /* number of leaf records in all leaf nodes */ + u_int32_t firstLeafNode; /* node number of first leaf node */ + u_int32_t lastLeafNode; /* node number of last leaf node */ + u_int16_t nodeSize; /* size of a node, in bytes */ + u_int16_t maxKeyLength; /* reserved */ + u_int32_t totalNodes; /* total number of nodes in tree */ + u_int32_t freeNodes; /* number of unused (free) nodes in tree */ + u_int16_t reserved1; /* unused */ + u_int32_t clumpSize; /* reserved */ + u_int8_t btreeType; /* reserved */ + u_int8_t keyCompareType; /* Key string Comparison Type */ + u_int32_t attributes; /* persistent attributes about the tree */ + u_int32_t reserved3[16]; /* reserved */ + +} __attribute__((aligned(2), packed)) BTHeaderRec; + +/* Constants for BTHeaderRec attributes */ +enum { + kBTBadCloseMask = 0x00000001, /* reserved */ + kBTBigKeysMask = 0x00000002, /* key length field is 16 bits */ + kBTVariableIndexKeysMask = 0x00000004 /* keys in index nodes are variable length */ +}; + +/* + BTreeInfoRec Structure - for BTGetInformation + */ +typedef struct { + u_int16_t version; + u_int16_t nodeSize; + u_int16_t maxKeyLength; + u_int16_t treeDepth; + u_int32_t lastfsync; /* Last time that this was 
fsynced */ + ItemCount numRecords; + ItemCount numNodes; + ItemCount numFreeNodes; + u_int8_t keyCompareType; + u_int8_t reserved[3]; +} BTreeInfoRec, *BTreeInfoRecPtr; + +/* + BTreeHint can never be exported to the outside. Use u_int32_t BTreeHint[4], + u_int8_t BTreeHint[16], etc. + */ +typedef struct { + ItemCount writeCount; + u_int32_t nodeNum; // node the key was last seen in + u_int16_t index; // index then key was last seen at + u_int16_t reserved1; + u_int32_t reserved2; +} BTreeHint, *BTreeHintPtr; + +/* + BTree Iterator + */ +typedef struct { + BTreeHint hint; + u_int16_t version; + u_int16_t reserved; + u_int32_t hitCount; // Total number of leaf records hit + u_int32_t maxLeafRecs; // Max leaf records over iteration + BTreeKey key; +} BTreeIterator, *BTreeIteratorPtr; + + +/*============================================================================ + B*Tree SPI + ============================================================================*/ + +/* + Key Comparison Function ProcPtr Type - for BTOpenPath + */ +//typedef int32_t (* KeyCompareProcPtr)(BTreeKeyPtr a, BTreeKeyPtr b); + + +typedef int32_t (* IterateCallBackProcPtr)(BTreeKeyPtr key, void * record, void * state); + +OSStatus BTOpenPath (FCB *filePtr, KeyCompareProcPtr keyCompareProc); + +OSStatus BTClosePath (FCB *filePtr ); + + +OSStatus BTSearchRecord (FCB *filePtr, + BTreeIterator *searchIterator, + FSBufferDescriptor *btRecord, + u_int16_t *recordLen, + BTreeIterator *resultIterator ); + +OSStatus BTIterateRecord (FCB *filePtr, + BTreeIterationOperation operation, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + u_int16_t *recordLen ); + + +OSStatus BTIterateRecords (FCB *filePtr, + BTreeIterationOperation operation, + BTreeIterator *iterator, + IterateCallBackProcPtr callBackProc, + void *callBackState); + +OSStatus BTInsertRecord (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btrecord, + u_int16_t recordLen ); + +OSStatus BTReplaceRecord (FCB *filePtr, + 
BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + u_int16_t recordLen ); + +OSStatus BTUpdateRecord (FCB *filePtr, + BTreeIterator *iterator, + IterateCallBackProcPtr callBackProc, + void *callBackState ); + +OSStatus BTDeleteRecord (FCB *filePtr, + BTreeIterator *iterator ); + +OSStatus BTGetInformation (FCB *filePtr, + u_int16_t vers, + BTreeInfoRec *info ); + +OSStatus BTIsDirty (FCB *filePtr); + +OSStatus BTFlushPath (FCB *filePtr); + +OSStatus BTReloadData (FCB *filePtr); + +OSStatus BTInvalidateHint (BTreeIterator *iterator ); + +OSStatus BTGetLastSync (FCB *filePtr, + u_int32_t *lastfsync ); + +OSStatus BTSetLastSync (FCB *filePtr, + u_int32_t lastfsync ); + +OSStatus BTHasContiguousNodes(FCB *filePtr); + +OSStatus BTGetUserData(FCB *filePtr, void * dataPtr, int dataSize); + +OSStatus BTSetUserData(FCB *filePtr, void * dataPtr, int dataSize); + +/* B-tree node reserve routines. */ +void BTReserveSetup(void); + +int BTReserveSpace(FCB *file, int operations, void * data); + +int BTReleaseReserve(FCB *file, void * data); + +int BTZeroUnusedNodes(FCB *file); + + +#endif /* lf_hfs_btrees_internal_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_btrees_io.c b/livefiles_hfs_plugin/lf_hfs_btrees_io.c new file mode 100644 index 0000000..d4e2081 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btrees_io.c @@ -0,0 +1,916 @@ +// +// lf_hfs_btrees_io.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+//
+
+#include
+#include "lf_hfs_btrees_io.h"
+#include "lf_hfs.h"
+#include "lf_hfs_xattr.h"
+#include "lf_hfs_cnode.h"
+#include "lf_hfs_endian.h"
+#include "lf_hfs_btrees_io.h"
+#include "lf_hfs_utils.h"
+#include "lf_hfs_file_mgr_internal.h"
+#include "lf_hfs_btrees_private.h"
+#include "lf_hfs_generic_buf.h"
+#include "lf_hfs_vfsutils.h"
+#include "lf_hfs_vfsops.h"
+#include "lf_hfs_readwrite_ops.h"
+#include "lf_hfs_file_extent_mapping.h"
+#include "lf_hfs_vnops.h"
+#include "lf_hfs_journal.h"
+
+/* Forward declarations for file-local helpers. */
+static int ClearBTNodes(struct vnode *vp, int blksize, off_t offset, off_t amount);
+static int btree_journal_modify_block_end(struct hfsmount *hfsmp, GenericLFBuf *bp);
+void btree_swap_node(GenericLFBuf *bp, __unused void *arg);
+
+/*
+ * Return btree node size for given vnode.
+ *
+ * Returns:
+ * For btree vnode, returns btree node size.
+ * For non-btree vnodes, returns 0.
+ */
+u_int16_t get_btree_nodesize(struct vnode *vp)
+{
+    BTreeControlBlockPtr btree;
+    u_int16_t node_size = 0;
+
+    // Only system vnodes carry a B-tree control block (fcbBTCBPtr).
+    if (vnode_issystem(vp)) {
+        btree = (BTreeControlBlockPtr) VTOF(vp)->fcbBTCBPtr;
+        if (btree) {
+            node_size = btree->nodeSize;
+        }
+    }
+
+    return node_size;
+}
+
+/*
+ * Record the B-tree node size for the tree backing vp.  Sizes above MAXBSIZE
+ * are rejected with fsBTBadNodeSize; sizes below kMinNodeSize assert.
+ */
+OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, __unused ItemCount minBlockCount)
+{
+    BTreeControlBlockPtr bTreePtr;
+
+    hfs_assert(vp != NULL);
+    hfs_assert(blockSize >= kMinNodeSize);
+    if (blockSize > MAXBSIZE )
+        return (fsBTBadNodeSize);
+
+    bTreePtr = (BTreeControlBlockPtr)VTOF(vp)->fcbBTCBPtr;
+    bTreePtr->nodeSize = blockSize;
+
+    return (E_NONE);
+}
+
+
+/*
+ * Fetch (or, with kGetEmptyBlock, allocate) the buffer backing B-tree node
+ * blockNum of vp and fill in *block.  Nodes read from disk are endian-checked
+ * and swapped to host order via hfs_swap_BTNode before being returned.
+ */
+OSStatus GetBTreeBlock(FileReference vp, uint64_t blockNum, GetBlockOptions options, BlockDescriptor *block)
+{
+    OSStatus retval = E_NONE;
+    GenericLFBufPtr bp = NULL;
+    u_int8_t allow_empty_node;
+
+    /* If the btree block is being read using hint, it is
+     * fine for the swap code to find zeroed out nodes.
+ */ + if (options & kGetBlockHint) { + allow_empty_node = true; + } else { + allow_empty_node = false; + } + + if (options & kGetEmptyBlock) { + daddr64_t blkno; + off_t offset; + + offset = (daddr64_t)blockNum * (daddr64_t)block->blockSize; + bp = lf_hfs_generic_buf_allocate(vp, blockNum, (uint32_t)block->blockSize, GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN); + if (bp && !hfs_vnop_blockmap(&(struct vnop_blockmap_args){ + .a_vp = vp, + .a_foffset = offset, + .a_size = block->blockSize, + .a_bpn = &blkno + })) { + } + } else { + bp = lf_hfs_generic_buf_allocate(vp, blockNum, (uint32_t)block->blockSize, 0); + retval = lf_hfs_generic_buf_read( bp ); + } + if (bp == NULL) + retval = -1; //XXX need better error + + if (retval == E_NONE) { + block->blockHeader = bp; + block->buffer = bp->pvData; + block->blockNum = bp->uBlockN; + block->blockReadFromDisk = !(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN); + + // XXXdbg + block->isModified = 0; + + /* Check and endian swap B-Tree node (only if it's a valid block) */ + if (!(options & kGetEmptyBlock)) + { + /* This happens when we first open the b-tree, we might not have all the node data on hand */ + if ((((BTNodeDescriptor *)block->buffer)->kind == kBTHeaderNode) && + (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize != bp->uValidBytes) && + (SWAP_BE16 (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize) != bp->uValidBytes)) { + + /* + * Don't swap the node descriptor, record offsets, or other records. + * This record will be invalidated and re-read with the correct node + * size once the B-tree control block is set up with the node size + * from the header record. + */ + retval = hfs_swap_BTNode (block, vp, kSwapBTNodeHeaderRecordOnly, allow_empty_node); + } + else + { + /* + * In this case, we have enough data in-hand to do basic validation + * on the B-Tree node. + */ + if (block->blockReadFromDisk) + { + /* + * The node was just read from disk, so always swap/check it. 
+ * This is necessary on big endian since the test below won't trigger. + */ + retval = hfs_swap_BTNode (block, vp, kSwapBTNodeBigToHost, allow_empty_node); + } + else { + /* + * Block wasn't read from disk; it was found in the cache. + */ + if (*((u_int16_t *)((char *)block->buffer + (block->blockSize - sizeof (u_int16_t)))) == 0x0e00) { + /* + * The node was left in the cache in non-native order, so swap it. + * This only happens on little endian, after the node is written + * back to disk. + */ + retval = hfs_swap_BTNode (block, vp, kSwapBTNodeBigToHost, allow_empty_node); + } + else if (*((u_int16_t *)((char *)block->buffer + (block->blockSize - sizeof (u_int16_t)))) == 0x000e) { + /* + * The node was in-cache in native-endianness. We don't need to do + * anything here, because the node is ready to use. Set retval == 0. + */ + retval = 0; + } + /* + * If the node doesn't have hex 14 (0xe) in the last two bytes of the buffer, + * it doesn't necessarily mean that this is a bad node. Zeroed nodes that are + * marked as unused in the b-tree map node would be OK and not have valid content. 
+     */
+                }
+            }
+        }
+    }
+
+    if (retval) {
+        // Any failure: drop the buffer and scrub the descriptor so callers
+        // cannot use a half-initialized block.
+        if (bp) {
+            lf_hfs_generic_buf_release(bp);
+        }
+        block->blockHeader = NULL;
+        block->buffer = NULL;
+    }
+
+    return (retval);
+}
+
+
+/*
+ * Announce to the journal that blockPtr is about to be modified inside the
+ * current transaction (journal_modify_block_start) and mark the descriptor
+ * modified.  No-op on unjournaled volumes.
+ */
+void ModifyBlockStart(FileReference vp, BlockDescPtr blockPtr)
+{
+    struct hfsmount *hfsmp = VTOHFS(vp);
+    GenericLFBuf *bp = NULL;
+
+    if (hfsmp->jnl == NULL) {
+        return;
+    }
+
+    bp = (GenericLFBuf *) blockPtr->blockHeader;
+
+    if (bp == NULL) {
+        LFHFS_LOG(LEVEL_ERROR, "ModifyBlockStart: ModifyBlockStart: null bp for blockdescptr %p?!?\n", blockPtr);
+        hfs_assert(0);
+        return;
+    }
+
+    journal_modify_block_start(hfsmp->jnl, bp);
+    blockPtr->isModified = 1;
+}
+
+/*
+ * Pre-write callback: byte-swap a cached B-tree node back to big-endian
+ * on-disk order (hfs_swap_BTNode, kSwapBTNodeHostToBig).  Buffers without
+ * GEN_BUF_LITTLE_ENDIAN set are already in disk order and are left alone.
+ */
+void
+btree_swap_node(GenericLFBuf *bp, __unused void *arg)
+{
+    lf_hfs_generic_buf_lock(bp);
+
+    // Already in on-disk (big endian) order; nothing to do.
+    if (!(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN)) {
+        goto exit;
+    }
+
+    // struct hfsmount *hfsmp = (struct hfsmount *)arg;
+    int retval;
+    struct vnode *vp = bp->psVnode;
+    BlockDescriptor block;
+
+    /* Prepare the block pointer */
+    block.blockHeader = bp;
+    block.buffer = bp->pvData;
+    block.blockNum = bp->uBlockN;
+    block.blockReadFromDisk = !(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN);
+    block.blockSize = bp->uDataSize;
+
+    /* Swap the data now that this node is ready to go to disk.
+     * We allow swapping of zeroed out nodes here because we might
+     * be writing node whose last record just got deleted.
+ */ + retval = hfs_swap_BTNode (&block, vp, kSwapBTNodeHostToBig, true); + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "btree_swap_node: btree_swap_node: about to write corrupt node!\n"); + hfs_assert(0); + } +exit: + lf_hfs_generic_buf_unlock(bp); +} + + +static int +btree_journal_modify_block_end(struct hfsmount *hfsmp, GenericLFBuf *bp) +{ + return journal_modify_block_end(hfsmp->jnl, bp, btree_swap_node, hfsmp); +} + +OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options) +{ + + OSStatus retval = E_NONE; + GenericLFBufPtr bp = NULL; + struct hfsmount *hfsmp = VTOHFS(vp); + + bp = (GenericLFBufPtr) blockPtr->blockHeader; + + if (bp == NULL) { + retval = -1; + goto exit; + } + + if (options & kTrashBlock) { + if (hfsmp->jnl && (bp->uCacheFlags & GEN_BUF_WRITE_LOCK)) + { + journal_kill_block(hfsmp->jnl, bp); + } + else + { + lf_hfs_generic_buf_invalidate(bp); + } + + /* Don't let anyone else try to use this bp, it's been consumed */ + blockPtr->blockHeader = NULL; + + } else { + if (options & kForceWriteBlock) { + + if (hfsmp->jnl) + { + if (blockPtr->isModified == 0) { + LFHFS_LOG(LEVEL_ERROR, "releaseblock: modified is 0 but forcewrite set! bp %p\n", bp); + hfs_assert(0); + } + + retval = btree_journal_modify_block_end(hfsmp, bp); + blockPtr->isModified = 0; + } + else + { + btree_swap_node(bp, NULL); + retval = lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + } + + /* Don't let anyone else try to use this bp, it's been consumed */ + blockPtr->blockHeader = NULL; + + } else if (options & kMarkBlockDirty) { + struct timeval tv; + microuptime(&tv); + if ( (options & kLockTransaction) + && hfsmp->jnl == NULL + ) + { + } + if (hfsmp->jnl) + { + if (blockPtr->isModified == 0) { + LFHFS_LOG(LEVEL_ERROR, "releaseblock: modified is 0 but markdirty set! 
bp %p\n", bp); + hfs_assert(0); + } + retval = btree_journal_modify_block_end(hfsmp, bp); + blockPtr->isModified = 0; + } + else + { + btree_swap_node(bp, NULL); + retval = lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + + if ( retval != 0) { + blockPtr->blockHeader = NULL; + goto exit; + } + } + + /* Don't let anyone else try to use this bp, it's been consumed */ + blockPtr->blockHeader = NULL; + + } else { + btree_swap_node(bp, NULL); + + // check if we had previously called journal_modify_block_start() + // on this block and if so, abort it (which will call buf_brelse()). + if (hfsmp->jnl && blockPtr->isModified) { + // XXXdbg - I don't want to call modify_block_abort() + // because I think it may be screwing up the + // journal and blowing away a block that has + // valid data in it. + // + // journal_modify_block_abort(hfsmp->jnl, bp); + //panic("hfs: releaseblock called for 0x%x but mod_block_start previously called.\n", bp); + btree_journal_modify_block_end(hfsmp, bp); + blockPtr->isModified = 0; + } + else + { + lf_hfs_generic_buf_release(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */ + } + + /* Don't let anyone else try to use this bp, it's been consumed */ + blockPtr->blockHeader = NULL; + } + } + +exit: + return (retval); +} + + +OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF) +{ +#pragma unused (maxEOF) + + OSStatus retval = 0, ret = 0; + int64_t actualBytesAdded, origSize; + u_int64_t bytesToAdd; + u_int32_t startAllocation; + u_int32_t fileblocks; + BTreeInfoRec btInfo; + ExtendedVCB *vcb; + FCB *filePtr; + int64_t trim = 0; + int lockflags = 0; + + filePtr = GetFileControlBlock(vp); + + if ( (off_t)minEOF > filePtr->fcbEOF ) + { + bytesToAdd = minEOF - filePtr->fcbEOF; + + if (bytesToAdd < filePtr->ff_clumpsize) + bytesToAdd = filePtr->ff_clumpsize; //XXX why not always be a mutiple of clump size? 
+ } + else + { + return -1; + } + + vcb = VTOVCB(vp); + + /* + * The Extents B-tree can't have overflow extents. ExtendFileC will + * return an error if an attempt is made to extend the Extents B-tree + * when the resident extents are exhausted. + */ + + /* Protect allocation bitmap and extents overflow file. */ + lockflags = SFL_BITMAP; + if (VTOC(vp)->c_fileid != kHFSExtentsFileID) + lockflags |= SFL_EXTENTS; + lockflags = hfs_systemfile_lock(vcb, lockflags, HFS_EXCLUSIVE_LOCK); + + (void) BTGetInformation(filePtr, 0, &btInfo); + +#if 0 // XXXdbg + /* + * The b-tree code expects nodes to be contiguous. So when + * the allocation block size is less than the b-tree node + * size, we need to force disk allocations to be contiguous. + */ + if (vcb->blockSize >= btInfo.nodeSize) { + extendFlags = 0; + } else { + /* Ensure that all b-tree nodes are contiguous on disk */ + extendFlags = kEFContigMask; + } +#endif + + origSize = filePtr->fcbEOF; + fileblocks = filePtr->ff_blocks; + startAllocation = vcb->nextAllocation; + + // loop trying to get a contiguous chunk that's an integer multiple + // of the btree node size. if we can't get a contiguous chunk that + // is at least the node size then we break out of the loop and let + // the error propagate back up. 
+ while((off_t)bytesToAdd >= btInfo.nodeSize) { + do { + retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0, + kEFContigMask | kEFMetadataMask | kEFNoClumpMask, + (int64_t *)&actualBytesAdded); + if (retval == dskFulErr && actualBytesAdded == 0) { + bytesToAdd >>= 1; + if (bytesToAdd < btInfo.nodeSize) { + break; + } else if ((bytesToAdd % btInfo.nodeSize) != 0) { + // make sure it's an integer multiple of the nodeSize + bytesToAdd -= (bytesToAdd % btInfo.nodeSize); + } + } + } while (retval == dskFulErr && actualBytesAdded == 0); + + if (retval == dskFulErr && actualBytesAdded == 0 && bytesToAdd <= btInfo.nodeSize) { + break; + } + + filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize; + bytesToAdd = minEOF - filePtr->fcbEOF; + } + + /* + * If a new extent was added then move the roving allocator + * reference forward by the current b-tree file size so + * there's plenty of room to grow. + */ + if ((retval == 0) && + ((VCBTOHFS(vcb)->hfs_flags & HFS_METADATA_ZONE) == 0) && + (vcb->nextAllocation > startAllocation) && + ((vcb->nextAllocation + fileblocks) < vcb->allocLimit)) { + HFS_UPDATE_NEXT_ALLOCATION(vcb, vcb->nextAllocation + fileblocks); + } + + filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize; + + // XXXdbg ExtendFileC() could have returned an error even though + // it grew the file to be big enough for our needs. If this is + // the case, we don't care about retval so we blow it away. + // + if (filePtr->fcbEOF >= (off_t)minEOF && retval != 0) { + retval = 0; + } + + // XXXdbg if the file grew but isn't large enough or isn't an + // even multiple of the nodeSize then trim things back. if + // the file isn't large enough we trim back to the original + // size. otherwise we trim back to be an even multiple of the + // btree node size. 
+ // + if ((filePtr->fcbEOF < (off_t)minEOF) || ((filePtr->fcbEOF - origSize) % btInfo.nodeSize) != 0) { + + if (filePtr->fcbEOF < (off_t)minEOF) { + retval = dskFulErr; + + if (filePtr->fcbEOF < origSize) { + LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: btree file eof %lld less than orig size %lld!\n", + filePtr->fcbEOF, origSize); + hfs_assert(0); + } + + trim = filePtr->fcbEOF - origSize; + } else { + trim = ((filePtr->fcbEOF - origSize) % btInfo.nodeSize); + } + + ret = TruncateFileC(vcb, filePtr, filePtr->fcbEOF - trim, 0, 0, FTOC(filePtr)->c_fileid, 0); + filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize; + + // XXXdbg - assert if the file didn't get trimmed back properly + if ((filePtr->fcbEOF % btInfo.nodeSize) != 0) { + LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: truncate file didn't! fcbEOF %lld nsize %d fcb %p\n", + filePtr->fcbEOF, btInfo.nodeSize, filePtr); + hfs_assert(0); + } + + if (ret) + { + LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: error truncating btree files (sz 0x%llx, trim %lld, ret %ld)\n", + filePtr->fcbEOF, trim, (long)ret); + goto out; + } + } + + if(VTOC(vp)->c_fileid != kHFSExtentsFileID) { + /* + * Get any extents overflow b-tree changes to disk ASAP! 
+ */ + (void) BTFlushPath(VTOF(vcb->extentsRefNum)); + (void) hfs_fsync(vcb->extentsRefNum, MNT_WAIT, 0); + } + hfs_systemfile_unlock(vcb, lockflags); + lockflags = 0; + + if ((filePtr->fcbEOF % btInfo.nodeSize) != 0) { + LFHFS_LOG(LEVEL_ERROR, "extendbtree: fcb %p has eof 0x%llx not a multiple of 0x%x (trim %llx)\n", + filePtr, filePtr->fcbEOF, btInfo.nodeSize, trim); + hfs_assert(0); + } + + /* + * Update the Alternate MDB or Alternate VolumeHeader + */ + VTOC(vp)->c_flag |= C_MODIFIED; + if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) || + (VTOC(vp)->c_fileid == kHFSCatalogFileID) || + (VTOC(vp)->c_fileid == kHFSAttributesFileID) + ) { + MarkVCBDirty( vcb ); + (void) hfs_flushvolumeheader(VCBTOHFS(vcb), HFS_FVH_WRITE_ALT); + } else { + VTOC(vp)->c_touch_chgtime = TRUE; + VTOC(vp)->c_touch_modtime = TRUE; + + (void) hfs_update(vp, 0); + } + + ret = ClearBTNodes(vp, btInfo.nodeSize, origSize, (filePtr->fcbEOF - origSize)); +out: + if (retval == 0) + retval = ret; + + if (lockflags) + hfs_systemfile_unlock(vcb, lockflags); + + return retval; +} + + +/* + * Clear out (zero) new b-tree nodes on disk. + */ +static int +ClearBTNodes(struct vnode *vp, int blksize, off_t offset, off_t amount) +{ + GenericLFBufPtr bp = NULL; + daddr64_t blk; + daddr64_t blkcnt; + + blk = offset / blksize; + blkcnt = amount / blksize; + + while (blkcnt > 0) { + + bp = lf_hfs_generic_buf_allocate(vp, blk, blksize, GEN_BUF_NON_CACHED); + if (bp == NULL) + continue; + + // XXXdbg -- skipping the journal since it makes a transaction + // become *way* too large + lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + + --blkcnt; + ++blk; + } + + return (0); +} + + +extern char hfs_attrname[]; + +/* + * Create an HFS+ Attribute B-tree File. + * + * No global resources should be held. 
+ */ +int +hfs_create_attr_btree(struct hfsmount *hfsmp, u_int32_t nodesize, u_int32_t nodecnt) +{ + struct vnode* vp = NULL; + struct cat_desc cndesc; + struct cat_attr cnattr; + struct cat_fork cfork; + BlockDescriptor blkdesc; + BTNodeDescriptor *ndp; + BTHeaderRec *bthp; + BTreeControlBlockPtr btcb = NULL; + GenericLFBufPtr bp = NULL; + void * buffer; + u_int8_t *bitmap; + u_int16_t *index; + u_int32_t node_num, num_map_nodes; + u_int32_t bytes_per_map_record; + u_int32_t temp; + u_int16_t offset; + int intrans = 0; + int result; + int newvnode_flags = 0; + +again: + /* + * Serialize creation using HFS_CREATING_BTREE flag. + */ + hfs_lock_mount (hfsmp); + if (hfsmp->hfs_flags & HFS_CREATING_BTREE) { + /* Someone else beat us, wait for them to finish. */ + hfs_unlock_mount (hfsmp); + usleep( 100 ); + if (hfsmp->hfs_attribute_vp) { + return (0); + } + goto again; + } + hfsmp->hfs_flags |= HFS_CREATING_BTREE; + hfs_unlock_mount (hfsmp); + + /* Check if were out of usable disk space. */ + if ((hfs_freeblks(hfsmp, 1) == 0)) { + result = ENOSPC; + goto exit; + } + + /* + * Set up Attribute B-tree vnode + * (this must be done before we start a transaction + * or take any system file locks) + */ + bzero(&cndesc, sizeof(cndesc)); + cndesc.cd_parentcnid = kHFSRootParentID; + cndesc.cd_flags |= CD_ISMETA; + cndesc.cd_nameptr = (const u_int8_t *)hfs_attrname; + cndesc.cd_namelen = strlen(hfs_attrname); + cndesc.cd_cnid = kHFSAttributesFileID; + + bzero(&cnattr, sizeof(cnattr)); + cnattr.ca_linkcount = 1; + cnattr.ca_mode = S_IFREG; + cnattr.ca_fileid = cndesc.cd_cnid; + + bzero(&cfork, sizeof(cfork)); + cfork.cf_clump = nodesize * nodecnt; + + result = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, + &cfork, &vp, &newvnode_flags); + if (result) { + goto exit; + } + /* + * Set up Attribute B-tree control block + */ + btcb = hfs_mallocz(sizeof(*btcb)); + + btcb->nodeSize = nodesize; + btcb->maxKeyLength = kHFSPlusAttrKeyMaximumLength; + btcb->btreeType = 0xFF; + 
btcb->attributes = kBTVariableIndexKeysMask | kBTBigKeysMask; + btcb->version = kBTreeVersion; + btcb->writeCount = 1; + btcb->flags = 0; /* kBTHeaderDirty */ + btcb->fileRefNum = vp; + btcb->getBlockProc = GetBTreeBlock; + btcb->releaseBlockProc = ReleaseBTreeBlock; + btcb->setEndOfForkProc = ExtendBTreeFile; + btcb->keyCompareProc = (KeyCompareProcPtr)hfs_attrkeycompare; + + /* + * NOTE: We must make sure to zero out this pointer if we error out in this function! + * If we don't, then unmount will treat it as a valid pointer which can lead to a + * use-after-free + */ + VTOF(vp)->fcbBTCBPtr = btcb; + + /* + * Allocate some space + */ + if (hfs_start_transaction(hfsmp) != 0) { + result = EINVAL; + goto exit; + } + intrans = 1; + + /* Note ExtendBTreeFile will acquire the necessary system file locks. */ + result = ExtendBTreeFile(vp, nodesize, cfork.cf_clump); + if (result) + goto exit; + + btcb->totalNodes = (u_int32_t)(VTOF(vp)->ff_size) / nodesize; + + /* + * Figure out how many map nodes we'll need. + * + * bytes_per_map_record = the number of bytes in the map record of a + * map node. Since that is the only record in the node, it is the size + * of the node minus the node descriptor at the start, and two record + * offsets at the end of the node. The "- 2" is to round the size down + * to a multiple of 4 bytes (since sizeof(BTNodeDescriptor) is not a + * multiple of 4). + * + * The value "temp" here is the number of *bits* in the map record of + * the header node. 
+ */ + bytes_per_map_record = nodesize - sizeof(BTNodeDescriptor) - 2*sizeof(u_int16_t) - 2; + temp = 8 * (nodesize - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - 4 * sizeof(u_int16_t)); + if (btcb->totalNodes > temp) { + num_map_nodes = howmany(btcb->totalNodes - temp, bytes_per_map_record * 8); + } + else { + num_map_nodes = 0; + } + + btcb->freeNodes = btcb->totalNodes - 1 - num_map_nodes; + + /* + * Initialize the b-tree header on disk + */ + bp = lf_hfs_generic_buf_allocate(vp, 0, btcb->nodeSize, 0); + if (bp == NULL) { + result = EIO; + goto exit; + } + + buffer = bp->pvData; + blkdesc.buffer = buffer; + blkdesc.blockHeader = (void *)bp; + blkdesc.blockReadFromDisk = 0; + blkdesc.isModified = 0; + + ModifyBlockStart(vp, &blkdesc); + + if (bp->uDataSize != nodesize) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_create_attr_btree: bad buffer size (%u)\n", bp->uDataSize); + hfs_assert(0); + } + + bzero(buffer, nodesize); + index = (u_int16_t *)buffer; + + /* FILL IN THE NODE DESCRIPTOR: */ + ndp = (BTNodeDescriptor *)buffer; + if (num_map_nodes != 0) + ndp->fLink = 1; + ndp->kind = kBTHeaderNode; + ndp->numRecords = 3; + offset = sizeof(BTNodeDescriptor); + index[(nodesize / 2) - 1] = offset; + + /* FILL IN THE HEADER RECORD: */ + bthp = (BTHeaderRec *)((u_int8_t *)buffer + offset); + bthp->nodeSize = nodesize; + bthp->totalNodes = btcb->totalNodes; + bthp->freeNodes = btcb->freeNodes; + bthp->clumpSize = cfork.cf_clump; + bthp->btreeType = 0xFF; + bthp->attributes = kBTVariableIndexKeysMask | kBTBigKeysMask; + bthp->maxKeyLength = kHFSPlusAttrKeyMaximumLength; + bthp->keyCompareType = kHFSBinaryCompare; + offset += sizeof(BTHeaderRec); + index[(nodesize / 2) - 2] = offset; + + /* FILL IN THE USER RECORD: */ + offset += kBTreeHeaderUserBytes; + index[(nodesize / 2) - 3] = offset; + + /* Mark the header node and map nodes in use in the map record. 
+ * + * NOTE: Assumes that the header node's map record has at least + * (num_map_nodes + 1) bits. + */ + bitmap = (u_int8_t *) buffer + offset; + temp = num_map_nodes + 1; /* +1 for the header node */ + while (temp >= 8) { + *(bitmap++) = 0xFF; + temp -= 8; + } + *bitmap = ~(0xFF >> temp); + + offset += nodesize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes - (4 * sizeof(int16_t)); + index[(nodesize / 2) - 4] = offset; + + if (hfsmp->jnl) + { + result = btree_journal_modify_block_end(hfsmp, bp); + } + else + { + result = lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + } + if (result) + goto exit; + + /* Create the map nodes: node numbers 1 .. num_map_nodes */ + for (node_num=1; node_num <= num_map_nodes; ++node_num) { + bp = lf_hfs_generic_buf_allocate(vp, node_num, btcb->nodeSize, 0); + if (bp == NULL) { + result = EIO; + goto exit; + } + buffer = (void *)bp->pvData; + blkdesc.buffer = buffer; + blkdesc.blockHeader = (void *)bp; + blkdesc.blockReadFromDisk = 0; + blkdesc.isModified = 0; + + ModifyBlockStart(vp, &blkdesc); + + bzero(buffer, nodesize); + index = (u_int16_t *)buffer; + + /* Fill in the node descriptor */ + ndp = (BTNodeDescriptor *)buffer; + if (node_num != num_map_nodes) + ndp->fLink = node_num + 1; + ndp->kind = kBTMapNode; + ndp->numRecords = 1; + offset = sizeof(BTNodeDescriptor); + index[(nodesize / 2) - 1] = offset; + + + /* Fill in the map record's offset */ + /* Note: We assume that the map record is all zeroes */ + offset = sizeof(BTNodeDescriptor) + bytes_per_map_record; + index[(nodesize / 2) - 2] = offset; + + if (hfsmp->jnl) + { + result = btree_journal_modify_block_end(hfsmp, bp); + } + else + { + result = lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + } + if (result) + goto exit; + } + + /* Update vp/cp for attribute btree */ + hfs_lock_mount (hfsmp); + hfsmp->hfs_attribute_cp = VTOC(vp); + hfsmp->hfs_attribute_vp = vp; + hfs_unlock_mount (hfsmp); + + (void) 
hfs_flushvolumeheader(hfsmp, HFS_FVH_WRITE_ALT); + + if (intrans) { + hfs_end_transaction(hfsmp); + intrans = 0; + } + + /* Initialize the vnode for virtual attribute data file */ + result = init_attrdata_vnode(hfsmp); + if (result) { + LFHFS_LOG(LEVEL_ERROR , "hfs_create_attr_btree: vol=%s init_attrdata_vnode() error=%d\n", hfsmp->vcbVN, result); + } + +exit: + + if (vp && result) { + /* + * If we're about to error out, then make sure to zero out the B-Tree control block pointer + * from the filefork of the EA B-Tree cnode/vnode. Failing to do this will lead to a use + * after free at unmount or BTFlushPath. Since we're about to error out anyway, this memory + * will be freed. + */ + VTOF(vp)->fcbBTCBPtr = NULL; + } + + + if (vp) { + hfs_unlock(VTOC(vp)); + } + if (result) { + hfs_free(btcb); + if (vp) { + hfs_vnop_reclaim(vp); + } + /* XXX need to give back blocks ? */ + } + if (intrans) { + hfs_end_transaction(hfsmp); + } + + /* + * All done, clear HFS_CREATING_BTREE, and wake up any sleepers. + */ + hfs_lock_mount (hfsmp); + hfsmp->hfs_flags &= ~HFS_CREATING_BTREE; + hfs_unlock_mount (hfsmp); + + return (result); +} diff --git a/livefiles_hfs_plugin/lf_hfs_btrees_io.h b/livefiles_hfs_plugin/lf_hfs_btrees_io.h new file mode 100644 index 0000000..176f521 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btrees_io.h @@ -0,0 +1,35 @@ +// +// lf_hfs_btrees_io.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_btrees_io_h +#define lf_hfs_btrees_io_h + +#include + + +#include "lf_hfs.h" +#include "lf_hfs_btrees_internal.h" + +/* BTree accessor routines */ +OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, + ItemCount minBlockCount); + +OSStatus GetBTreeBlock(FileReference vp, uint64_t blockNum, + GetBlockOptions options, BlockDescriptor *block); + +OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, + ReleaseBlockOptions options); + +OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF); + +void ModifyBlockStart(FileReference vp, BlockDescPtr blockPtr); + +int hfs_create_attr_btree(struct hfsmount *hfsmp, u_int32_t nodesize, u_int32_t nodecnt); + +u_int16_t get_btree_nodesize(struct vnode *vp); + +#endif /* lf_hfs_btrees_io_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_btrees_private.h b/livefiles_hfs_plugin/lf_hfs_btrees_private.h new file mode 100644 index 0000000..9e2c0a3 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_btrees_private.h @@ -0,0 +1,382 @@ +// +// lf_hfs_btrees_private.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#ifndef lf_hfs_btrees_private_h +#define lf_hfs_btrees_private_h + +#include "lf_hfs_defs.h" +#include "lf_hfs_file_mgr_internal.h" +#include "lf_hfs_btrees_internal.h" +#include "lf_hfs_logger.h" + +/////////////////////////////////// Constants /////////////////////////////////// + +#define kBTreeVersion 1 +#define kMaxTreeDepth 16 + + +#define kHeaderNodeNum 0 +#define kKeyDescRecord 1 + + +// Header Node Record Offsets +enum { + kHeaderRecOffset = 0x000E, + kKeyDescRecOffset = 0x0078, + kHeaderMapRecOffset = 0x00F8 +}; + +#define kMinNodeSize (512) + +#define kMinRecordSize (6) +// where is minimum record size enforced? 
+ +// miscellaneous BTree constants +enum { + kOffsetSize = 2 +}; + +// Insert Operations +typedef enum { + kInsertRecord = 0, + kReplaceRecord = 1 +} InsertType; + +// illegal string attribute bits set in mask +#define kBadStrAttribMask (0xCF) + + + +//////////////////////////////////// Macros ///////////////////////////////////// + +#define M_NodesInMap(mapSize) ((mapSize) << 3) + +#define M_ClearBitNum(integer,bitNumber) ((integer) &= (~(1<<(bitNumber)))) +#define M_SetBitNum(integer,bitNumber) ((integer) |= (1<<(bitNumber))) +#define M_IsOdd(integer) (((integer) & 1) != 0) +#define M_IsEven(integer) (((integer) & 1) == 0) + +#define M_MapRecordSize(nodeSize) (nodeSize - sizeof (BTNodeDescriptor) - 6) +#define M_HeaderMapRecordSize(nodeSize) (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) - 128 - 8) + +#define M_SWAP_BE16_ClearBitNum(integer,bitNumber) ((integer) &= SWAP_BE16(~(1<<(bitNumber)))) +#define M_SWAP_BE16_SetBitNum(integer,bitNumber) ((integer) |= SWAP_BE16(1<<(bitNumber))) + +///////////////////////////////////// Types ///////////////////////////////////// + +typedef struct { // fields specific to BTree CBs + + u_int8_t keyCompareType; /* Key string Comparison Type */ + u_int8_t btreeType; + u_int16_t treeDepth; + FileReference fileRefNum; // refNum of btree file + KeyCompareProcPtr keyCompareProc; + u_int32_t rootNode; + u_int32_t leafRecords; + u_int32_t firstLeafNode; + u_int32_t lastLeafNode; + u_int16_t nodeSize; + u_int16_t maxKeyLength; + u_int32_t totalNodes; + u_int32_t freeNodes; + + u_int16_t reserved3; // 4-byte alignment + + // new fields + int16_t version; + u_int32_t flags; // dynamic flags + u_int32_t attributes; // persistent flags + u_int32_t writeCount; + u_int32_t lastfsync; /* Last time that this was fsynced */ + + GetBlockProcPtr getBlockProc; + ReleaseBlockProcPtr releaseBlockProc; + SetEndOfForkProcPtr setEndOfForkProc; + + // statistical information + u_int32_t numGetNodes; + u_int32_t numGetNewNodes; + u_int32_t 
numReleaseNodes; + u_int32_t numUpdateNodes; + u_int32_t numMapNodesRead; // map nodes beyond header node + u_int32_t numHintChecks; + u_int32_t numPossibleHints; // Looks like a formated hint + u_int32_t numValidHints; // Hint used to find correct record. + u_int32_t reservedNodes; + BTreeIterator iterator; // useable when holding exclusive b-tree lock + +#if DEBUG + void *madeDirtyBy[2]; +#endif + +} BTreeControlBlock, *BTreeControlBlockPtr; + +u_int32_t CalcKeySize(const BTreeControlBlock *btcb, const BTreeKey *key); +#define CalcKeySize(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? ((key)->length16 + 2) : ((key)->length8 + 1) ) + +u_int32_t KeyLength(const BTreeControlBlock *btcb, const BTreeKey *key); +#define KeyLength(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? (key)->length16 : (key)->length8 ) + + +typedef enum { + kBTHeaderDirty = 0x00000001 +} BTreeFlags; + +static inline void M_BTreeHeaderDirty(BTreeControlBlock *bt) { +#if DEBUG + bt->madeDirtyBy[0] = __builtin_return_address(0); + bt->madeDirtyBy[1] = __builtin_return_address(1); +#endif + bt->flags |= kBTHeaderDirty; +} + +typedef int8_t *NodeBuffer; +typedef BlockDescriptor NodeRec, *NodePtr; //remove this someday... 
+ + +//// Tree Path Table - constructed by SearchTree, used by InsertTree and DeleteTree + +typedef struct { + u_int32_t node; // node number + u_int16_t index; + u_int16_t reserved; // align size to a power of 2 + +} TreePathRecord, *TreePathRecordPtr; + +typedef TreePathRecord TreePathTable [kMaxTreeDepth]; + + +//// InsertKey - used by InsertTree, InsertLevel and InsertNode + +typedef struct { + BTreeKeyPtr keyPtr; + u_int8_t * recPtr; + u_int16_t keyLength; + u_int16_t recSize; + Boolean replacingKey; + Boolean skipRotate; +} InsertKey; + +//// For Notational Convenience + +typedef BTNodeDescriptor* NodeDescPtr; +typedef u_int8_t *RecordPtr; +typedef BTreeKeyPtr KeyPtr; + + +//////////////////////////////////// Globals //////////////////////////////////// + + +//////////////////////////////////// Macros ///////////////////////////////////// +// Exit function on error +#define M_ExitOnError( result ) do { if ( ( result ) != noErr ) goto ErrorExit; } while(0) + +// Test for passed condition and return if true +#define M_ReturnErrorIf( condition, error ) do { if ( condition ) return( error ); } while(0) + +//////////////////////////////// Key Operations ///////////////////////////////// + +int32_t CompareKeys (BTreeControlBlockPtr btreePtr, + KeyPtr searchKey, + KeyPtr trialKey ); + +//////////////////////////////// Map Operations ///////////////////////////////// + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, + u_int32_t *nodeNum); + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum); + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + u_int32_t nodes ); + +u_int32_t CalcMapBits (BTreeControlBlockPtr btreePtr); + + +void BTUpdateReserve (BTreeControlBlockPtr btreePtr, + int nodes); + +//////////////////////////////// Misc Operations //////////////////////////////// + +u_int16_t CalcKeyRecordSize (u_int16_t keySize, + u_int16_t recSize ); + +OSStatus VerifyHeader (FCB *filePtr, + BTHeaderRec *header ); + +OSStatus UpdateHeader 
(BTreeControlBlockPtr btreePtr, + Boolean forceWrite ); + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + u_int32_t *nodeNum, + u_int16_t *index, + Boolean *foundRecord ); + +OSStatus CheckInsertParams (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen ); + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + u_int16_t recordLen, + Boolean *recordInserted ); + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, + BTreeIterator *iterator, + Boolean *answer ); + +OSStatus TreeIsDirty (BTreeControlBlockPtr btreePtr); + +//////////////////////////////// Node Operations //////////////////////////////// + +//// Node Operations + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + u_int32_t flags, + NodeRec *returnNodePtr ); + +/* Flags for GetNode() */ +#define kGetNodeHint 0x1 /* If set, the node is being looked up using a hint */ + +OSStatus GetLeftSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *left ); + +#define GetLeftSiblingNode(btree,node,left) GetNode ((btree), ((NodeDescPtr)(node))->bLink, 0, (left)) + +OSStatus GetRightSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *right ); + +#define GetRightSiblingNode(btree,node,right) GetNode ((btree), ((NodeDescPtr)(node))->fLink, 0, (right)) + + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + u_int32_t nodeNum, + NodeRec *returnNodePtr ); + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr, + u_int32_t transactionID, + u_int32_t flags ); + +//// Node Buffer Operations + +void ClearNode (BTreeControlBlockPtr btreePtr, + 
NodeDescPtr node ); + +u_int16_t GetNodeDataSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +u_int16_t GetNodeFreeSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + + +//// Record Operations + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + RecordPtr recPtr, + u_int16_t recSize ); + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + u_int16_t index, + KeyPtr keyPtr, + u_int16_t keyLength, + RecordPtr recPtr, + u_int16_t recSize ); + +void DeleteRecord (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + + +Boolean SearchNode (BTreeControlBlockPtr btree, + NodeDescPtr node, + KeyPtr searchKey, + u_int16_t *index ); + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index, + KeyPtr *keyPtr, + u_int8_t * *dataPtr, + u_int16_t *dataSize ); + +u_int8_t * GetRecordAddress (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + +#define GetRecordAddress(btreePtr,node,index) ((u_int8_t *)(node) + (*(short *) ((u_int8_t *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))) + + +u_int16_t GetRecordSize (BTreeControlBlockPtr btree, + NodeDescPtr node, + u_int16_t index ); + +u_int32_t GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + u_int16_t index ); + +void MoveRecordsLeft (u_int8_t * src, + u_int8_t * dst, + u_int16_t bytesToMove ); + +#define MoveRecordsLeft(src,dst,bytes) bcopy((src),(dst),(bytes)) + +void MoveRecordsRight (u_int8_t * src, + u_int8_t * dst, + u_int16_t bytesToMove ); + +#define MoveRecordsRight(src,dst,bytes) bcopy((src),(dst),(bytes)) + + +//////////////////////////////// Tree Operations //////////////////////////////// + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr keyPtr, + TreePathTable treePathTable, + u_int32_t *nodeNum, + BlockDescriptor *nodePtr, + u_int16_t *index ); + +OSStatus InsertTree (BTreeControlBlockPtr 
btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + u_int8_t * recPtr, + u_int16_t recSize, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level, + Boolean replacingKey, + u_int32_t *insertNode ); + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + u_int16_t index, + u_int16_t level ); + + +#endif /* lf_hfs_btrees_private_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_catalog.c b/livefiles_hfs_plugin/lf_hfs_catalog.c new file mode 100644 index 0000000..2c274ce --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_catalog.c @@ -0,0 +1,3841 @@ +// +// lf_hfs_catalog.c +// hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include "lf_hfs.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_unicode_wrappers.h" +#include "lf_hfs_sbunicode.h" +#include "lf_hfs_btrees_internal.h" +#include +#include "lf_hfs_vnops.h" +#include +#include "lf_hfs_dirops_handler.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_btree.h" +#include "lf_hfs_xattr.h" +#include "lf_hfs_chash.h" +#include +#include +#include "lf_hfs_chash.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_journal.h" + +#define HFS_LOOKUP_SYSFILE 0x1 /* If set, allow lookup of system files */ +#define HFS_LOOKUP_HARDLINK 0x2 /* If set, allow lookup of hard link records and not resolve the hard links */ +#define HFS_LOOKUP_CASESENSITIVE 0x4 /* If set, verify results of a file/directory record match input case */ + +#define SMALL_DIRENTRY_SIZE (UVFS_DIRENTRY_RECLEN(1)) + +/* Map file mode type to directory entry types */ +u_char modetodirtype[16] = { + UVFS_FA_TYPE_FILE, 0, 0, 0, + UVFS_FA_TYPE_DIR, 0, 0, 0, + UVFS_FA_TYPE_FILE, 0, UVFS_FA_TYPE_SYMLINK, 0, + 0, 0, 0, 0 +}; +#define MODE_TO_TYPE(mode) (modetodirtype[((mode) & S_IFMT) >> 12]) + +/* + * Initialization of an FSBufferDescriptor structure. 
+ */ +#define BDINIT(bd, addr) { \ + (bd).bufferAddress = (addr); \ + (bd).itemSize = sizeof(*(addr)); \ + (bd).itemCount = 1; \ + } + +/* HFS ID Hashtable Functions */ +#define IDHASH(hfsmp, inum) (&hfsmp->hfs_idhashtbl[(inum) & hfsmp->hfs_idhash]) + +static int isadir(const CatalogRecord *crp); +static int builddesc(const HFSPlusCatalogKey *key, cnid_t cnid, + u_int32_t hint, u_int32_t encoding, int isdir, struct cat_desc *descp); +static int buildkey(struct cat_desc *descp, HFSPlusCatalogKey *key); + +// --------------------------------------- Hard Link Support --------------------------------------------- +static int resolvelinkid(struct hfsmount *hfsmp, u_int32_t linkref, ino_t *ino); +static int cat_makealias(struct hfsmount *hfsmp, u_int32_t inode_num, struct HFSPlusCatalogFile *crp); +/* Hard link information collected during cat_getdirentries. */ +struct linkinfo { + u_int32_t link_ref; + caddr_t dirent_addr; +}; +typedef struct linkinfo linkinfo_t; + +struct btobj { + BTreeIterator iterator; + HFSPlusCatalogKey key; + CatalogRecord data; +}; + +/* Constants for directory hard link alias */ +enum { + /* Size of resource fork data array for directory hard link alias */ + kHFSAliasSize = 0x1d0, + + /* Volume type for ejectable devices like disk image */ + kHFSAliasVolTypeEjectable = 0x5, + + /* Offset for volume create date, in Mac OS local time */ + kHFSAliasVolCreateDateOffset = 0x12a, + + /* Offset for the type of volume */ + kHFSAliasVolTypeOffset = 0x130, + + /* Offset for folder ID of the parent directory of the directory inode */ + kHFSAliasParentIDOffset = 0x132, + + /* Offset for folder ID of the directory inode */ + kHFSAliasTargetIDOffset = 0x176, +}; + +/* Directory hard links are visible as aliases on pre-Leopard systems and + * as normal directories on Leopard or later. 
All directory hard link aliases + * have the same resource fork content except for the three uniquely + * identifying values that are updated in the resource fork data when the alias + * is created. The following array is the constant resource fork data used + * only for creating directory hard link aliases. + */ +static const char hfs_dirlink_alias_rsrc[] = { + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x9e, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, 0x32, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x00, 
0x02, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x2b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x9e, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x32, 0x00, 0x00, 0x61, 0x6c, 0x69, 0x73, + 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +// ------------------------------------------------------------------------------------------------------- + +/* State information for the getdirentries_callback function. */ +struct packdirentry_state { + int cbs_flags; /* VNODE_READDIR_* flags */ + u_int32_t cbs_parentID; + u_int32_t cbs_index; + ReadDirBuff_t cbs_psReadDirBuffer; + ExtendedVCB * cbs_hfsmp; + int cbs_result; + int32_t cbs_nlinks; + int32_t cbs_maxlinks; + linkinfo_t * cbs_linkinfo; + struct cat_desc * cbs_desc; + u_int8_t * cbs_namebuf; + /* + * The following fields are only used for NFS readdir, which + * uses the next file id as the seek offset of each entry. 
+ */ + UVFSDirEntry * cbs_direntry; + UVFSDirEntry * cbs_prevdirentry; + UVFSDirEntry * cbs_lastinsertedentry; + u_int32_t cbs_previlinkref; + Boolean cbs_hasprevdirentry; + Boolean cbs_haslastinsertedentry; + Boolean cbs_eof; +}; + +struct position_state { + int error; + u_int32_t count; + u_int32_t index; + u_int32_t parentID; + struct hfsmount *hfsmp; +}; +/* Initialize the HFS ID hash table */ +void +hfs_idhash_init (struct hfsmount *hfsmp) { + /* secured by catalog lock so no lock init needed */ + hfsmp->hfs_idhashtbl = hashinit(HFS_IDHASH_DEFAULT, &hfsmp->hfs_idhash); +} + +/* Free the HFS ID hash table */ +void +hfs_idhash_destroy (struct hfsmount *hfsmp) { + /* during failed mounts & unmounts */ + hashDeinit(hfsmp->hfs_idhashtbl); +} + +/* + * Compare two HFS+ catalog keys + * + * Result: +n search key > trial key + * 0 search key = trial key + * -n search key < trial key + */ +int +CompareExtendedCatalogKeys(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + cnid_t searchParentID, trialParentID; + int result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + + if (searchParentID > trialParentID) { + result = 1; + } + else if (searchParentID < trialParentID) { + result = -1; + } else { + /* parent node ID's are equal, compare names */ + if ( searchKey->nodeName.length == 0 || trialKey->nodeName.length == 0 ) + result = searchKey->nodeName.length - trialKey->nodeName.length; + else + result = FastUnicodeCompare(&searchKey->nodeName.unicode[0], + searchKey->nodeName.length, + &trialKey->nodeName.unicode[0], + trialKey->nodeName.length); + } + + return result; +} + +/* + * cat_binarykeycompare - compare two HFS Plus catalog keys. + + * The name portion of the key is compared using a 16-bit binary comparison. + * This is called from the b-tree code. 
+ */ +int +cat_binarykeycompare(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + u_int32_t searchParentID, trialParentID; + int result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + result = 0; + + if (searchParentID > trialParentID) { + ++result; + } else if (searchParentID < trialParentID) { + --result; + } else { + u_int16_t * str1 = &searchKey->nodeName.unicode[0]; + u_int16_t * str2 = &trialKey->nodeName.unicode[0]; + int length1 = searchKey->nodeName.length; + int length2 = trialKey->nodeName.length; + + result = UnicodeBinaryCompare (str1, length1, str2, length2); + } + + return result; +} + +/* + * cat_releasedesc + */ +void +cat_releasedesc(struct cat_desc *descp) +{ + if (descp == NULL) + return; + + if ((descp->cd_flags & CD_HASBUF) && (descp->cd_nameptr != NULL)) { + hfs_free( (void*)descp->cd_nameptr ); + } + descp->cd_nameptr = NULL; + descp->cd_namelen = 0; + descp->cd_flags &= ~CD_HASBUF; +} + +/* + * Extract the CNID from a catalog node record. + */ +static cnid_t +getcnid(const CatalogRecord *crp) +{ + cnid_t cnid = 0; + + switch (crp->recordType) { + case kHFSPlusFolderRecord: + cnid = crp->hfsPlusFolder.folderID; + break; + case kHFSPlusFileRecord: + cnid = crp->hfsPlusFile.fileID; + break; + default: + LFHFS_LOG(LEVEL_ERROR, "getcnid: unknown recordType=%d\n", crp->recordType); + break; + } + + return (cnid); +} + +/* + * Extract the text encoding from a catalog node record. 
+ */ +static u_int32_t +getencoding(const CatalogRecord *crp) +{ + u_int32_t encoding; + + if (crp->recordType == kHFSPlusFolderRecord) + encoding = crp->hfsPlusFolder.textEncoding; + else if (crp->recordType == kHFSPlusFileRecord) + encoding = crp->hfsPlusFile.textEncoding; + else + encoding = 0; + + return (encoding); +} + +/* + * getbsdattr - get attributes in bsd format + * + */ +static void +getbsdattr(struct hfsmount *hfsmp, const struct HFSPlusCatalogFile *crp, struct cat_attr * attrp) +{ + int isDirectory = (crp->recordType == kHFSPlusFolderRecord); + const struct HFSPlusBSDInfo *bsd = &crp->bsdInfo; + + attrp->ca_recflags = crp->flags; + attrp->ca_atime = to_bsd_time(crp->accessDate); + attrp->ca_atimeondisk = attrp->ca_atime; + attrp->ca_mtime = to_bsd_time(crp->contentModDate); + attrp->ca_ctime = to_bsd_time(crp->attributeModDate); + attrp->ca_itime = to_bsd_time(crp->createDate); + attrp->ca_btime = to_bsd_time(crp->backupDate); + + if ((bsd->fileMode & S_IFMT) == 0) { + attrp->ca_flags = 0; + attrp->ca_uid = hfsmp->hfs_uid; + attrp->ca_gid = hfsmp->hfs_gid; + if (isDirectory) { + attrp->ca_mode = S_IFDIR | (hfsmp->hfs_dir_mask & (S_IRWXU|S_IRWXG|S_IRWXO)); + } else { + attrp->ca_mode = S_IFREG | (hfsmp->hfs_file_mask & (S_IRWXU|S_IRWXG|S_IRWXO)); + } + attrp->ca_linkcount = 1; + attrp->ca_rdev = 0; + } else { + attrp->ca_linkcount = 1; /* may be overridden below */ + attrp->ca_rdev = 0; + attrp->ca_uid = bsd->ownerID; + attrp->ca_gid = bsd->groupID; + attrp->ca_flags = bsd->ownerFlags | (bsd->adminFlags << 16); + attrp->ca_mode = (mode_t)bsd->fileMode; + switch (attrp->ca_mode & S_IFMT) { + case S_IFCHR: /* fall through */ + case S_IFBLK: + attrp->ca_rdev = bsd->special.rawDevice; + break; + case S_IFIFO: + case S_IFSOCK: + case S_IFDIR: + case S_IFREG: + /* Pick up the hard link count */ + if (bsd->special.linkCount > 0) + attrp->ca_linkcount = bsd->special.linkCount; + break; + } + + /* + * Override the permissions as determined by the mount 
auguments + * in ALMOST the same way unset permissions are treated but keep + * track of whether or not the file or folder is hfs locked + * by leaving the h_pflags field unchanged from what was unpacked + * out of the catalog. + */ + /* + * This code was used to do UID translation with MNT_IGNORE_OWNERS + * (aka MNT_UNKNOWNPERMISSIONS) at the HFS layer. It's largely done + * at the VFS layer, so there is no need to do it here now; this also + * allows VFS to let root see the real UIDs. + * + * if (((unsigned int)vfs_flags(HFSTOVFS(hfsmp))) & MNT_UNKNOWNPERMISSIONS) { + * attrp->ca_uid = hfsmp->hfs_uid; + * attrp->ca_gid = hfsmp->hfs_gid; + * } + */ + } + + if (isDirectory) { + if (!S_ISDIR(attrp->ca_mode)) { + attrp->ca_mode &= ~S_IFMT; + attrp->ca_mode |= S_IFDIR; + } + attrp->ca_entries = ((const HFSPlusCatalogFolder *)crp)->valence; + attrp->ca_dircount = ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) && (attrp->ca_recflags & kHFSHasFolderCountMask)) ? + ((const HFSPlusCatalogFolder *)crp)->folderCount : 0; + + /* Keep UF_HIDDEN bit in sync with Finder Info's invisible bit */ + if (((const HFSPlusCatalogFolder *)crp)->userInfo.frFlags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) + attrp->ca_flags |= UF_HIDDEN; + } else { + /* Keep IMMUTABLE bits in sync with HFS locked flag */ + if (crp->flags & kHFSFileLockedMask) { + /* The file's supposed to be locked: + Make sure at least one of the IMMUTABLE bits is set: */ + if ((attrp->ca_flags & (SF_IMMUTABLE | UF_IMMUTABLE)) == 0) + attrp->ca_flags |= UF_IMMUTABLE; + } else { + /* The file's supposed to be unlocked: */ + attrp->ca_flags &= ~(SF_IMMUTABLE | UF_IMMUTABLE); + } + /* Keep UF_HIDDEN bit in sync with Finder Info's invisible bit */ + if (crp->userInfo.fdFlags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) + attrp->ca_flags |= UF_HIDDEN; + /* get total blocks (both forks) */ + attrp->ca_blocks = crp->dataFork.totalBlocks + crp->resourceFork.totalBlocks; + + /* On HFS+ the ThreadExists flag must always be set. 
*/ + attrp->ca_recflags |= kHFSThreadExistsMask; + + /* Pick up the hardlink first link, if any. */ + attrp->ca_firstlink = (attrp->ca_recflags & kHFSHasLinkChainMask) ? crp->hl_firstLinkID : 0; + } + + attrp->ca_fileid = crp->fileID; + + bcopy(&crp->userInfo, attrp->ca_finderinfo, 32); +} + +/* + * builddesc - build a cnode descriptor from an HFS+ key + */ +static int +builddesc(const HFSPlusCatalogKey *key, cnid_t cnid, u_int32_t hint, u_int32_t encoding, + int isdir, struct cat_desc *descp) +{ + int result = 0; + unsigned char * nameptr; + size_t bufsize; + size_t utf8len; + + /* guess a size... */ + bufsize = (3 * key->nodeName.length) + 1; + nameptr = hfs_malloc(bufsize); + if (nameptr == NULL) + return ENOMEM; + + memset(nameptr,0,bufsize); + result = utf8_encodestr(key->nodeName.unicode, + key->nodeName.length * sizeof(UniChar), + nameptr, (size_t *)&utf8len, + bufsize, ':', UTF_ADD_NULL_TERM); + + if (result == ENAMETOOLONG) { + hfs_free(nameptr); + bufsize = 1 + utf8_encodelen(key->nodeName.unicode, + key->nodeName.length * sizeof(UniChar), + ':', UTF_ADD_NULL_TERM); + nameptr = hfs_malloc(bufsize); + + result = utf8_encodestr(key->nodeName.unicode, + key->nodeName.length * sizeof(UniChar), + nameptr, (size_t *)&utf8len, + bufsize, ':', UTF_ADD_NULL_TERM); + } + descp->cd_parentcnid = key->parentID; + descp->cd_nameptr = nameptr; + descp->cd_namelen = utf8len; + descp->cd_cnid = cnid; + descp->cd_hint = hint; + descp->cd_flags = CD_DECOMPOSED | CD_HASBUF; + if (isdir) + descp->cd_flags |= CD_ISDIR; + descp->cd_encoding = encoding; + return result; +} + +/* + * cat_lookupbykey - lookup a catalog node using a cnode key + */ +static int +cat_lookupbykey(struct hfsmount *hfsmp, CatalogKey *keyp, int flags, u_int32_t hint, int wantrsrc, + struct cat_desc *descp, struct cat_attr *attrp, struct cat_fork *forkp, cnid_t *desc_cnid) +{ + BTreeIterator * iterator = NULL; + FSBufferDescriptor btdata = {0}; + CatalogRecord * recp = NULL; + u_int16_t datasize = 0; + int 
result = 0; + u_int32_t ilink = 0; + cnid_t cnid = 0; + u_int32_t encoding = 0; + cnid_t parentid = 0; + + recp = hfs_malloc(sizeof(CatalogRecord)); + BDINIT(btdata, recp); + iterator = hfs_mallocz(sizeof(*iterator)); + iterator->hint.nodeNum = hint; + bcopy(keyp, &iterator->key, sizeof(CatalogKey)); + + FCB *filePtr = VTOF(HFSTOVCB(hfsmp)->catalogRefNum); + result = BTSearchRecord(filePtr, iterator, + &btdata, &datasize, iterator); + if (result) + goto exit; + + /* Save the cnid, parentid, and encoding now in case there's a hard link or inode */ + cnid = getcnid(recp); + if (cnid == 0) { + /* CNID of 0 is invalid. Mark as corrupt */ + hfs_mark_inconsistent (hfsmp, HFS_INCONSISTENCY_DETECTED); + result = EINVAL; + goto exit; + } + + parentid = keyp->hfsPlus.parentID; + + encoding = getencoding(recp); + hint = iterator->hint.nodeNum; + + /* Hide the journal files (if any) */ + if ( IsEntryAJnlFile(hfsmp, cnid) && !(flags & HFS_LOOKUP_SYSFILE)) + { + result = HFS_ERESERVEDNAME; + goto exit; + } + + /* + * When a hardlink link is encountered, auto resolve it. + * + * The catalog record will change, and possibly its type. 
+ */ + if ( (attrp || forkp) + && (recp->recordType == kHFSPlusFileRecord) + && ((to_bsd_time(recp->hfsPlusFile.createDate) == (time_t)hfsmp->hfs_itime) || + (to_bsd_time(recp->hfsPlusFile.createDate) == (time_t)hfsmp->hfs_metadata_createdate))) { + + int isdirlink = 0; + int isfilelink = 0; + + if ((SWAP_BE32(recp->hfsPlusFile.userInfo.fdType) == kHardLinkFileType) && + (SWAP_BE32(recp->hfsPlusFile.userInfo.fdCreator) == kHFSPlusCreator)) { + isfilelink = 1; + } else if ((recp->hfsPlusFile.flags & kHFSHasLinkChainMask) && + (SWAP_BE32(recp->hfsPlusFile.userInfo.fdType) == kHFSAliasType) && + (SWAP_BE32(recp->hfsPlusFile.userInfo.fdCreator) == kHFSAliasCreator)) { + isdirlink = 1; + } + if ((isfilelink || isdirlink) && !(flags & HFS_LOOKUP_HARDLINK)) { + ilink = recp->hfsPlusFile.hl_linkReference; + (void) cat_resolvelink(hfsmp, ilink, isdirlink, (struct HFSPlusCatalogFile *)recp); + } + } + + if (attrp != NULL) { + getbsdattr(hfsmp, (struct HFSPlusCatalogFile *)recp, attrp); + if (ilink) { + /* Update the inode number for this hard link */ + attrp->ca_linkref = ilink; + } + + /* + * Set kHFSHasLinkChainBit for hard links, and reset it for all + * other items. Also set linkCount to 1 for regular files. + * + * Due to some bug (rdar://8505977), some regular files can have + * kHFSHasLinkChainBit set and linkCount more than 1 even if they + * are not really hard links. The runtime code should not consider + * these files has hard links. Therefore we reset the kHFSHasLinkChainBit + * and linkCount for regular file before we vend it out. This might + * also result in repairing the bad files on disk, if the corresponding + * file is modified and updated on disk. 
+ */ + if (ilink) + { + /* This is a hard link and the link count bit was not set */ + if (!(attrp->ca_recflags & kHFSHasLinkChainMask)) + { + LFHFS_LOG(LEVEL_DEBUG, "cat_lookupbykey: set hardlink bit on vol=%s cnid=%u inoid=%u\n", hfsmp->vcbVN, cnid, ilink); + attrp->ca_recflags |= kHFSHasLinkChainMask; + } + } + else + { + /* Make sure that this non-hard link (regular) record is not + * an inode record that was looked up and we do not end up + * reseting the hard link bit on it. + */ + if ((parentid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && + (parentid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid)) + { + /* This is not a hard link or inode and the link count bit was set */ + if (attrp->ca_recflags & kHFSHasLinkChainMask) + { + LFHFS_LOG(LEVEL_DEBUG, "cat_lookupbykey: clear hardlink bit on vol=%s cnid=%u\n", hfsmp->vcbVN, cnid); + attrp->ca_recflags &= ~kHFSHasLinkChainMask; + } + /* This is a regular file and the link count was more than 1 */ + if (S_ISREG(attrp->ca_mode) && (attrp->ca_linkcount > 1)) + { + LFHFS_LOG(LEVEL_DEBUG, "cat_lookupbykey: set linkcount=1 on vol=%s cnid=%u old=%u\n", hfsmp->vcbVN, cnid, attrp->ca_linkcount); + attrp->ca_linkcount = 1; + } + } + } + } + if (forkp != NULL) { + if (isadir(recp)) { + bzero(forkp, sizeof(*forkp)); + } + else if (wantrsrc) { + /* Convert the resource fork. */ + forkp->cf_size = recp->hfsPlusFile.resourceFork.logicalSize; + forkp->cf_new_size = 0; + forkp->cf_blocks = recp->hfsPlusFile.resourceFork.totalBlocks; + forkp->cf_bytesread = 0; + + forkp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.resourceFork.extents[0], + &forkp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + } else { + int i; + u_int32_t validblks; + + /* Convert the data fork. 
*/ + forkp->cf_size = recp->hfsPlusFile.dataFork.logicalSize; + forkp->cf_new_size = 0; + forkp->cf_blocks = recp->hfsPlusFile.dataFork.totalBlocks; + forkp->cf_bytesread = 0; + forkp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.dataFork.extents[0], + &forkp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + + /* Validate the fork's resident extents. */ + validblks = 0; + for (i = 0; i < kHFSPlusExtentDensity; ++i) { + if (forkp->cf_extents[i].startBlock + forkp->cf_extents[i].blockCount >= hfsmp->totalBlocks) { + /* Suppress any bad extents so a remove can succeed. */ + forkp->cf_extents[i].startBlock = 0; + forkp->cf_extents[i].blockCount = 0; + /* Disable writes */ + if (attrp != NULL) { + attrp->ca_mode &= S_IFMT | S_IRUSR | S_IRGRP | S_IROTH; + } + } else { + validblks += forkp->cf_extents[i].blockCount; + } + } + /* Adjust for any missing blocks. */ + if ((validblks < forkp->cf_blocks) && (forkp->cf_extents[7].blockCount == 0)) { + off_t psize; + + /* + * This is technically a volume corruption. + * If the total number of blocks calculated by iterating + summing + * the extents in the resident extent records, is less than that + * which is reported in the catalog entry, we should force a fsck. + * Only modifying ca_blocks here is not guaranteed to make it out + * to disk; it is a runtime-only field. + * + * Note that we could have gotten into this state if we had invalid ranges + * that existed in borrowed blocks that somehow made it out to disk. + * The cnode's on disk block count should never be greater + * than that which is in its extent records. 
+ */ + + (void) hfs_mark_inconsistent (hfsmp, HFS_INCONSISTENCY_DETECTED); + + forkp->cf_blocks = validblks; + if (attrp != NULL) { + attrp->ca_blocks = validblks + recp->hfsPlusFile.resourceFork.totalBlocks; + } + psize = (off_t)validblks * (off_t)hfsmp->blockSize; + if (psize < forkp->cf_size) { + forkp->cf_size = psize; + } + + } + } + } + if (descp != NULL) { + HFSPlusCatalogKey * pluskey = NULL; + pluskey = (HFSPlusCatalogKey *)&iterator->key; + builddesc(pluskey, cnid, hint, encoding, isadir(recp), descp); + + } + + if (desc_cnid != NULL) { + *desc_cnid = cnid; + } +exit: + hfs_free(iterator); + hfs_free(recp); + + return MacToVFSError(result); +} + +/* + * Determine if a catalog node record is a directory. + */ +static int +isadir(const CatalogRecord *crp) +{ + if (crp->recordType == kHFSPlusFolderRecord) + { + return 1; + } + + return 0; +} + +static int +buildthread(void *keyp, void *recp, int directory) +{ + int size = 0; + + HFSPlusCatalogKey *key = (HFSPlusCatalogKey *)keyp; + HFSPlusCatalogThread *rec = (HFSPlusCatalogThread *)recp; + + size = sizeof(HFSPlusCatalogThread); + if (directory) + rec->recordType = kHFSPlusFolderThreadRecord; + else + rec->recordType = kHFSPlusFileThreadRecord; + rec->reserved = 0; + rec->parentID = key->parentID; + bcopy(&key->nodeName, &rec->nodeName, + sizeof(UniChar) * (key->nodeName.length + 1)); + + /* HFS Plus has variable sized thread records */ + size -= (sizeof(rec->nodeName.unicode) - + (rec->nodeName.length * sizeof(UniChar))); + + return (size); +} + +/* + * Build a catalog node thread key. + */ +static void +buildthreadkey(HFSCatalogNodeID parentID, CatalogKey *key) +{ + key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength; + key->hfsPlus.parentID = parentID; + key->hfsPlus.nodeName.length = 0; +} + +/* + * cat_findname - obtain a descriptor from cnid + * + * Only a thread lookup is performed. 
 *
 * Note: The caller is responsible for releasing the output
 * catalog descriptor (when supplied outdescp is non-null).
 */
int
cat_findname(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *outdescp)
{
    BTreeIterator *iterator = NULL;
    CatalogRecord * recp = NULL;
    FSBufferDescriptor btdata;
    CatalogKey * keyp;

    int isdir = 0;
    int result;

    iterator = hfs_mallocz(sizeof(BTreeIterator));
    if (iterator == NULL)
    {
        result = ENOMEM;
        goto exit;
    }

    /* Thread records are keyed by (cnid, empty name). */
    buildthreadkey(cnid, (CatalogKey *)&iterator->key);
    iterator->hint.nodeNum = 0;

    recp = hfs_malloc(sizeof(CatalogRecord));
    if (recp == NULL)
    {
        result = ENOMEM;
        goto exit;
    }
    memset(recp,0,sizeof(CatalogRecord));
    BDINIT(btdata, recp);

    result = BTSearchRecord(VTOF(hfsmp->hfs_catalog_vp), iterator, &btdata, NULL, NULL);
    if (result)
        goto exit;

    /* Turn thread record into a cnode key (in place). */
    switch (recp->recordType)
    {
        case kHFSPlusFolderThreadRecord:
            isdir = 1;
            /* fall through */
        case kHFSPlusFileThreadRecord:
            /* The thread's (reserved, parentID, nodeName) fields overlay a
             * catalog key for the named entry. */
            keyp = (CatalogKey *)&recp->hfsPlusThread.reserved;
            keyp->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength +
                                      (keyp->hfsPlus.nodeName.length * 2);
            break;
        default:
            result = ENOENT;
            goto exit;
    }


    builddesc((HFSPlusCatalogKey *)keyp, cnid, 0, 0, isdir, outdescp);

exit:
    if (recp)
        hfs_free(recp);
    if (iterator)
        hfs_free(iterator);

    return result;
}

/* True when cnid names the journal file or journal info block on a
 * journaled (or read-only, journal-flagged) volume. */
bool IsEntryAJnlFile(struct hfsmount *hfsmp, cnid_t cnid)
{
    return (((hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))) &&
            ((cnid == hfsmp->hfs_jnlfileid) || (cnid == hfsmp->hfs_jnlinfoblkid)));
}

/* True when the file record looks like a directory hard link: alias
 * type/creator, link-chain flag, a valid inode number, and a create date
 * matching the volume's hard-link epoch. */
static bool IsEntryADirectoryLink(struct hfsmount *hfsmp, const CatalogRecord *crp,time_t itime)
{
    return ((SWAP_BE32(crp->hfsPlusFile.userInfo.fdType) == kHFSAliasType) &&
            (SWAP_BE32(crp->hfsPlusFile.userInfo.fdCreator) == kHFSAliasCreator) &&
            (crp->hfsPlusFile.flags & kHFSHasLinkChainMask) &&
            (crp->hfsPlusFile.bsdInfo.special.iNodeNum >= kHFSFirstUserCatalogNodeID) &&
            ((itime == (time_t)hfsmp->hfs_itime) || (itime == (time_t)hfsmp->hfs_metadata_createdate)));
}

/* True when the file record looks like a file hard link: hard-link
 * type/creator and a create date matching the volume's hard-link epoch. */
static bool IsEntryAHardLink(struct hfsmount *hfsmp, const CatalogRecord *crp,time_t itime)
{
    return((SWAP_BE32(crp->hfsPlusFile.userInfo.fdType) == kHardLinkFileType) && (SWAP_BE32(crp->hfsPlusFile.userInfo.fdCreator) == kHFSPlusCreator) &&
           ((itime == (time_t)hfsmp->hfs_itime) || (itime == (time_t)hfsmp->hfs_metadata_createdate)));
}

/*
 * getdirentries callback for HFS Plus directories.
 *
 * Invoked once per catalog record while iterating a directory.  Packs
 * UVFSDirEntry records into state->cbs_psReadDirBuffer.  Entries are
 * emitted one iteration late: the "current" record is staged in
 * cbs_direntry and only copied out on the NEXT call (or when the parent
 * ID changes, signalling end of directory), because each entry's cookie
 * embeds the FOLLOWING entry's cnid.  Returns nonzero to continue
 * iterating, 0 to stop.
 */
static int
getdirentries_callback(const CatalogKey *ckp, const CatalogRecord *crp, struct packdirentry_state *state)
{

    UVFSDirEntry* entry = NULL;
    const CatalogName *cnp;
    OSErr result;

    u_int32_t ilinkref = 0;
    u_int32_t curlinkref = 0;
    cnid_t cnid;
    u_int8_t type = 0;
    time_t itime;

    caddr_t uiobase = NULL;
    size_t namelen = 0;
    size_t maxnamelen;
    size_t uiosize = 0;
    caddr_t uioaddr;

    Boolean bIsLastRecordInDir = false;
    Boolean bToHide = false;
    Boolean bIsLink = false;
    Boolean bIsMangled = false;

    struct hfsmount *hfsmp = state->cbs_hfsmp;
    cnid_t curID = ckp->hfsPlus.parentID;

    /* We're done when parent directory changes */
    if (state->cbs_parentID != curID)
    {
        /*
         * If the parent ID is different from curID this means we've hit
         * the EOF for the directory.  To help future callers, we mark
         * the cbs_eof boolean.  However, we should only mark the EOF
         * boolean if we're about to return from this function.
         *
         * This is because this callback function does its own uiomove
         * to get the data to userspace.  If we set the boolean before determining
         * whether or not the current entry has enough room to write its
         * data to userland, we could fool the callers of this catalog function
         * into thinking they've hit EOF earlier than they really would have.
         * In that case, we'd know that we have more entries to process and
         * send to userland, but we didn't have enough room.
         *
         * To be safe, we mark cbs_eof here ONLY for the cases where we know we're
         * about to return and won't write any new data back
         * to userland.  In the stop_after_pack case, we'll set this boolean
         * regardless, so it's slightly safer to let that logic mark the boolean,
         * especially since it's closer to the return of this function.
         */

        /* The last record has not been returned yet, so we
         * want to stop after packing the last item
         */
        if (state->cbs_hasprevdirentry)
        {
            bIsLastRecordInDir = true;
        }
        else
        {
            state->cbs_eof = true;
            state->cbs_result = ENOENT;
            return (0); /* stop */
        }

    }

    entry = state->cbs_direntry;
    u_int8_t* nameptr = (u_int8_t *)&entry->de_name;
    if (state->cbs_flags & VNODE_READDIR_NAMEMAX)
    {
        /*
         * The NFS server sometimes needs to make filenames fit in
         * NAME_MAX bytes (since its client may not be able to
         * handle a longer name).  In that case, NFS will ask us
         * to mangle the name to keep it short enough.
         */
        maxnamelen = NAME_MAX + 1;
    }
    else
    {
        maxnamelen = UVFS_DIRENTRY_RECLEN(MAX_UTF8_NAME_LENGTH);
    }

    if (bIsLastRecordInDir)
    {
        /* The last item returns a non-zero invalid cookie */
        cnid = INT_MAX;
    }
    else
    {
        if (crp == NULL)
            return (0);

        switch(crp->recordType)
        {
            case kHFSPlusFolderRecord:
                type = UVFS_FA_TYPE_DIR;
                cnid = crp->hfsPlusFolder.folderID;
                /* Hide our private system directories. */
                if (curID == kHFSRootFolderID)
                {
                    if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid)
                    {
                        bToHide = true;
                    }
                }
                break;
            case kHFSPlusFileRecord:
                itime = to_bsd_time(crp->hfsPlusFile.createDate);
                type = MODE_TO_TYPE(crp->hfsPlusFile.bsdInfo.fileMode);
                cnid = crp->hfsPlusFile.fileID;
                /*
                 * When a hardlink link is encountered save its link ref.
                 */
                if (IsEntryAHardLink(hfsmp, crp, itime))
                {
                    /* If link ref is inode's file id then use it directly. */
                    if (crp->hfsPlusFile.flags & kHFSHasLinkChainMask)
                    {
                        cnid = crp->hfsPlusFile.bsdInfo.special.iNodeNum;
                    }
                    else
                    {
                        /* Defer resolution: recorded for post-processing below. */
                        ilinkref = crp->hfsPlusFile.bsdInfo.special.iNodeNum;
                    }
                    bIsLink =1;
                }
                else if (IsEntryADirectoryLink(hfsmp, crp,itime))
                {
                    /* A directory's link resolves to a directory. */
                    type = UVFS_FA_TYPE_DIR;
                    /* A directory's link ref is always inode's file id. */
                    cnid = crp->hfsPlusFile.bsdInfo.special.iNodeNum;
                    bIsLink = true;
                }

                /* Hide the journal files */
                if ((curID == kHFSRootFolderID) && IsEntryAJnlFile(hfsmp, cnid))
                {
                    bToHide = 1;
                }
                break;

            default:
                return (0); /* stop */
        };

        cnp = (const CatalogName*) &ckp->hfsPlus.nodeName;

        namelen = cnp->ustr.length;
        /*
         * For MacRoman encoded names (textEncoding == 0), assume that it's ascii
         * and convert it directly in an attempt to avoid the more
         * expensive utf8_encodestr conversion.
         */
        if ((namelen < maxnamelen) && (crp->hfsPlusFile.textEncoding == 0)) {
            int i;
            u_int16_t ch;
            const u_int16_t *chp;

            chp = &cnp->ustr.unicode[0];
            for (i = 0; i < (int)namelen; ++i) {
                ch = *chp++;
                if (ch > 0x007f || ch == 0x0000) {
                    /* Perform expensive utf8_encodestr conversion */
                    goto encodestr;
                }
                /* '/' on disk is ':' in POSIX names (HFS+ convention). */
                nameptr[i] = (ch == '/') ? ':' : (u_int8_t)ch;
            }
            nameptr[namelen] = '\0';
            result = 0;
        }
        else
        {
encodestr:
            result = utf8_encodestr(cnp->ustr.unicode, namelen * sizeof(UniChar), nameptr, &namelen, maxnamelen, ':', UTF_ADD_NULL_TERM);
        }

        /* Check result returned from encoding the filename to utf8 */
        if (result == ENAMETOOLONG)
        {
            /*
             * If we were looking at a catalog record for a hardlink (not the inode),
             * then we want to use its link ID as opposed to the inode ID for
             * a mangled name.  For all other cases, they are the same.  Note that
             * due to the way directory hardlinks are implemented, the actual link
             * is going to be counted as a file record, so we can catch both
             * with is_link.
             */
            cnid_t linkid = cnid;
            if (bIsLink)
            {
                linkid = crp->hfsPlusFile.fileID;
            }

            result = ConvertUnicodeToUTF8Mangled(cnp->ustr.length * sizeof(UniChar), cnp->ustr.unicode, maxnamelen, (ByteCount*)&namelen, nameptr, linkid);
            if (result) return (0); /* stop */
            bIsMangled = 1;
        }
    }

    /*
     * The index is 1 relative and includes "." and ".."
     *
     * Also stuff the cnid in the upper 32 bits of the cookie.
     * The cookie is stored to the previous entry, which will
     * be packed and copied this time
     */
    state->cbs_prevdirentry->de_nextcookie = (state->cbs_index + 3) | ((u_int64_t)cnid << 32);
    uiosize = state->cbs_prevdirentry->de_reclen;

    //Check if this will be the last entry to be inserted in the buffer
    //If this is the last entry need to change the d_reclen to 0.
    //If this is the last file in the dir, need to change the d_seekoff to UVFS_DIRCOOKIE_EOF
    if ((UVFS_DIRENTRY_RECLEN(namelen) + uiosize) > state->cbs_psReadDirBuffer->uBufferResid)
    {
        state->cbs_prevdirentry->de_reclen = 0;
    }

    if (bIsLastRecordInDir)
    {
        state->cbs_prevdirentry->de_reclen = 0;
        state->cbs_prevdirentry->de_nextcookie = UVFS_DIRCOOKIE_EOF;
    }

    uioaddr = (caddr_t) state->cbs_prevdirentry;

    /* Save current base address for post processing of hard-links. */
    if (ilinkref || state->cbs_previlinkref)
    {
        uiobase = uioaddr;
    }

    /* If this entry won't fit then we're done */
    if ((uiosize > (user_size_t)state->cbs_psReadDirBuffer->uBufferResid) || (ilinkref != 0 && state->cbs_nlinks == state->cbs_maxlinks))
    {
        return (0); /* stop */
    }

    if (state->cbs_hasprevdirentry)
    {
        // Skip entries marked as "bToHide" on the previous iteration!
        // (hidden entries were staged with de_fileid == 0, see bottom of function)
        if (state->cbs_prevdirentry->de_fileid != 0)
        {
            memcpy(state->cbs_psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(state->cbs_psReadDirBuffer), uioaddr, uiosize);
            state->cbs_lastinsertedentry = state->cbs_psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(state->cbs_psReadDirBuffer);
            state->cbs_haslastinsertedentry = true;
            state->cbs_psReadDirBuffer->uBufferResid -= uiosize;
        }
        else if (state->cbs_haslastinsertedentry && bIsLastRecordInDir)
        {
            /* The staged entry was hidden; terminate the list on the last
             * entry actually written to the buffer instead. */
            state->cbs_lastinsertedentry->de_reclen = 0;
            state->cbs_lastinsertedentry->de_nextcookie = UVFS_DIRCOOKIE_EOF;
        }

        ++state->cbs_index;

        /* Remember previous entry */
        state->cbs_desc->cd_cnid = cnid;
        if (type == UVFS_FA_TYPE_DIR)
        {
            state->cbs_desc->cd_flags |= CD_ISDIR;
        }
        else
        {
            state->cbs_desc->cd_flags &= ~CD_ISDIR;
        }

        if (state->cbs_desc->cd_nameptr != NULL)
        {
            state->cbs_desc->cd_namelen = 0;
        }

        if (!bIsMangled)
        {
            state->cbs_desc->cd_namelen = namelen;
            bcopy(nameptr, state->cbs_namebuf, namelen + 1);
        }
        else
        {
            /* Store unmangled name for the directory hint else it will
             * restart readdir at the last location again
             */
            u_int8_t *new_nameptr;
            size_t bufsize;
            size_t tmp_namelen = 0;

            cnp = (const CatalogName *)&ckp->hfsPlus.nodeName;
            bufsize = 1 + utf8_encodelen(cnp->ustr.unicode, cnp->ustr.length * sizeof(UniChar), ':', 0);
            new_nameptr = hfs_mallocz(bufsize);
            result = utf8_encodestr(cnp->ustr.unicode, cnp->ustr.length * sizeof(UniChar), new_nameptr, &tmp_namelen, bufsize, ':', UTF_ADD_NULL_TERM);
            if (result)
            {
                hfs_free(new_nameptr);
                return (0); /* stop */
            }


            state->cbs_desc->cd_namelen = tmp_namelen;
            bcopy(new_nameptr, state->cbs_namebuf, tmp_namelen + 1);

            hfs_free(new_nameptr);
        }

        if (state->cbs_hasprevdirentry)
        {
            curlinkref = ilinkref;               /* save current */
            ilinkref = state->cbs_previlinkref;  /* use previous */
        }
        /*
         * Record any hard links for post processing.
         */
        if ((ilinkref != 0) && (state->cbs_result == 0) && (state->cbs_nlinks < state->cbs_maxlinks))
        {
            state->cbs_linkinfo[state->cbs_nlinks].dirent_addr = uiobase;
            state->cbs_linkinfo[state->cbs_nlinks].link_ref = ilinkref;
            state->cbs_nlinks++;
        }

        if (state->cbs_hasprevdirentry)
        {
            ilinkref = curlinkref; /* restore current */
        }
    }

    /* Fill the direntry to be used the next time */
    if (bIsLastRecordInDir)
    {
        state->cbs_eof = true;
        return (0); /* stop */
    }

    entry->de_filetype = type;
    entry->de_namelen = namelen;
    entry->de_reclen = UVFS_DIRENTRY_RECLEN(namelen);
    /* de_fileid == 0 marks an entry to be skipped on the next iteration. */
    entry->de_fileid = bToHide ? 0 : cnid;

    /* swap the current and previous entry */
    UVFSDirEntry* tmp = state->cbs_direntry;
    state->cbs_direntry = state->cbs_prevdirentry;
    state->cbs_prevdirentry = tmp;
    state->cbs_hasprevdirentry = true;
    state->cbs_previlinkref = ilinkref;

    /* Continue iteration if there's room */
    return (state->cbs_result == 0 && state->cbs_psReadDirBuffer->uBufferResid >= SMALL_DIRENTRY_SIZE);
}

/*
 * Callback to establish directory position.
 * Called with position_state for each item in a directory.
 */
static int
cat_findposition(const CatalogKey *ckp, const CatalogRecord *crp, struct position_state *state)
{
    cnid_t curID = 0;
    curID = ckp->hfsPlus.parentID;

    /* Make sure parent directory didn't change */
    if (state->parentID != curID) {
        /*
         * The parent ID is different from curID this means we've hit
         * the EOF for the directory.
+ */ + state->error = ENOENT; + return (0); /* stop */ + } + + /* Count this entry */ + switch(crp->recordType) + { + case kHFSPlusFolderRecord: + case kHFSPlusFileRecord: + ++state->count; + break; + default: + LFHFS_LOG(LEVEL_ERROR, "cat_findposition: invalid record type %d in dir %d\n", crp->recordType, curID); + state->error = EINVAL; + return (0); /* stop */ + }; + + return (state->count < state->index); +} + +/* + * Pack a uio buffer with directory entries from the catalog + */ +int +cat_getdirentries(struct hfsmount *hfsmp, u_int32_t entrycnt, directoryhint_t *dirhint, ReadDirBuff_s* psReadDirBuffer, int flags, int *items, bool *eofflag, UVFSDirEntry* psDotDotEntry) +{ + FCB* fcb; + BTreeIterator * iterator = NULL; + CatalogKey * key; + struct packdirentry_state state; + int result = 0; + int index; + int have_key; + int extended; + + extended = flags & VNODE_READDIR_EXTENDED; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + + #define MAX_LINKINFO_ENTRIES 275 + /* + * Get a buffer for link info array, btree iterator and a direntry. + * + * We impose an cap of 275 link entries when trying to compute + * the total number of hardlink entries that we'll allow in the + * linkinfo array, as this has been shown to noticeably impact performance. + * + * Note that in the case where there are very few hardlinks, + * this does not restrict or prevent us from vending out as many entries + * as we can to the uio_resid, because the getdirentries callback + * uiomoves the directory entries to the uio itself and does not use + * this MALLOC'd array. It also limits itself to maxlinks of hardlinks. + */ + + // This value cannot underflow: both entrycnt and the rhs are unsigned 32-bit + // ints, so the worst-case MIN of them is 0. + int maxlinks = min (entrycnt, (u_int32_t)(psReadDirBuffer->uBufferResid / SMALL_DIRENTRY_SIZE)); + // Prevent overflow. 
+ maxlinks = MIN (maxlinks, MAX_LINKINFO_ENTRIES); + int bufsize = MAXPATHLEN + (maxlinks * sizeof(linkinfo_t)) + sizeof(*iterator); + + if (extended) + { + bufsize += 2 * (sizeof(UVFSDirEntry) + sizeof(char)*MAX_UTF8_NAME_LENGTH); + } + void* buffer = hfs_mallocz(bufsize); + + state.cbs_flags = flags; + state.cbs_hasprevdirentry = false; + state.cbs_haslastinsertedentry = (psDotDotEntry != NULL); + state.cbs_lastinsertedentry = psDotDotEntry; + state.cbs_previlinkref = 0; + state.cbs_nlinks = 0; + state.cbs_maxlinks = maxlinks; + state.cbs_linkinfo = (linkinfo_t *)((char *)buffer + MAXPATHLEN); + /* + * We need to set cbs_eof to false regardless of whether or not the + * control flow is actually in the extended case, since we use this + * field to track whether or not we've returned EOF from the iterator function. + */ + state.cbs_eof = false; + + iterator = (BTreeIterator *) ((char *)state.cbs_linkinfo + (maxlinks * sizeof(linkinfo_t))); + key = (CatalogKey *)&iterator->key; + have_key = 0; + index = dirhint->dh_index + 1; + if (extended) + { + state.cbs_direntry = (UVFSDirEntry *)((char *)iterator + sizeof(BTreeIterator)); + state.cbs_prevdirentry = (UVFSDirEntry *) ((uint8_t*) state.cbs_direntry + sizeof(UVFSDirEntry) + sizeof(char)*MAX_UTF8_NAME_LENGTH); + } + /* + * Attempt to build a key from cached filename + */ + if (dirhint->dh_desc.cd_namelen != 0) + { + if (buildkey(&dirhint->dh_desc, (HFSPlusCatalogKey *)key) == 0) + { + iterator->hint.nodeNum = dirhint->dh_desc.cd_hint; + have_key = 1; + } + } + + if (index == 0 && dirhint->dh_threadhint != 0) + { + /* + * Position the iterator at the directory's thread record. + * (i.e. 
just before the first entry) + */ + buildthreadkey(dirhint->dh_desc.cd_parentcnid, key); + iterator->hint.nodeNum = dirhint->dh_threadhint; + iterator->hint.index = 0; + have_key = 1; + } + + /* + * If the last entry wasn't cached then position the btree iterator + */ + if (!have_key) + { + /* + * Position the iterator at the directory's thread record. + * (i.e. just before the first entry) + */ + buildthreadkey(dirhint->dh_desc.cd_parentcnid, key); + result = BTSearchRecord(fcb, iterator, NULL, NULL, iterator); + if (result) + { + result = MacToVFSError(result); + goto cleanup; + } + if (index == 0) + { + dirhint->dh_threadhint = iterator->hint.nodeNum; + } + /* + * Iterate until we reach the entry just + * before the one we want to start with. + */ + if (index > 0) + { + struct position_state ps; + + ps.error = 0; + ps.count = 0; + ps.index = index; + ps.parentID = dirhint->dh_desc.cd_parentcnid; + ps.hfsmp = hfsmp; + + result = BTIterateRecords(fcb, kBTreeNextRecord, iterator, (IterateCallBackProcPtr)cat_findposition, &ps); + if (ps.error) + result = ps.error; + else + result = MacToVFSError(result); + if (result) { + result = MacToVFSError(result); + if (result == ENOENT) { + /* + * ENOENT means we've hit the EOF. + * suppress the error, and set the eof flag. + */ + result = 0; + dirhint->dh_desc.cd_flags |= CD_EOF; + *eofflag = true; + } + goto cleanup; + } + } + } + + state.cbs_index = index; + state.cbs_hfsmp = hfsmp; + state.cbs_psReadDirBuffer = psReadDirBuffer; + state.cbs_desc = &dirhint->dh_desc; + state.cbs_namebuf = (u_int8_t *)buffer; + state.cbs_result = 0; + state.cbs_parentID = dirhint->dh_desc.cd_parentcnid; + + /* Use a temporary buffer to hold intermediate descriptor names. 
*/ + if (dirhint->dh_desc.cd_namelen > 0 && dirhint->dh_desc.cd_nameptr != NULL) + { + bcopy(dirhint->dh_desc.cd_nameptr, buffer, dirhint->dh_desc.cd_namelen+1); + if (dirhint->dh_desc.cd_flags & CD_HASBUF) + { + dirhint->dh_desc.cd_flags &= ~CD_HASBUF; + hfs_free((void*) dirhint->dh_desc.cd_nameptr); + } + } + dirhint->dh_desc.cd_nameptr = (u_int8_t *)buffer; + + enum BTreeIterationOperations op; + if (extended && index != 0 && have_key) + op = kBTreeCurrentRecord; + else + op = kBTreeNextRecord; + + /* + * Process as many entries as possible starting at iterator->key. + */ + result = BTIterateRecords(fcb, op, iterator, (IterateCallBackProcPtr)getdirentries_callback, &state); + + /* For extended calls, every call to getdirentries_callback() + * transfers the previous directory entry found to the user + * buffer. Therefore when BTIterateRecords reaches the end of + * Catalog BTree, call getdirentries_callback() again with + * dummy values to copy the last directory entry stored in + * packdirentry_state + */ + if (extended && (result == fsBTRecordNotFoundErr)) + { + CatalogKey ckp; + bzero(&ckp, sizeof(ckp)); + result = getdirentries_callback(&ckp, NULL, &state); + } + + /* Note that state.cbs_index is still valid on errors */ + *items = state.cbs_index - index; + index = state.cbs_index; + + /* + * Also note that cbs_eof is set in all cases if we ever hit EOF + * during the enumeration by the catalog callback. Mark the directory's hint + * descriptor as having hit EOF. + */ + if (state.cbs_eof) + { + dirhint->dh_desc.cd_flags |= CD_EOF; + *eofflag = true; + } + + //If we went out without any entries. + //Need to check if the last updated entry is dotx2 and update accordingly. + if (*items == 0 && psDotDotEntry!= NULL) + { + if (state.cbs_eof) + { + //This is an empty dir + psDotDotEntry->de_nextcookie = UVFS_DIRCOOKIE_EOF; + psDotDotEntry->de_nextrec = 0; + } + else + { + //Buffer is too small to add more entries after ".." 
entry + psDotDotEntry->de_nextrec = 0; + } + } + + /* Finish updating the catalog iterator. */ + dirhint->dh_desc.cd_hint = iterator->hint.nodeNum; + dirhint->dh_desc.cd_flags |= CD_DECOMPOSED; + dirhint->dh_index = index - 1; + + /* Fix up the name. */ + if (dirhint->dh_desc.cd_namelen > 0) + { + dirhint->dh_desc.cd_nameptr = lf_hfs_utils_allocate_and_copy_string( (char *)buffer, dirhint->dh_desc.cd_namelen ); + dirhint->dh_desc.cd_flags |= CD_HASBUF; + } + else + { + dirhint->dh_desc.cd_nameptr = NULL; + dirhint->dh_desc.cd_namelen = 0; + } + + /* + * Post process any hard links to get the real file id. + */ + if (state.cbs_nlinks > 0) + { + ino_t fileid = 0; + caddr_t address; + int i; + + for (i = 0; i < state.cbs_nlinks; ++i) + { + if (resolvelinkid(hfsmp, state.cbs_linkinfo[i].link_ref, &fileid) != 0) + continue; + /* This assumes that d_ino is always first field. */ + address = state.cbs_linkinfo[i].dirent_addr; + if (address == (user_addr_t)0) + continue; + + /* Write the resolved inode number into the dirent emitted earlier: + * the dirent address is the destination, the resolved fileid the source + * (mirrors the copyout() in the kernel implementation). */ + if (extended) + { + ino64_t fileid_64 = (ino64_t)fileid; + memcpy((void*) address, &fileid_64, sizeof(fileid_64)); + } + else + { + memcpy((void*) address, &fileid, sizeof(fileid)); + } + + } + } + + if (state.cbs_result) + result = state.cbs_result; + else + result = MacToVFSError(result); + + if (result == ENOENT) + { + result = 0; + } + +cleanup: + hfs_free(buffer); + + return (result); +} + +/* + * cat_idlookup - lookup a catalog node using a cnode id + * + * Note: The caller is responsible for releasing the output + * catalog descriptor (when supplied outdescp is non-null).
+ */ +int +cat_idlookup(struct hfsmount *hfsmp, cnid_t cnid, int allow_system_files, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, struct cat_fork *forkp) +{ + BTreeIterator * iterator = NULL; + FSBufferDescriptor btdata = {0}; + u_int16_t datasize = 0; + CatalogKey * keyp = NULL; + CatalogRecord * recp = NULL; + int result = 0; + + iterator = hfs_mallocz(sizeof(*iterator)); + if (iterator == NULL) + return MacToVFSError(ENOMEM); + + buildthreadkey(cnid, (CatalogKey *)&iterator->key); + + recp = hfs_malloc(sizeof(CatalogRecord)); + /* Check the allocation, consistent with the iterator check above; + * recp is dereferenced below via BDINIT/BTSearchRecord. */ + if (recp == NULL) + { + hfs_free(iterator); + return MacToVFSError(ENOMEM); + } + BDINIT(btdata, recp); + + result = BTSearchRecord(VTOF(HFSTOVCB(hfsmp)->catalogRefNum), iterator, + &btdata, &datasize, iterator); + if (result) + goto exit; + + /* Turn thread record into a cnode key (in place) */ + switch (recp->recordType) { + + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + keyp = (CatalogKey *)&recp->hfsPlusThread.reserved; + + /* check for NULL name */ + if (keyp->hfsPlus.nodeName.length == 0) { + result = ENOENT; + goto exit; + } + + keyp->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength + + (keyp->hfsPlus.nodeName.length * 2); + break; + + default: + result = ENOENT; + goto exit; + } + + result = cat_lookupbykey(hfsmp, keyp, + ((allow_system_files != 0) ? HFS_LOOKUP_SYSFILE : 0), + 0, wantrsrc, outdescp, attrp, forkp, NULL); + /* No corresponding file/folder record found for a thread record, + * mark the volume inconsistent. + */ + if (result == 0 && outdescp) { + cnid_t dcnid = outdescp->cd_cnid; + /* + * Just for sanity's case, let's make sure that + * the key in the thread matches the key in the record.
+ */ + if (cnid != dcnid) + { + LFHFS_LOG(LEVEL_ERROR, "cat_idlookup: Requested cnid (%d / %08x) != dcnid (%d / %08x)\n", cnid, cnid, dcnid, dcnid); + result = ENOENT; + } + } +exit: + hfs_free(recp); + hfs_free(iterator); + + return MacToVFSError(result); +} + +/* + * buildkey - build a Catalog b-tree key from a cnode descriptor + * + * Converts the descriptor's UTF-8 name to the on-disk Unicode form and + * fills in parentID, nodeName and keyLength of the supplied key. + * Returns 0 on success, EINVAL for an empty name or a name with invalid + * characters, or ENAMETOOLONG when the converted name does not fit in + * the key's unicode buffer. + */ +static int +buildkey(struct cat_desc *descp, HFSPlusCatalogKey *key) +{ + int utf8_flags = UTF_ESCAPE_ILLEGAL; + int result = 0; + size_t unicodeBytes = 0; + + if (descp->cd_namelen == 0 || descp->cd_nameptr[0] == '\0') + return (EINVAL); /* invalid name */ + + key->parentID = descp->cd_parentcnid; + key->nodeName.length = 0; + /* + * Convert filename from UTF-8 into Unicode + */ + + /* Request decomposition unless the descriptor is already marked decomposed. */ + if ((descp->cd_flags & CD_DECOMPOSED) == 0) + { + utf8_flags |= UTF_DECOMPOSED; + } + result = utf8_decodestr(descp->cd_nameptr, descp->cd_namelen, key->nodeName.unicode, &unicodeBytes, sizeof(key->nodeName.unicode), ':', utf8_flags); + /* Note: the length fields below are filled in even when the conversion + * failed; only the error code decides whether the key is usable. */ + key->nodeName.length = unicodeBytes / sizeof(UniChar); + key->keyLength = kHFSPlusCatalogKeyMinimumLength + unicodeBytes; + if (result) + { + if (result != ENAMETOOLONG) + result = EINVAL; /* name has invalid characters */ + return (result); + } + + return (0); +} + +/* + * These Catalog functions allow access to the HFS Catalog (database). + * The catalog b-tree lock must be acquired before calling any of these routines. + */ + +/* + * cat_lookup - lookup a catalog node using a cnode descriptor + * + * Note: The caller is responsible for releasing the output + * catalog descriptor (when supplied outdescp is non-null).
+ */ +int +cat_lookup(struct hfsmount *hfsmp, struct cat_desc *descp, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, + struct cat_fork *forkp, cnid_t *desc_cnid) +{ + CatalogKey * keyp = NULL; + int result; + int flags = 0; + + keyp = hfs_malloc(sizeof(CatalogKey)); + if ( keyp == NULL ) + { + result = ENOMEM; + goto exit; + } + + result = buildkey(descp, (HFSPlusCatalogKey *)keyp); + if (result) + goto exit; + + result = cat_lookupbykey(hfsmp, keyp, flags, descp->cd_hint, wantrsrc, outdescp, attrp, forkp, desc_cnid); + + if (result == ENOENT) { + struct cat_desc temp_desc; + if (outdescp == NULL) { + bzero(&temp_desc, sizeof(temp_desc)); + outdescp = &temp_desc; + } + result = cat_lookupmangled(hfsmp, descp, wantrsrc, outdescp, attrp, forkp); + if (desc_cnid) { + *desc_cnid = outdescp->cd_cnid; + } + if (outdescp == &temp_desc) { + /* Release the local copy of desc */ + cat_releasedesc(outdescp); + } + } + +exit: + hfs_free(keyp); + + return (result); +} + +/* + * cat_lookupmangled - lookup a catalog node using a mangled name + */ +int +cat_lookupmangled(struct hfsmount *hfsmp, struct cat_desc *descp, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, struct cat_fork *forkp) +{ + cnid_t fileID; + u_int32_t prefixlen; + int result; + u_int8_t utf8[NAME_MAX + 1]; + ByteCount utf8len; + u_int16_t unicode[kHFSPlusMaxFileNameChars + 1]; + size_t unicodelen; + + if (wantrsrc) + return (ENOENT); + + fileID = GetEmbeddedFileID(descp->cd_nameptr, descp->cd_namelen, &prefixlen); + if (fileID < (cnid_t)kHFSFirstUserCatalogNodeID) + return (ENOENT); + + if (fileID == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || + fileID == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid || + fileID == hfsmp->hfs_jnlfileid || + fileID == hfsmp->hfs_jnlinfoblkid) + { + return (ENOENT); + } + + result = cat_idlookup(hfsmp, fileID, 0, 0, outdescp, attrp, forkp); + if (result) + return (ENOENT); + /* It must be in the correct directory */ + if 
(descp->cd_parentcnid != outdescp->cd_parentcnid) + goto falsematch; + + /* + * Compare the mangled version of file name looked up from the + * disk with the mangled name provided by the user. Note that + * this comparison is case-sensitive, which should be fine + * since we're trying to prevent user space from constructing + * a mangled name that differs from the one they'd get from the + * file system. + */ + result = utf8_decodestr(outdescp->cd_nameptr, outdescp->cd_namelen, + unicode, &unicodelen, sizeof(unicode), ':', 0); + if (result) { + goto falsematch; + } + result = ConvertUnicodeToUTF8Mangled(unicodelen, unicode, + sizeof(utf8), &utf8len, utf8, fileID); + if ((result != 0) || + ((u_int16_t)descp->cd_namelen != utf8len) || + (bcmp(descp->cd_nameptr, utf8, utf8len) != 0)) { + goto falsematch; + } + + return (0); + +falsematch: + cat_releasedesc(outdescp); + return (ENOENT); +} + +/* + * Callback to collect directory entries. + * Called with readattr_state for each item in a directory. 
+ */ +struct readattr_state { + struct hfsmount *hfsmp; + struct cat_entrylist *list; + cnid_t dir_cnid; + int error; + int reached_eof; +}; + +static int +getentriesattr_callback(const CatalogKey *key, const CatalogRecord *rec, struct readattr_state *state) +{ + struct cat_entrylist *list = state->list; + struct hfsmount *hfsmp = state->hfsmp; + struct cat_entry *cep; + cnid_t parentcnid; + + if (list->realentries >= list->maxentries) + return (0); /* stop */ + + parentcnid = key->hfsPlus.parentID; + + switch(rec->recordType) + { + case kHFSPlusFolderRecord: + case kHFSPlusFileRecord: + if (parentcnid != state->dir_cnid) + { + state->error = btNotFound; + state->reached_eof = 1; + return (0); /* stop */ + } + break; + case kHFSPlusFolderThreadRecord: + case kHFSPlusFileThreadRecord: + list->skipentries++; + if (parentcnid != state->dir_cnid) + { + state->error = btNotFound; + state->reached_eof = 1; + return (0); /* stop */ + } + else + return (1); /*continue */ + break; + default: + state->error = btNotFound; + return (0); /* stop */ + } + + /* Hide the private system directories and journal files */ + if (parentcnid == kHFSRootFolderID) + { + if (rec->recordType == kHFSPlusFolderRecord) + { + if (rec->hfsPlusFolder.folderID == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || rec->hfsPlusFolder.folderID == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) + { + list->skipentries++; + return (1); /* continue */ + } + } + + if ((rec->recordType == kHFSPlusFileRecord) && IsEntryAJnlFile(hfsmp, rec->hfsPlusFile.fileID)) + { + list->skipentries++; + return (1); /* continue */ + } + } + + cep = &list->entry[list->realentries++]; + + getbsdattr(hfsmp, (const struct HFSPlusCatalogFile *)rec, &cep->ce_attr); + builddesc((const HFSPlusCatalogKey *)key, getcnid(rec), 0, getencoding(rec), + isadir(rec), &cep->ce_desc); + + if (rec->recordType == kHFSPlusFileRecord) + { + cep->ce_datasize = rec->hfsPlusFile.dataFork.logicalSize; + cep->ce_datablks = 
rec->hfsPlusFile.dataFork.totalBlocks; + cep->ce_rsrcsize = rec->hfsPlusFile.resourceFork.logicalSize; + cep->ce_rsrcblks = rec->hfsPlusFile.resourceFork.totalBlocks; + + /* Save link reference for later processing. */ + if ((SWAP_BE32(rec->hfsPlusFile.userInfo.fdType) == kHardLinkFileType) && + (SWAP_BE32(rec->hfsPlusFile.userInfo.fdCreator) == kHFSPlusCreator)) + { + cep->ce_attr.ca_linkref = rec->hfsPlusFile.bsdInfo.special.iNodeNum; + } + else if ((rec->hfsPlusFile.flags & kHFSHasLinkChainMask) && + (SWAP_BE32(rec->hfsPlusFile.userInfo.fdType) == kHFSAliasType) && + (SWAP_BE32(rec->hfsPlusFile.userInfo.fdCreator) == kHFSAliasCreator)) + { + cep->ce_attr.ca_linkref = rec->hfsPlusFile.bsdInfo.special.iNodeNum; + } + } + + + return (list->realentries < list->maxentries); +} + +/* + * Pack a cat_entrylist buffer with attributes from the catalog + * + * Note: index is zero relative + */ +int +cat_getentriesattr(struct hfsmount *hfsmp, directoryhint_t *dirhint, struct cat_entrylist *ce_list, int *reachedeof) +{ + FCB* fcb; + CatalogKey * key; + BTreeIterator * iterator = NULL; + struct readattr_state state; + cnid_t parentcnid; + int i; + int index; + bool bHaveKey = false; + int result = 0; + int reached_eof = 0; + + ce_list->realentries = 0; + + fcb = GetFileControlBlock(HFSTOVCB(hfsmp)->catalogRefNum); + parentcnid = dirhint->dh_desc.cd_parentcnid; + + bzero (&state, sizeof(struct readattr_state)); + + state.hfsmp = hfsmp; + state.list = ce_list; + state.dir_cnid = parentcnid; + state.error = 0; + + iterator = hfs_mallocz(sizeof(*iterator)); + key = (CatalogKey *)&iterator->key; + iterator->hint.nodeNum = dirhint->dh_desc.cd_hint; + index = dirhint->dh_index + 1; + + /* + * Attempt to build a key from cached filename + */ + if (dirhint->dh_desc.cd_namelen != 0) + { + if (buildkey(&dirhint->dh_desc, (HFSPlusCatalogKey *)key) == 0) + { + bHaveKey = true; + } + } + + /* + * If the last entry wasn't cached then position the btree iterator + */ + if ((index == 0) || 
!bHaveKey) + { + /* + * Position the iterator at the directory's thread record. + * (i.e. just before the first entry) + */ + buildthreadkey(dirhint->dh_desc.cd_parentcnid, key); + result = BTSearchRecord(fcb, iterator, NULL, NULL, iterator); + if (result) + { + result = MacToVFSError(result); + goto exit; + } + + /* + * Iterate until we reach the entry just + * before the one we want to start with. + */ + if (index > 0) + { + + struct position_state ps; + + ps.error = 0; + ps.count = 0; + ps.index = index; + ps.parentID = dirhint->dh_desc.cd_parentcnid; + ps.hfsmp = hfsmp; + + result = BTIterateRecords(fcb, kBTreeNextRecord, iterator, + (IterateCallBackProcPtr)cat_findposition, &ps); + if (ps.error) + result = ps.error; + else + result = MacToVFSError(result); + + if (result) + { + /* + * Note: the index may now point to EOF if the directory + * was modified in between system calls. We will return + * ENOENT from cat_findposition if this is the case, and + * when we bail out with an error, our caller (hfs_readdirattr_internal) + * will suppress the error and indicate EOF to its caller. + */ + result = MacToVFSError(result); + goto exit; + } + } + } + + /* Fill list with entries starting at iterator->key. */ + result = BTIterateRecords(fcb, kBTreeNextRecord, iterator, + (IterateCallBackProcPtr)getentriesattr_callback, &state); + + if (state.error) + { + result = state.error; + reached_eof = state.reached_eof; + } + else if (ce_list->realentries == 0) + { + result = btNotFound; + reached_eof = 1; + } + else + { + result = MacToVFSError(result); + } + + /* + * Resolve any hard links. 
+ */ + for (i = 0; i < (int)ce_list->realentries; ++i) + { + struct FndrFileInfo *fip; + struct cat_entry *cep; + int isdirlink = 0; + int isfilelink = 0; + + cep = &ce_list->entry[i]; + if (cep->ce_attr.ca_linkref == 0) + continue; + + /* Note: Finder info is still in Big Endian */ + fip = (struct FndrFileInfo *)&cep->ce_attr.ca_finderinfo; + + if (S_ISREG(cep->ce_attr.ca_mode) && + (SWAP_BE32(fip->fdType) == kHardLinkFileType) && + (SWAP_BE32(fip->fdCreator) == kHFSPlusCreator)) { + isfilelink = 1; + } + if (S_ISREG(cep->ce_attr.ca_mode) && + (SWAP_BE32(fip->fdType) == kHFSAliasType) && + (SWAP_BE32(fip->fdCreator) == kHFSAliasCreator) && + (cep->ce_attr.ca_recflags & kHFSHasLinkChainMask)) { + isdirlink = 1; + } + + if (isfilelink || isdirlink) { + struct HFSPlusCatalogFile filerec; + + if (cat_resolvelink(hfsmp, cep->ce_attr.ca_linkref, isdirlink, &filerec) != 0) + continue; + /* Repack entry from inode record. */ + getbsdattr(hfsmp, &filerec, &cep->ce_attr); + cep->ce_datasize = filerec.dataFork.logicalSize; + cep->ce_datablks = filerec.dataFork.totalBlocks; + cep->ce_rsrcsize = filerec.resourceFork.logicalSize; + cep->ce_rsrcblks = filerec.resourceFork.totalBlocks; + } + } + +exit: + if (iterator) + hfs_free(iterator); + *reachedeof = reached_eof; + return MacToVFSError(result); +} + +/* + * Check the run-time ID hashtable. + * + * The catalog lock must be held (like other functions in this file). + * + * Returns: + * 1 if the ID is in the hash table. 
+ * 0 if the ID is not in the hash table + */ +int cat_check_idhash (struct hfsmount *hfsmp, cnid_t test_fileid) { + + cat_preflightid_t *preflight; + int found = 0; + + for (preflight = IDHASH(hfsmp, test_fileid)->lh_first; preflight ; preflight = preflight->id_hash.le_next) + { + if (preflight->fileid == test_fileid) + { + found = 1; + break; + } + } + + return found; +} + +int +cat_acquire_cnid (struct hfsmount *hfsmp, cnid_t *new_cnid) +{ + uint32_t nextCNID; + BTreeIterator *iterator; + FSBufferDescriptor btdata; + uint16_t datasize; + CatalogRecord *recp; + int result = 0; + int wrapped = 0; + /* + * Get the next CNID. We can change it since we hold the catalog lock. + */ +nextid: + nextCNID = hfsmp->vcbNxtCNID; + if (nextCNID == 0xFFFFFFFF) { + wrapped++; + if (wrapped > 1) { + /* don't allow more than one wrap-around */ + return ENOSPC; + } + hfs_lock_mount (hfsmp); + hfsmp->vcbNxtCNID = kHFSFirstUserCatalogNodeID; + hfsmp->vcbAtrb |= kHFSCatalogNodeIDsReusedMask; + hfs_unlock_mount (hfsmp); + } else { + hfsmp->vcbNxtCNID++; + } + hfs_note_header_minor_change(hfsmp); + + /* First check that there are not any entries pending in the hash table with this ID */ + if (cat_check_idhash (hfsmp, nextCNID)) + { + /* Someone wants to insert this into the catalog but hasn't done so yet. Skip it */ + goto nextid; + } + + /* Check to see if a thread record exists for the target ID we just got */ + iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) + return ENOMEM; + + buildthreadkey(nextCNID, (CatalogKey *)&iterator->key); + + recp = hfs_malloc(sizeof(CatalogRecord)); + BDINIT(btdata, recp); + + result = BTSearchRecord(hfsmp->hfs_catalog_cp->c_datafork, iterator, &btdata, &datasize, iterator); + hfs_free(recp); + hfs_free(iterator); + + if (result == btNotFound) { + /* Good. File ID was not in use. Move on to checking EA B-Tree */ + result = file_attribute_exist (hfsmp, nextCNID); + if (result == EEXIST) { + /* This CNID has orphaned EAs. 
Skip it and move on to the next one */ + goto nextid; + } + if (result) { + /* For any other error, return the result */ + return result; + } + + /* + * Now validate that there are no lingering cnodes with this ID. If a cnode + * has been removed on-disk (marked C_NOEXISTS), but has not yet been reclaimed, + * then it will still have an entry in the cnode hash table. This means that + * a subsequent lookup will find THAT entry and believe this one has been deleted + * prematurely. If there is a lingering cnode, then just skip this entry and move on. + * + * Note that we pass (existence_only == 1) argument to hfs_chash_snoop. + */ + if ((hfsmp->vcbAtrb & kHFSCatalogNodeIDsReusedMask)) + { + if (hfs_chash_snoop (hfsmp, nextCNID, 1, NULL, NULL) == 0) + { + goto nextid; + } + } + + /* + * If we get here, then we didn't see any thread records, orphaned EAs, + * or stale cnodes. This ID is safe to vend out. + */ + *new_cnid = nextCNID; + } + else if (result == noErr) { + /* move on to the next ID */ + goto nextid; + } + else { + /* For any other situation, just bail out */ + return EIO; + } + + return 0; +} + + +int +cat_preflight(struct hfsmount *hfsmp, uint32_t ops, cat_cookie_t *cookie) +{ + int lockflags = 0; + int result; + + if (hfsmp->hfs_catalog_cp->c_lockowner != pthread_self()) + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + + result = BTReserveSpace(hfsmp->hfs_catalog_cp->c_datafork, ops, (void*)cookie); + + if (lockflags) + hfs_systemfile_unlock(hfsmp, lockflags); + + return MacToVFSError(result); +} + +void +cat_postflight(struct hfsmount *hfsmp, cat_cookie_t *cookie) +{ + int lockflags = 0; + + if (hfsmp->hfs_catalog_cp->c_lockowner != pthread_self()) + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + + (void) BTReleaseReserve(hfsmp->hfs_catalog_cp->c_datafork, (void*)cookie); + + if (lockflags) + hfs_systemfile_unlock(hfsmp, lockflags); +} +/* + * Extract the parent ID from a catalog node record. 
+ */ +static cnid_t +getparentcnid(const CatalogRecord *recp) +{ + cnid_t cnid = 0; + + switch (recp->recordType) + { + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + cnid = recp->hfsPlusThread.parentID; + break; + default: + LFHFS_LOG(LEVEL_ERROR, "getparentcnid: unknown recordType (crp @ %p)\n", recp); + hfs_assert(0); + break; + } + + return (cnid); +} + +int +cat_rename ( + struct hfsmount * hfsmp, + struct cat_desc * from_cdp, + struct cat_desc * todir_cdp, + struct cat_desc * to_cdp, + struct cat_desc * out_cdp ) +{ + + int result = 0; + + FSBufferDescriptor btdata; + ExtendedVCB * vcb = HFSTOVCB(hfsmp); + FCB * fcb = GetFileControlBlock(vcb->catalogRefNum); + u_int16_t datasize; + int sourcegone = 0; + int skipthread = 0; + int directory = from_cdp->cd_flags & CD_ISDIR; + int is_dirlink = 0; + u_int32_t encoding = 0; + + if (from_cdp->cd_namelen == 0 || to_cdp->cd_namelen == 0) + { + return (EINVAL); + } + + CatalogRecord* recp = NULL; + BTreeIterator* to_iterator = NULL; + BTreeIterator* from_iterator = (BTreeIterator*) hfs_mallocz(sizeof(BTreeIterator)); + if (from_iterator == NULL) + { + return (ENOMEM); + } + + if ((result = buildkey(from_cdp, (HFSPlusCatalogKey*) &from_iterator->key))) + { + goto exit; + } + + to_iterator = hfs_mallocz(sizeof(*to_iterator)); + if (to_iterator == NULL) + { + result = ENOMEM; + goto exit; + } + + if ((result = buildkey(to_cdp, (HFSPlusCatalogKey*) &to_iterator->key))) + { + goto exit; + } + + recp = hfs_malloc(sizeof(CatalogRecord)); + if (recp == NULL) + { + result = ENOMEM; + goto exit; + } + BDINIT(btdata, recp); + + /* + * When moving a directory, make sure its a valid move. 
+ */ + if (directory && (from_cdp->cd_parentcnid != to_cdp->cd_parentcnid)) + { + cnid_t cnid = from_cdp->cd_cnid; + cnid_t pathcnid = todir_cdp->cd_parentcnid; + + /* First check the obvious ones */ + if (cnid == fsRtDirID || cnid == to_cdp->cd_parentcnid || cnid == pathcnid) + { + result = EINVAL; + goto exit; + } + /* now allocate the dir_iterator */ + BTreeIterator* dir_iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (dir_iterator == NULL) + { + result = ENOMEM; + goto exit; + } + + /* + * Traverse destination path all the way back to the root + * making sure that source directory is not encountered. + * + */ + while (pathcnid > fsRtDirID) + { + buildthreadkey(pathcnid, (CatalogKey *)&dir_iterator->key); + result = BTSearchRecord(fcb, dir_iterator, &btdata, &datasize, NULL); + if (result) + { + hfs_free(dir_iterator); + goto exit; + } + pathcnid = getparentcnid(recp); + if (pathcnid == cnid || pathcnid == 0) + { + result = EINVAL; + hfs_free(dir_iterator); + goto exit; + } + } + hfs_free(dir_iterator); + } + + /* + * Step 1: Find cnode data at old location + */ + result = BTSearchRecord(fcb, from_iterator, &btdata, + &datasize, from_iterator); + if (result) + { + if (result != btNotFound) + goto exit; + + struct cat_desc temp_desc; + + /* Probably the node has mangled name */ + result = cat_lookupmangled(hfsmp, from_cdp, 0, &temp_desc, NULL, NULL); + if (result) + goto exit; + + /* The file has mangled name. Search the cnode data using full name */ + bzero(from_iterator, sizeof(*from_iterator)); + result = buildkey(&temp_desc, (HFSPlusCatalogKey *)&from_iterator->key); + if (result) + { + cat_releasedesc(&temp_desc); + goto exit; + } + + result = BTSearchRecord(fcb, from_iterator, &btdata, &datasize, from_iterator); + if (result) + { + cat_releasedesc(&temp_desc); + goto exit; + } + + cat_releasedesc(&temp_desc); + } + + /* Check if the source is directory hard link. 
We do not change + * directory flag because it is later used to initialize result descp + */ + if ((directory) && (recp->recordType == kHFSPlusFileRecord) && (recp->hfsPlusFile.flags & kHFSHasLinkChainMask)) + { + is_dirlink = 1; + } + + /* + * Update the text encoding (on disk and in descriptor), + * using hfs_pickencoding to get the new encoding when available. + * + * Note that hardlink inodes don't require a text encoding hint. + */ + if (todir_cdp->cd_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid && + todir_cdp->cd_parentcnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) + { + encoding = kTextEncodingMacRoman; + + hfs_setencodingbits(hfsmp, encoding); + recp->hfsPlusFile.textEncoding = encoding; + if (out_cdp) + out_cdp->cd_encoding = encoding; + } + + + /* Step 2: Insert cnode at new location */ + result = BTInsertRecord(fcb, to_iterator, &btdata, datasize); + if (result == btExists) + { + int fromtype = recp->recordType; + cnid_t cnid = 0; + + if (from_cdp->cd_parentcnid != to_cdp->cd_parentcnid) + goto exit; /* EEXIST */ + + /* Find cnode data at new location */ + result = BTSearchRecord(fcb, to_iterator, &btdata, &datasize, NULL); + if (result) + goto exit; + + /* Get the CNID after calling searchrecord */ + cnid = getcnid (recp); + if (cnid == 0) + { + hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED); + result = EINVAL; + goto exit; + } + + if ((fromtype != recp->recordType) || (from_cdp->cd_cnid != cnid)) + { + result = EEXIST; + goto exit; /* EEXIST */ + } + /* The old name is a case variant and must be removed */ + result = BTDeleteRecord(fcb, from_iterator); + if (result) + goto exit; + + /* Insert cnode (now that case duplicate is gone) */ + result = BTInsertRecord(fcb, to_iterator, &btdata, datasize); + if (result) + { + /* Try and restore original before leaving */ + // XXXdbg + { + int err; + err = BTInsertRecord(fcb, from_iterator, &btdata, datasize); + if (err) + { + LFHFS_LOG(LEVEL_ERROR, "cat_create: could not undo 
(BTInsert = %d)\n", err); + hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED); + result = err; + goto exit; + } + } + + goto exit; + } + sourcegone = 1; + } + if (result) + goto exit; + + /* Step 3: Remove cnode from old location */ + if (!sourcegone) + { + result = BTDeleteRecord(fcb, from_iterator); + if (result) + { + /* Try and delete new record before leaving */ + // XXXdbg + { + int err; + err = BTDeleteRecord(fcb, to_iterator); + if (err) + { + LFHFS_LOG(LEVEL_ERROR, "cat_create: could not undo (BTDelete = %d)\n", err); + hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED); + result = err; + goto exit; + } + } + + goto exit; + } + } + + /* #### POINT OF NO RETURN #### */ + + /* + * Step 4: Remove cnode's old thread record + */ + buildthreadkey(from_cdp->cd_cnid, (CatalogKey *)&from_iterator->key); + (void) BTDeleteRecord(fcb, from_iterator); + + /* + * Step 5: Insert cnode's new thread record + * (optional for HFS files) + */ + if (!skipthread) + { + /* For directory hard links, always create a file thread + * record. For everything else, use the directory flag. 
+ */ + if (is_dirlink) + { + datasize = buildthread(&to_iterator->key, recp, false); + } + else + { + datasize = buildthread(&to_iterator->key, recp, directory); + } + btdata.itemSize = datasize; + buildthreadkey(from_cdp->cd_cnid, (CatalogKey *)&from_iterator->key); + result = BTInsertRecord(fcb, from_iterator, &btdata, datasize); + } + + if (out_cdp) + { + HFSPlusCatalogKey * pluskey = NULL; + pluskey = (HFSPlusCatalogKey *)&to_iterator->key; + builddesc(pluskey, from_cdp->cd_cnid, to_iterator->hint.nodeNum, encoding, directory, out_cdp); + + } +exit: + (void) BTFlushPath(fcb); + + hfs_free(from_iterator); + hfs_free(to_iterator); + hfs_free(recp); + + return MacToVFSError(result); +} + +struct update_state { + struct cat_desc * s_desc; + struct cat_attr * s_attr; + const struct cat_fork * s_datafork; + const struct cat_fork * s_rsrcfork; + struct hfsmount * s_hfsmp; +}; + +/* + * catrec_update - Update the fields of a catalog record + * This is called from within BTUpdateRecord. + */ +static int +catrec_update(const CatalogKey *ckp, CatalogRecord *crp, struct update_state *state) +{ + struct cat_desc *descp = state->s_desc; + struct cat_attr *attrp = state->s_attr; + const struct cat_fork *forkp; + struct hfsmount *hfsmp = state->s_hfsmp; + long blksize = HFSTOVCB(hfsmp)->blockSize; + + switch (crp->recordType) + { + case kHFSPlusFolderRecord: + { + HFSPlusCatalogFolder *dir; + + dir = (struct HFSPlusCatalogFolder *)crp; + /* Do a quick sanity check */ + if (dir->folderID != attrp->ca_fileid) + { + LFHFS_LOG(LEVEL_DEBUG, "catrec_update: id %d != %d, vol=%s\n", dir->folderID, attrp->ca_fileid, hfsmp->vcbVN); + return (btNotFound); + } + dir->flags = attrp->ca_recflags; + dir->valence = attrp->ca_entries; + dir->createDate = to_hfs_time(attrp->ca_itime); + dir->contentModDate = to_hfs_time(attrp->ca_mtime); + dir->backupDate = to_hfs_time(attrp->ca_btime); + dir->accessDate = to_hfs_time(attrp->ca_atime); + attrp->ca_atimeondisk = attrp->ca_atime; + 
dir->attributeModDate = to_hfs_time(attrp->ca_ctime); + /* Note: directory hardlink inodes don't require a text encoding hint. */ + if (ckp->hfsPlus.parentID != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { + dir->textEncoding = descp->cd_encoding; + } + dir->folderCount = attrp->ca_dircount; + bcopy(&attrp->ca_finderinfo[0], &dir->userInfo, 32); + /* + * Update the BSD Info if it was already initialized on + * disk or if the runtime values have been modified. + * + * If the BSD info was already initialized, but + * MNT_UNKNOWNPERMISSIONS is set, then the runtime IDs are + * probably different than what was on disk. We don't want + * to overwrite the on-disk values (so if we turn off + * MNT_UNKNOWNPERMISSIONS, the old IDs get used again). + * This way, we can still change fields like the mode or + * dates even when MNT_UNKNOWNPERMISSIONS is set. + * + * Note that if MNT_UNKNOWNPERMISSIONS is set, hfs_chown + * won't change the uid or gid from their defaults. So, if + * the BSD info wasn't set, and the runtime values are not + * default, then what changed was the mode or flags. We + * have to set the uid and gid to something, so use the + * supplied values (which will be default), which has the + * same effect as creating a new file while + * MNT_UNKNOWNPERMISSIONS is set. + */ + if ((dir->bsdInfo.fileMode != 0) || + (attrp->ca_flags != 0) || + (attrp->ca_uid != hfsmp->hfs_uid) || + (attrp->ca_gid != hfsmp->hfs_gid) || + ((attrp->ca_mode & ALLPERMS) != + (hfsmp->hfs_dir_mask & ACCESSPERMS))) { + if ((dir->bsdInfo.fileMode == 0) || ((HFSTOVFS(hfsmp)->mnt_flag) & MNT_UNKNOWNPERMISSIONS) == 0) + { + dir->bsdInfo.ownerID = attrp->ca_uid; + dir->bsdInfo.groupID = attrp->ca_gid; + } + dir->bsdInfo.ownerFlags = attrp->ca_flags & 0x000000FF; + dir->bsdInfo.adminFlags = attrp->ca_flags >> 16; + dir->bsdInfo.fileMode = attrp->ca_mode; + /* A directory hardlink has a link count. 
*/ + if (attrp->ca_linkcount > 1 || dir->hl_linkCount > 1) + { + dir->hl_linkCount = attrp->ca_linkcount; + } + } + break; + } + case kHFSPlusFileRecord: { + HFSPlusCatalogFile *file; + int is_dirlink; + + file = (struct HFSPlusCatalogFile *)crp; + /* Do a quick sanity check */ + if (file->fileID != attrp->ca_fileid) + return (btNotFound); + file->flags = attrp->ca_recflags; + file->createDate = to_hfs_time(attrp->ca_itime); + file->contentModDate = to_hfs_time(attrp->ca_mtime); + file->backupDate = to_hfs_time(attrp->ca_btime); + file->accessDate = to_hfs_time(attrp->ca_atime); + attrp->ca_atimeondisk = attrp->ca_atime; + file->attributeModDate = to_hfs_time(attrp->ca_ctime); + /* + * Note: file hardlink inodes don't require a text encoding + * hint, but they do have a first link value. + */ + if (ckp->hfsPlus.parentID == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) { + file->hl_firstLinkID = attrp->ca_firstlink; + } else { + file->textEncoding = descp->cd_encoding; + } + bcopy(&attrp->ca_finderinfo[0], &file->userInfo, 32); + /* + * Update the BSD Info if it was already initialized on + * disk or if the runtime values have been modified. + * + * If the BSD info was already initialized, but + * MNT_UNKNOWNPERMISSIONS is set, then the runtime IDs are + * probably different than what was on disk. We don't want + * to overwrite the on-disk values (so if we turn off + * MNT_UNKNOWNPERMISSIONS, the old IDs get used again). + * This way, we can still change fields like the mode or + * dates even when MNT_UNKNOWNPERMISSIONS is set. + * + * Note that if MNT_UNKNOWNPERMISSIONS is set, hfs_chown + * won't change the uid or gid from their defaults. So, if + * the BSD info wasn't set, and the runtime values are not + * default, then what changed was the mode or flags. We + * have to set the uid and gid to something, so use the + * supplied values (which will be default), which has the + * same effect as creating a new file while + * MNT_UNKNOWNPERMISSIONS is set. 
+ * + * Do not modify bsdInfo for directory hard link records. + * They are set during creation and are not modifiable, so just + * leave them alone. + */ + is_dirlink = (file->flags & kHFSHasLinkChainMask) && + (SWAP_BE32(file->userInfo.fdType) == kHFSAliasType) && + (SWAP_BE32(file->userInfo.fdCreator) == kHFSAliasCreator); + + if (!is_dirlink && ((file->bsdInfo.fileMode != 0) || (attrp->ca_flags != 0) || (attrp->ca_uid != hfsmp->hfs_uid) ||(attrp->ca_gid != hfsmp->hfs_gid) || + ((attrp->ca_mode & ALLPERMS) != (hfsmp->hfs_file_mask & ACCESSPERMS)))) + { + if ((file->bsdInfo.fileMode == 0) || (((HFSTOVFS(hfsmp)->mnt_flag) & MNT_UNKNOWNPERMISSIONS) == 0)) + { + file->bsdInfo.ownerID = attrp->ca_uid; + file->bsdInfo.groupID = attrp->ca_gid; + } + file->bsdInfo.ownerFlags = attrp->ca_flags & 0x000000FF; + file->bsdInfo.adminFlags = attrp->ca_flags >> 16; + file->bsdInfo.fileMode = attrp->ca_mode; + } + if (state->s_rsrcfork) { + forkp = state->s_rsrcfork; + file->resourceFork.logicalSize = forkp->cf_size; + file->resourceFork.totalBlocks = forkp->cf_blocks; + bcopy(&forkp->cf_extents[0], &file->resourceFork.extents, + sizeof(HFSPlusExtentRecord)); + /* Push blocks read to disk */ + file->resourceFork.clumpSize = (u_int32_t) howmany(forkp->cf_bytesread, blksize); + } + if (state->s_datafork) { + forkp = state->s_datafork; + file->dataFork.logicalSize = forkp->cf_size; + file->dataFork.totalBlocks = forkp->cf_blocks; + bcopy(&forkp->cf_extents[0], &file->dataFork.extents, + sizeof(HFSPlusExtentRecord)); + /* Push blocks read to disk */ + file->dataFork.clumpSize = (u_int32_t) howmany(forkp->cf_bytesread, blksize); + } + + if ((file->resourceFork.extents[0].startBlock != 0) && + (file->resourceFork.extents[0].startBlock == file->dataFork.extents[0].startBlock)) + { + LFHFS_LOG(LEVEL_ERROR, "catrec_update: rsrc fork == data fork"); + hfs_assert(0); + } + + /* Synchronize the lock state */ + if (attrp->ca_flags & (SF_IMMUTABLE | UF_IMMUTABLE)) + file->flags |= 
kHFSFileLockedMask; + else + file->flags &= ~kHFSFileLockedMask; + + /* Push out special field if necessary */ + if (S_ISBLK(attrp->ca_mode) || S_ISCHR(attrp->ca_mode)) + { + file->bsdInfo.special.rawDevice = attrp->ca_rdev; + } + else + { + /* + * Protect against the degenerate case where the descriptor contains the + * raw inode ID in its CNID field. If the HFSPlusCatalogFile record indicates + * the linkcount was greater than 1 (the default value), then it must have become + * a hardlink. In this case, update the linkcount from the cat_attr passed in. + */ + if ((descp->cd_cnid != attrp->ca_fileid) || (attrp->ca_linkcount > 1 ) || (file->hl_linkCount > 1)) + { + file->hl_linkCount = attrp->ca_linkcount; + } + } + break; + } + default: + return (btNotFound); + } + return (0); +} + +/* + * getkey - get a key from id by doing a thread lookup + */ +static int +getkey(struct hfsmount *hfsmp, cnid_t cnid, CatalogKey * key) +{ + FSBufferDescriptor btdata; + u_int16_t datasize; + CatalogKey * keyp = NULL; + CatalogRecord * recp = NULL; + int result = 0; + + + BTreeIterator* iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) + { + result = memFullErr; + goto exit; + } + buildthreadkey(cnid, (CatalogKey *)&iterator->key); + + recp = hfs_mallocz(sizeof(CatalogRecord)); + if (recp == NULL) + { + result = memFullErr; + goto exit; + } + BDINIT(btdata, recp); + + result = BTSearchRecord(VTOF(HFSTOVCB(hfsmp)->catalogRefNum), iterator, &btdata, &datasize, iterator); + if (result) + goto exit; + + /* Turn thread record into a cnode key (in place) */ + switch (recp->recordType) + { + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + keyp = (CatalogKey *)&recp->hfsPlusThread.reserved; + keyp->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength + + (keyp->hfsPlus.nodeName.length * 2); + bcopy(keyp, key, keyp->hfsPlus.keyLength + 2); + break; + + default: + result = cmNotFound; + break; + } + +exit: + hfs_free(iterator); + hfs_free(recp); + + 
return MacToVFSError(result); +} + +/* + * cat_update_internal - update the catalog node described by descp + * using the data from attrp and forkp. + * If update_hardlink is true, the hard link catalog record is updated + * and not the inode catalog record. + */ +static int +cat_update_internal(struct hfsmount *hfsmp, int update_hardlink, struct cat_desc *descp, struct cat_attr *attrp, + const struct cat_fork *dataforkp, const struct cat_fork *rsrcforkp) +{ + FCB * fcb = hfsmp->hfs_catalog_cp->c_datafork; + BTreeIterator * iterator; + int result = 0; + + struct update_state state; + state.s_desc = descp; + state.s_attr = attrp; + state.s_datafork = dataforkp; + state.s_rsrcfork = rsrcforkp; + state.s_hfsmp = hfsmp; + + /* Borrow the btcb iterator since we have an exclusive catalog lock. */ + iterator = &((BTreeControlBlockPtr)(fcb->ff_sysfileinfo))->iterator; + + /* + * For open-deleted files we need to do a lookup by cnid + * (using thread rec). + * + * For hard links and if not requested by caller, the target + * of the update is the inode itself (not the link record) + * so a lookup by fileid (i.e. thread rec) is needed. + */ + if ((update_hardlink == false) && + ((descp->cd_cnid != attrp->ca_fileid) || + (descp->cd_namelen == 0) || + (attrp->ca_recflags & kHFSHasLinkChainMask))) + { + result = getkey(hfsmp, attrp->ca_fileid, (CatalogKey *)&iterator->key); + } + else + { + result = buildkey(descp, (HFSPlusCatalogKey *)&iterator->key); + } + if (result) + goto exit; + + /* Pass a node hint */ + iterator->hint.nodeNum = descp->cd_hint; + + result = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr)catrec_update, &state); + if (result) + goto exit; + + /* Update the node hint. */ + descp->cd_hint = iterator->hint.nodeNum; + +exit: + (void) BTFlushPath(fcb); + + return MacToVFSError(result); +} + +/* + * cat_update - update the catalog node described by descp + * using the data from attrp and forkp. 
 */
int
cat_update(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp,
        const struct cat_fork *dataforkp, const struct cat_fork *rsrcforkp)
{
    /* Thin wrapper: update_hardlink == false, so for hard links the target
     * of the update is the inode record, not the link record itself. */
    return cat_update_internal(hfsmp, false, descp, attrp, dataforkp, rsrcforkp);
}

/*
 * cat_delete - delete a node from the catalog
 *
 * Order of B-tree operations:
 * 1. BTDeleteRecord(cnode);
 * 2. BTDeleteRecord(thread);
 * 3. BTUpdateRecord(parent);
 *
 * Returns 0 on success, or a VFS-mapped error. On failure to remove the
 * thread record (step 2) the volume is marked inconsistent instead of
 * returning an error, since the primary record is already gone.
 */
int
cat_delete(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp)
{
    FCB * fcb = hfsmp->hfs_catalog_cp->c_datafork;
    BTreeIterator *iterator;
    cnid_t cnid;
    int result = 0;

    /* Preflight check:
     *
     * The root directory cannot be deleted
     * A directory must be empty
     * A file must be zero length (no blocks)
     */
    if (descp->cd_cnid < kHFSFirstUserCatalogNodeID || descp->cd_parentcnid == kHFSRootParentID)
        return (EINVAL);

    /* XXX Preflight Missing */

    /* Borrow the btcb iterator since we have an exclusive catalog lock. */
    iterator = &((BTreeControlBlockPtr)(fcb->ff_sysfileinfo))->iterator;
    iterator->hint.nodeNum = 0;

    /*
     * Derive a key from either the file ID (for a virtual inode)
     * or the descriptor. 'cnid' is remembered in both cases so the
     * thread record can be deleted after the primary record.
     */
    if (descp->cd_namelen == 0)
    {
        result = getkey(hfsmp, attrp->ca_fileid, (CatalogKey *)&iterator->key);
        cnid = attrp->ca_fileid;
    }
    else
    {
        result = buildkey(descp, (HFSPlusCatalogKey *)&iterator->key);
        cnid = descp->cd_cnid;
    }
    if (result)
        goto exit;

    /* Delete record */
    result = BTDeleteRecord(fcb, iterator);
    if (result)
    {
        /* Only btNotFound gets the mangled-name retry below. */
        if (result != btNotFound)
            goto exit;

        struct cat_desc temp_desc;

        /* Probably the node has mangled name */
        result = cat_lookupmangled(hfsmp, descp, 0, &temp_desc, attrp, NULL);
        if (result)
            goto exit;

        /* The file has mangled name.  Delete the file using full name */
        bzero(iterator, sizeof(*iterator));
        result = buildkey(&temp_desc, (HFSPlusCatalogKey *)&iterator->key);
        cnid = temp_desc.cd_cnid;
        if (result)
        {
            cat_releasedesc(&temp_desc);
            goto exit;
        }

        result = BTDeleteRecord(fcb, iterator);
        if (result)
        {
            cat_releasedesc(&temp_desc);
            goto exit;
        }

        cat_releasedesc(&temp_desc);
    }

    /* Delete thread record. On error, mark volume inconsistent */
    buildthreadkey(cnid, (CatalogKey *)&iterator->key);
    if (BTDeleteRecord(fcb, iterator))
    {
        LFHFS_LOG(LEVEL_ERROR, "cat_delete: failed to delete thread record id=%u on vol=%s\n", cnid, hfsmp->vcbVN);
        hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
    }

exit:
    (void) BTFlushPath(fcb);

    return MacToVFSError(result);
}

/*
 * buildrecord - build a default catalog directory or file record
 *
 * Fills 'crp' from 'attrp' and returns the on-disk record size in
 * '*recordSize'. The record type (folder vs. file) is chosen from the
 * S_IFMT bits of attrp->ca_mode.
 */
static void
buildrecord(struct cat_attr *attrp, cnid_t cnid, u_int32_t encoding, CatalogRecord *crp, u_int32_t *recordSize)
{
    int type = attrp->ca_mode & S_IFMT;
    u_int32_t createtime = to_hfs_time(attrp->ca_itime);

    struct HFSPlusBSDInfo * bsdp = NULL;

    if (type == S_IFDIR)
    {
        crp->recordType = kHFSPlusFolderRecord;
        crp->hfsPlusFolder.flags = attrp->ca_recflags;
        crp->hfsPlusFolder.valence = 0;
        crp->hfsPlusFolder.folderID = cnid;
        crp->hfsPlusFolder.createDate = createtime;
        crp->hfsPlusFolder.contentModDate = createtime;
        crp->hfsPlusFolder.attributeModDate = createtime;
        crp->hfsPlusFolder.accessDate = createtime;
        crp->hfsPlusFolder.backupDate = 0;
        crp->hfsPlusFolder.textEncoding = encoding;
        crp->hfsPlusFolder.folderCount = 0;
        bcopy(attrp->ca_finderinfo, &crp->hfsPlusFolder.userInfo, 32);
        bsdp = &crp->hfsPlusFolder.bsdInfo;
        bsdp->special.linkCount = 1;
        *recordSize = sizeof(HFSPlusCatalogFolder);
    }
    else
    {
        crp->recordType = kHFSPlusFileRecord;
        crp->hfsPlusFile.flags = attrp->ca_recflags;
        crp->hfsPlusFile.reserved1 = 0;
        crp->hfsPlusFile.fileID = cnid;
        crp->hfsPlusFile.createDate = createtime;
        crp->hfsPlusFile.contentModDate = createtime;
        crp->hfsPlusFile.accessDate = createtime;
        crp->hfsPlusFile.attributeModDate = createtime;
        crp->hfsPlusFile.backupDate = 0;
        crp->hfsPlusFile.textEncoding = encoding;
        crp->hfsPlusFile.reserved2 = 0;
        bcopy(attrp->ca_finderinfo, &crp->hfsPlusFile.userInfo, 32);
        bsdp = &crp->hfsPlusFile.bsdInfo;
        /* BLK/CHR need to save the device info */
        if (type == S_IFBLK || type == S_IFCHR)
        {
            bsdp->special.rawDevice = attrp->ca_rdev;
        } else {
            bsdp->special.linkCount = 1;
        }
        /* Zero both fork descriptors (data + resource) in one shot. */
        bzero(&crp->hfsPlusFile.dataFork, 2*sizeof(HFSPlusForkData));
        *recordSize = sizeof(HFSPlusCatalogFile);
    }
    /* Common BSD info for both record types. */
    bsdp->ownerID = attrp->ca_uid;
    bsdp->groupID = attrp->ca_gid;
    bsdp->fileMode = attrp->ca_mode;
    bsdp->adminFlags = attrp->ca_flags >> 16;
    bsdp->ownerFlags = attrp->ca_flags & 0x000000FF;

}

/*
 * cat_create - create a node in the catalog
 * using MacRoman encoding
 *
 * NOTE: both the catalog file and attribute file locks must
 * be held before calling this function.
 *
 * The caller is responsible for releasing the output
 * catalog descriptor (when supplied outdescp is non-null).
 */
int
cat_create(struct hfsmount *hfsmp, cnid_t new_fileid, struct cat_desc *descp, struct cat_attr *attrp, struct cat_desc *out_descp)
{
    int result = 0;

    FCB * fcb = hfsmp->hfs_catalog_cp->c_datafork;
    BTreeIterator* iterator = NULL;
    HFSPlusCatalogKey* key = NULL;
    CatalogRecord* data = NULL;
    FSBufferDescriptor btdata = {0};
    u_int32_t datalen;
    u_int32_t encoding = kTextEncodingMacRoman;

    /* The caller is expected to reserve a CNID before calling this function! */

    /* Get space for iterator, key and data */
    iterator = hfs_mallocz(sizeof(BTreeIterator));
    key = hfs_mallocz(sizeof(HFSPlusCatalogKey));
    data = hfs_mallocz(sizeof(CatalogRecord));

    if ( (iterator == NULL) || (key == NULL) || (data == NULL) )
    {
        result = ENOMEM;
        goto exit;
    }

    result = buildkey(descp, key);
    if (result)
        goto exit;

    /*
     * Insert the thread record first
     */
    datalen = buildthread((void*)key, data, S_ISDIR(attrp->ca_mode));
    btdata.bufferAddress = data;
    btdata.itemSize = datalen;
    btdata.itemCount = 1;

    /* Caller asserts the following:
     * 1) this CNID is not in use by any orphaned EAs
     * 2) There are no lingering cnodes (removed on-disk but still in-core) with this CNID
     * 3) There are no thread or catalog records for this ID
     */
    buildthreadkey(new_fileid, (CatalogKey *) &iterator->key);
    result = BTInsertRecord(fcb, iterator, &btdata, datalen);
    if (result)
    {
        goto exit;
    }

    /*
     * Now insert the file/directory record
     */
    buildrecord(attrp, new_fileid, encoding, data, &datalen);
    btdata.bufferAddress = data;
    btdata.itemSize = datalen;
    btdata.itemCount = 1;

    bcopy(key, &iterator->key, sizeof(HFSPlusCatalogKey));

    result = BTInsertRecord(fcb, iterator, &btdata, datalen);
    if (result)
    {
        if (result == btExists)
            result = EEXIST;

        /* Back out the thread record */
        buildthreadkey(new_fileid, (CatalogKey *)&iterator->key);
        if (BTDeleteRecord(fcb, iterator))
        {
            /* Error on deleting extra thread record, mark
             * volume inconsistent
             */
            LFHFS_LOG(LEVEL_ERROR, "cat_create() failed to delete thread record id=%u on vol=%s\n", new_fileid, hfsmp->vcbVN);
            hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED);
        }

        goto exit;
    }

    /*
     * Insert was successful, update name, parent and volume
     */
    if (out_descp != NULL)
    {
        HFSPlusCatalogKey * pluskey = NULL;

        pluskey = (HFSPlusCatalogKey *)&iterator->key;

        builddesc(pluskey, new_fileid, iterator->hint.nodeNum, encoding,
                  S_ISDIR(attrp->ca_mode), out_descp);
    }
    attrp->ca_fileid = new_fileid;

exit:
    (void) BTFlushPath(fcb);
    if (iterator)
        hfs_free(iterator);
    if (key)
        hfs_free(key);
    if (data)
        hfs_free(data);

    return MacToVFSError(result);
}

/* This function sets kHFSHasChildLinkBit in a directory hierarchy in the
 * catalog btree of given cnid by walking up the parent chain till it reaches
 * either the root folder, or the private metadata directory for storing
 * directory hard links. This function updates the corresponding in-core
 * cnode, if any, and the directory record in the catalog btree.
 * On success, returns zero. On failure, returns non-zero value.
 */
int
cat_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid)
{
    int retval = 0;
    int lockflags = 0;
    struct cat_desc desc;
    struct cat_attr attr = {0};

    while ((cnid != kHFSRootFolderID) && (cnid != kHFSRootParentID) &&
           (cnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid)) {
        /* Update the bit in corresponding cnode, if any, in the hash.
         * If the cnode has the bit already set, stop the traversal.
         */
        retval = hfs_chash_set_childlinkbit(hfsmp, cnid);
        if (retval == 0) {
            break;
        }

        /* Update the catalog record on disk if either cnode was not
         * found in the hash, or if a cnode was found and the cnode
         * did not have the bit set previously.
         */
        retval = hfs_start_transaction(hfsmp);
        if (retval) {
            break;
        }
        lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

        /* Look up our catalog folder record */
        retval = cat_idlookup(hfsmp, cnid, 0, 0, &desc, &attr, NULL);
        if (retval) {
            hfs_systemfile_unlock(hfsmp, lockflags);
            hfs_end_transaction(hfsmp);
            break;
        }

        /* Update the bit in the catalog record */
        attr.ca_recflags |= kHFSHasChildLinkMask;
        retval = cat_update(hfsmp, &desc, &attr, NULL, NULL);
        if (retval) {
            hfs_systemfile_unlock(hfsmp, lockflags);
            hfs_end_transaction(hfsmp);
            cat_releasedesc(&desc);
            break;
        }

        hfs_systemfile_unlock(hfsmp, lockflags);
        hfs_end_transaction(hfsmp);

        /* Move one level up the hierarchy before releasing the descriptor. */
        cnid = desc.cd_parentcnid;
        cat_releasedesc(&desc);
    }

    return retval;
}

/* This function traverses the parent directory hierarchy from the given
 * directory to one level below root directory and checks if any of its
 * ancestors is -
 * 1. A directory hard link.
 * 2. The 'pointed at' directory.
 * If any of these conditions fail or an internal error is encountered
 * during look up of the catalog record, this function returns non-zero value.
+ */ +int +cat_check_link_ancestry(struct hfsmount *hfsmp, cnid_t cnid, cnid_t pointed_at_cnid) +{ + FSBufferDescriptor btdata; + HFSPlusCatalogFolder folder; + int invalid = 0; + int result; + + BDINIT(btdata, &folder); + BTreeIterator* ip = hfs_mallocz(sizeof(BTreeIterator)); + if (ip == NULL) + return ENOMEM; + + HFSPlusCatalogKey* keyp = (HFSPlusCatalogKey *)&ip->key; + FCB *fcb = hfsmp->hfs_catalog_cp->c_datafork; + + while (cnid != kHFSRootParentID) + { + /* Check if the 'pointed at' directory is an ancestor */ + if (pointed_at_cnid == cnid) + { + invalid = 1; + break; + } + if ((result = getkey(hfsmp, cnid, (CatalogKey *)keyp))) { + LFHFS_LOG(LEVEL_ERROR, "cat_check_link_ancestry: getkey failed id=%u, vol=%s\n", cnid, hfsmp->vcbVN); + invalid = 1; /* On errors, assume an invalid parent */ + break; + } + if ((result = BTSearchRecord(fcb, ip, &btdata, NULL, NULL))) { + LFHFS_LOG(LEVEL_ERROR, "cat_check_link_ancestry: cannot find id=%u, vol=%s\n", cnid, hfsmp->vcbVN); + invalid = 1; /* On errors, assume an invalid parent */ + break; + } + /* Check if this ancestor is a directory hard link */ + if (folder.flags & kHFSHasLinkChainMask) { + invalid = 1; + break; + } + cnid = keyp->parentID; + } + + hfs_free(ip); + return (invalid); +} + + +// --------------------------------------- Hard Link Support --------------------------------------------- + + +/* + * Resolve hard link reference to obtain the inode record. 
+ */ +int +cat_resolvelink(struct hfsmount *hfsmp, u_int32_t linkref, int isdirlink, struct HFSPlusCatalogFile *recp) +{ + FSBufferDescriptor btdata; + BTreeIterator *iterator; + struct cat_desc idesc; + char inodename[32]; + cnid_t parentcnid; + int result = 0; + + BDINIT(btdata, recp); + + if (isdirlink) { + MAKE_DIRINODE_NAME(inodename, sizeof(inodename), (unsigned int)linkref); + parentcnid = hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid; + } else { + MAKE_INODE_NAME(inodename, sizeof(inodename), (unsigned int)linkref); + parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + } + + /* Get space for iterator */ + iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) + { + return ENOMEM; + } + + /* Build a descriptor for private dir. */ + idesc.cd_parentcnid = parentcnid; + idesc.cd_nameptr = (const u_int8_t *)inodename; + idesc.cd_namelen = strlen(inodename); + idesc.cd_flags = 0; + idesc.cd_hint = 0; + idesc.cd_encoding = 0; + (void) buildkey(&idesc, (HFSPlusCatalogKey *)&iterator->key); + + result = BTSearchRecord(VTOF(HFSTOVCB(hfsmp)->catalogRefNum), iterator,&btdata, NULL, NULL); + + if (result == 0) { + /* Make sure there's a reference */ + if (recp->hl_linkCount == 0) + recp->hl_linkCount = 2; + } else { + LFHFS_LOG(LEVEL_ERROR, "cat_resolvelink: can't find inode=%s on vol=%s\n", inodename, hfsmp->vcbVN); + } + + hfs_free(iterator); + + return (result ? ENOENT : 0); +} + +/* + * Resolve hard link reference to obtain the inode number. + */ +static int +resolvelinkid(struct hfsmount *hfsmp, u_int32_t linkref, ino_t *ino) +{ + struct HFSPlusCatalogFile record; + int error; + + /* + * Since we know resolvelinkid is only called from + * cat_getdirentries, we can assume that only file + * hardlinks need to be resolved (cat_getdirentries + * can resolve directory hardlinks in place). 
+ */ + error = cat_resolvelink(hfsmp, linkref, 0, &record); + if (error == 0) { + if (record.fileID == 0) + error = ENOENT; + else + *ino = record.fileID; + } + return (error); +} + + +/* + * cat_lookup_lastlink - find the last sibling link in the chain (no "next" ptr) + */ +int +cat_lookup_lastlink(struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t *lastlink, struct cat_desc *cdesc) +{ + FCB * fcb; + BTreeIterator * iterator; + FSBufferDescriptor btdata = {0}; + struct HFSPlusCatalogFile file; + int result = 0; + int itercount = 0; + int foundlast = 0; + cnid_t currentlink = linkfileid; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + + /* Create an iterator for use by us temporarily */ + iterator = hfs_mallocz(sizeof(*iterator)); + if (iterator == NULL) + return ENOMEM; + + while ((foundlast == 0) && (itercount < HFS_LINK_MAX )) { + itercount++; + bzero(iterator, sizeof(*iterator)); + + if ((result = getkey(hfsmp, currentlink, (CatalogKey *)&iterator->key))) { + goto exit; + } + BDINIT(btdata, &file); + + if ((result = BTSearchRecord(fcb, iterator, &btdata, NULL, NULL))) { + goto exit; + } + + /* The prev/next chain is only valid when kHFSHasLinkChainMask is set. */ + if (file.flags & kHFSHasLinkChainMask) { + cnid_t parent; + + parent = ((HFSPlusCatalogKey *)&iterator->key)->parentID; + /* + * The raw inode for a directory hardlink doesn't have a chain. + * Its link information lives in an EA. + */ + if (parent == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { + /* We don't iterate to find the oldest directory hardlink. */ + result = ENOLINK; + goto exit; + } + else if (parent == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) { + /* Raw inode for file hardlink (the base inode) */ + currentlink = file.hl_firstLinkID; + + /* + * One minor special-casing here is necessary. + * If our ID brought us to the raw hardlink inode, and it does + * not have any siblings, then it's an open-unlinked file, and we + * should not proceed any further. 
+ */ + if (currentlink == 0) { + result = ENOLINK; + goto exit; + } + } + else { + /* Otherwise, this item's parent is a legitimate directory in the namespace */ + if (file.hl_nextLinkID == 0) { + /* If nextLinkID is 0, then we found the end; no more hardlinks */ + foundlast = 1; + *lastlink = currentlink; + /* + * Since we had to construct a catalog key to do this lookup + * we still hold it in-hand. We might as well use it to build + * the descriptor that the caller asked for. + */ + builddesc ((HFSPlusCatalogKey*)&iterator->key, currentlink, 0, 0, 0, cdesc); + break; + } + + currentlink = file.hl_nextLinkID; + } + } + else { + /* Sorry, can't help you without a link chain */ + result = ENOLINK; + goto exit; + } + } +exit: + /* If we didn't find what we were looking for, zero out the args */ + if (foundlast == 0) { + if (cdesc) { + bzero (cdesc, sizeof(struct cat_desc)); + } + if (lastlink) { + *lastlink = 0; + } + } + + hfs_free(iterator); + return MacToVFSError(result); +} + +/* + * cat_lookuplink - lookup a link by it's name + */ +int +cat_lookuplink(struct hfsmount *hfsmp, struct cat_desc *descp, cnid_t *linkfileid, cnid_t *prevlinkid, cnid_t *nextlinkid) +{ + FCB * fcb; + BTreeIterator * iterator; + FSBufferDescriptor btdata; + struct HFSPlusCatalogFile file; + int result; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + + /* Create an iterator for use by us temporarily */ + iterator = hfs_mallocz(sizeof(*iterator)); + if (iterator == NULL) + return ENOMEM; + + if ((result = buildkey(descp, (HFSPlusCatalogKey *)&iterator->key))) { + goto exit; + } + BDINIT(btdata, &file); + + if ((result = BTSearchRecord(fcb, iterator, &btdata, NULL, NULL))) { + goto exit; + } + if (file.recordType != kHFSPlusFileRecord) { + result = ENOENT; + goto exit; + } + *linkfileid = file.fileID; + + if (file.flags & kHFSHasLinkChainMask) { + *prevlinkid = file.hl_prevLinkID; + *nextlinkid = file.hl_nextLinkID; + } else { + *prevlinkid = 0; + *nextlinkid = 0; + } +exit: + 
hfs_free(iterator); + return MacToVFSError(result); +} + +/* + * cat_deletelink - delete a link from the catalog + */ +int +cat_deletelink(struct hfsmount *hfsmp, struct cat_desc *descp) +{ + struct HFSPlusCatalogFile file = {0}; + struct cat_attr cattr = {0}; + uint32_t totalBlocks; + int result = 0; + + cattr.ca_fileid = descp->cd_cnid; + + /* Directory links have alias content to remove. */ + if (descp->cd_flags & CD_ISDIR) { + FCB * fcb; + BTreeIterator * iterator; + FSBufferDescriptor btdata; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + + /* Borrow the btcb iterator since we have an exclusive catalog lock. */ + iterator = &((BTreeControlBlockPtr)(fcb->ff_sysfileinfo))->iterator; + iterator->hint.nodeNum = 0; + + if ((result = buildkey(descp, (HFSPlusCatalogKey *)&iterator->key))) { + goto exit; + } + BDINIT(btdata, &file); + + if ((result = BTSearchRecord(fcb, iterator, &btdata, NULL, NULL))) { + goto exit; + } + } + + result = cat_delete(hfsmp, descp, &cattr); + + if ((result == 0) && + (descp->cd_flags & CD_ISDIR) && + (file.recordType == kHFSPlusFileRecord)) { + + totalBlocks = file.resourceFork.totalBlocks; + + for (int i = 0; (i < 8) && (totalBlocks > 0); i++) { + if ((file.resourceFork.extents[i].blockCount == 0) && + (file.resourceFork.extents[i].startBlock == 0)) { + break; + } + + (void) BlockDeallocate(hfsmp,file.resourceFork.extents[i].startBlock,file.resourceFork.extents[i].blockCount, 0); + + totalBlocks -= file.resourceFork.extents[i].blockCount; + file.resourceFork.extents[i].startBlock = 0; + file.resourceFork.extents[i].blockCount = 0; + } + } +exit: + return (result); +} + +/* + * update_siblinglinks_callback - update a link's chain + */ + +struct linkupdate_state { + cnid_t filelinkid; + cnid_t prevlinkid; + cnid_t nextlinkid; +}; + +static int +update_siblinglinks_callback(__unused const CatalogKey *ckp, CatalogRecord *crp, struct linkupdate_state *state) +{ + HFSPlusCatalogFile *file; + + if (crp->recordType != kHFSPlusFileRecord) { + 
LFHFS_LOG(LEVEL_ERROR, "update_siblinglinks_callback: unexpected rec type %d\n", crp->recordType); + return (btNotFound); + } + + file = (struct HFSPlusCatalogFile *)crp; + if (file->flags & kHFSHasLinkChainMask) { + if (state->prevlinkid != HFS_IGNORABLE_LINK) { + file->hl_prevLinkID = state->prevlinkid; + } + if (state->nextlinkid != HFS_IGNORABLE_LINK) { + file->hl_nextLinkID = state->nextlinkid; + } + } else { + LFHFS_LOG(LEVEL_ERROR, "update_siblinglinks_callback: file %d isn't a chain\n", file->fileID); + } + return (0); +} + +/* + * cat_update_siblinglinks - update a link's chain + */ +int +cat_update_siblinglinks(struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t prevlinkid, cnid_t nextlinkid) +{ + FCB * fcb; + BTreeIterator * iterator; + struct linkupdate_state state; + int result; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + state.filelinkid = linkfileid; + state.prevlinkid = prevlinkid; + state.nextlinkid = nextlinkid; + + /* Create an iterator for use by us temporarily */ + iterator = hfs_mallocz(sizeof(*iterator)); + if (iterator == NULL) + return ENOMEM; + + result = getkey(hfsmp, linkfileid, (CatalogKey *)&iterator->key); + if (result == 0) { + result = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr)update_siblinglinks_callback, &state); + (void) BTFlushPath(fcb); + } else { + LFHFS_LOG(LEVEL_ERROR, "cat_update_siblinglinks: couldn't resolve cnid=%d, vol=%s\n", linkfileid, hfsmp->vcbVN); + } + + hfs_free(iterator); + return MacToVFSError(result); +} + +void +cat_convertattr( + struct hfsmount *hfsmp, + CatalogRecord * recp, + struct cat_attr *attrp, + struct cat_fork *datafp, + struct cat_fork *rsrcfp) +{ + getbsdattr(hfsmp, (struct HFSPlusCatalogFile *)recp, attrp); + + if (isadir(recp)) + { + bzero(datafp, sizeof(*datafp)); + }else { + /* Convert the data fork. 
*/ + datafp->cf_size = recp->hfsPlusFile.dataFork.logicalSize; + datafp->cf_new_size = 0; + datafp->cf_blocks = recp->hfsPlusFile.dataFork.totalBlocks; + datafp->cf_bytesread = 0; + datafp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.dataFork.extents[0], + &datafp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + + /* Convert the resource fork. */ + rsrcfp->cf_size = recp->hfsPlusFile.resourceFork.logicalSize; + rsrcfp->cf_new_size = 0; + rsrcfp->cf_blocks = recp->hfsPlusFile.resourceFork.totalBlocks; + datafp->cf_bytesread = 0; + rsrcfp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.resourceFork.extents[0], + &rsrcfp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + } +} + +/* Create and write an alias that points at the directory represented by given + * inode number on the same volume. Directory hard links are visible as + * aliases in pre-Leopard systems and this function creates these aliases. + * + * Note: This code is very specific to creating alias for the purpose + * of directory hard links only, and should not be generalized. + */ +static int +cat_makealias(struct hfsmount *hfsmp, u_int32_t inode_num, struct HFSPlusCatalogFile *crp) +{ + GenericLFBufPtr bp = NULL; + daddr64_t blkno; + u_int32_t blkcount; + int blksize; + int sectorsize; + int result; + HFSPlusForkData *rsrcforkp; + char *alias; + uint32_t *valptr; + + rsrcforkp = &(crp->resourceFork); + + blksize = hfsmp->blockSize; + blkcount = howmany(kHFSAliasSize, blksize); + sectorsize = hfsmp->hfs_logical_block_size; + bzero(rsrcforkp, sizeof(HFSPlusForkData)); + + /* Allocate some disk space for the alias content. */ + result = BlockAllocate(hfsmp, 0, blkcount, blkcount, + HFS_ALLOC_FORCECONTIG | HFS_ALLOC_METAZONE, + &rsrcforkp->extents[0].startBlock, + &rsrcforkp->extents[0].blockCount); + /* Did it fail with an out of space error? If so, re-try and allow journal flushing. 
*/ + if (result == dskFulErr ) { + result = BlockAllocate(hfsmp, 0, blkcount, blkcount, + HFS_ALLOC_FORCECONTIG | HFS_ALLOC_METAZONE | HFS_ALLOC_FLUSHTXN, + &rsrcforkp->extents[0].startBlock, + &rsrcforkp->extents[0].blockCount); + } + + if (result) { + rsrcforkp->extents[0].startBlock = 0; + goto exit; + } + + /* Acquire a buffer cache block for our block. */ + blkno = ((u_int64_t)rsrcforkp->extents[0].startBlock * (u_int64_t)blksize) / sectorsize; + blkno += hfsmp->hfsPlusIOPosOffset / sectorsize; + + bp = lf_hfs_generic_buf_allocate( hfsmp->hfs_devvp, blkno, roundup(kHFSAliasSize, hfsmp->hfs_logical_block_size), 0); + result = lf_hfs_generic_buf_read(bp); + if (result) { + goto exit; + } + + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, bp); + } + + /* Generate alias content */ + alias = (char *)bp->pvData; + bzero(alias, bp->uDataSize); + bcopy(hfs_dirlink_alias_rsrc, alias, kHFSAliasSize); + + /* Set the volume create date, local time in Mac OS format */ + valptr = (uint32_t *)(alias + kHFSAliasVolCreateDateOffset); + *valptr = OSSwapHostToBigInt32(hfsmp->localCreateDate); + + /* Set id of the parent of the target directory */ + valptr = (uint32_t *)(alias + kHFSAliasParentIDOffset); + *valptr = OSSwapHostToBigInt32(hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid); + + /* Set id of the target directory */ + valptr = (uint32_t *)(alias + kHFSAliasTargetIDOffset); + *valptr = OSSwapHostToBigInt32(inode_num); + + /* Write alias content to disk. */ + if (hfsmp->jnl) { + journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); + } else + + if ((result = lf_hfs_generic_buf_write(bp))) { + goto exit; + } + + /* Finish initializing the fork data. 
*/ + rsrcforkp->logicalSize = kHFSAliasSize; + rsrcforkp->totalBlocks = rsrcforkp->extents[0].blockCount; + +exit: + if (bp) { + lf_hfs_generic_buf_release(bp); + } + + if (result && rsrcforkp->extents[0].startBlock != 0) { + (void) BlockDeallocate(hfsmp, rsrcforkp->extents[0].startBlock, rsrcforkp->extents[0].blockCount, 0); + rsrcforkp->extents[0].startBlock = 0; + rsrcforkp->extents[0].blockCount = 0; + rsrcforkp->logicalSize = 0; + rsrcforkp->totalBlocks = 0; + } + return (result); +} + +/* + * cat_createlink - create a link in the catalog + * + * The following cat_attr fields are expected to be set: + * ca_linkref + * ca_itime + * ca_mode (S_IFREG) + * ca_recflags + * ca_flags + * ca_finderinfo (type and creator) + */ +int +cat_createlink(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp, cnid_t nextlinkid, cnid_t *linkfileid) +{ + FCB * fcb; + struct btobj * bto; + FSBufferDescriptor btdata; + HFSPlusForkData *rsrcforkp; + u_int32_t nextCNID; + u_int32_t datalen; + int thread_inserted = 0; + int alias_allocated = 0; + int result = 0; + + fcb = hfsmp->hfs_catalog_cp->c_datafork; + + /* + * Get the next CNID. Note that we are currently holding catalog lock. + */ + result = cat_acquire_cnid(hfsmp, &nextCNID); + if (result) { + return result; + } + + /* Get space for iterator, key and data */ + bto = hfs_malloc(sizeof(struct btobj)); + bto->iterator.hint.nodeNum = 0; + rsrcforkp = &bto->data.hfsPlusFile.resourceFork; + + result = buildkey(descp, &bto->key); + if (result) { + LFHFS_LOG(LEVEL_ERROR, "cat_createlink: err %d from buildkey\n", result); + goto exit; + } + + /* + * Insert the thread record first. 
+ */ + datalen = buildthread((void*)&bto->key, &bto->data, 0); + btdata.bufferAddress = &bto->data; + btdata.itemSize = datalen; + btdata.itemCount = 1; + + buildthreadkey(nextCNID, (CatalogKey *) &bto->iterator.key); + result = BTInsertRecord(fcb, &bto->iterator, &btdata, datalen); + if (result) { + goto exit; + } + thread_inserted = 1; + + /* + * Now insert the link record. + */ + buildrecord(attrp, nextCNID, kTextEncodingMacUnicode, &bto->data, &datalen); + + bto->data.hfsPlusFile.hl_prevLinkID = 0; + bto->data.hfsPlusFile.hl_nextLinkID = nextlinkid; + bto->data.hfsPlusFile.hl_linkReference = attrp->ca_linkref; + + /* For directory hard links, create alias in resource fork */ + if (descp->cd_flags & CD_ISDIR) { + if ((result = cat_makealias(hfsmp, attrp->ca_linkref, &bto->data.hfsPlusFile))) { + goto exit; + } + alias_allocated = 1; + } + btdata.bufferAddress = &bto->data; + btdata.itemSize = datalen; + btdata.itemCount = 1; + + bcopy(&bto->key, &bto->iterator.key, sizeof(bto->key)); + + result = BTInsertRecord(fcb, &bto->iterator, &btdata, datalen); + if (result) { + if (result == btExists) + result = EEXIST; + goto exit; + } + if (linkfileid != NULL) { + *linkfileid = nextCNID; + } +exit: + if (result) { + if (thread_inserted) { + LFHFS_LOG(LEVEL_ERROR, "cat_createlink: BTInsertRecord err=%d, vol=%s\n", MacToVFSError(result), hfsmp->vcbVN); + + buildthreadkey(nextCNID, (CatalogKey *)&bto->iterator.key); + if (BTDeleteRecord(fcb, &bto->iterator)) { + LFHFS_LOG(LEVEL_ERROR, "cat_createlink: failed to delete thread record on volume %s\n", hfsmp->vcbVN); + hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED); + } + } + if (alias_allocated && rsrcforkp->extents[0].startBlock != 0) { + (void) BlockDeallocate(hfsmp, rsrcforkp->extents[0].startBlock, + rsrcforkp->extents[0].blockCount, 0); + rsrcforkp->extents[0].startBlock = 0; + rsrcforkp->extents[0].blockCount = 0; + } + } + (void) BTFlushPath(fcb); + hfs_free(bto); + + return MacToVFSError(result); +} diff --git 
a/livefiles_hfs_plugin/lf_hfs_catalog.h b/livefiles_hfs_plugin/lf_hfs_catalog.h new file mode 100644 index 0000000..68ea389 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_catalog.h @@ -0,0 +1,290 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_catalog.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#ifndef lf_hfs_catalog_h +#define lf_hfs_catalog_h + +#include "lf_hfs_format.h" +#include "lf_hfs_locks.h" + +#include + +#define HFS_IDHASH_DEFAULT (64) + +/* + * Catalog Operations Hint + * + * lower 16 bits: count of B-tree insert operations + * upper 16 bits: count of B-tree delete operations + * + */ +#define CAT_DELETE 0x00010000 +#define CAT_CREATE 0x00000002 +#define CAT_RENAME 0x00010002 +#define CAT_EXCHANGE 0x00010002 + +typedef struct +{ + void* pvBuffer; + uint64_t uBufferResid; + uint64_t uBufferSize; +} ReadDirBuff_s, *ReadDirBuff_t; + +#define READDIR_BUF_OFFSET(buf) (buf->uBufferSize - buf->uBufferResid) +/* + * Catalog ADTs + * + * The cat_desc, cat_attr, and cat_fork structures are + * use to import/export data to/from the Catalog file. + * The fields in these structures are always in BSD + * runtime format (e.g. dates and names). + */ + +typedef u_int32_t cnid_t; + +/* + * Catalog Node Descriptor (runtime) + */ +struct cat_desc { + u_int8_t cd_flags; /* see below (8 bits) */ + u_int8_t cd_encoding; /* name encoding */ + int16_t cd_namelen; /* length of cnode name */ + cnid_t cd_parentcnid; /* parent directory CNID */ + u_int32_t cd_hint; /* catalog file hint */ + cnid_t cd_cnid; /* cnode id (for getattrlist) */ + const u_int8_t *cd_nameptr; /* pointer to cnode name */ +}; + +/* cd_flags + * + * CD_EOF is used by hfs_vnop_readdir / cat_getdirentries to indicate EOF was + * encountered during a directory enumeration. When this flag is observed + * on the next call to hfs_vnop_readdir it tells the caller that there's no + * need to descend into the catalog as EOF was encountered during the last call. 
+ * This flag should only be set on the descriptor embedded in the directoryhint. + */ + +#define CD_HASBUF 0x01 /* allocated filename buffer */ +#define CD_DECOMPOSED 0x02 /* name is fully decomposed */ +#define CD_EOF 0x04 /* see above */ +#define CD_ISMETA 0x40 /* describes a metadata file */ +#define CD_ISDIR 0x80 /* describes a directory */ + +/* + * Catalog Node Attributes (runtime) + */ +struct cat_attr { + cnid_t ca_fileid; /* inode number (for stat) normally == cnid */ + mode_t ca_mode; /* file access mode and type (16 bits) */ + u_int16_t ca_recflags; /* catalog record flags (16 bit integer) */ + u_int32_t ca_linkcount; /* real hard link count */ + uid_t ca_uid; /* file owner */ + gid_t ca_gid; /* file group */ + union { + dev_t cau_rdev; /* special file device (VBLK or VCHAR only) */ + u_int32_t cau_linkref; /* hardlink reference number */ + } ca_union1; + time_t ca_atime; /* last access time */ + time_t ca_atimeondisk; /* access time value on disk */ + time_t ca_mtime; /* last data modification time */ + time_t ca_ctime; /* last file status change */ + time_t ca_itime; /* file initialization time */ + time_t ca_btime; /* last backup time */ + u_int32_t ca_flags; /* status flags (chflags) */ + union { + u_int32_t cau_blocks; /* total file blocks used (rsrc + data) */ + u_int32_t cau_entries; /* total directory entries (valence) */ + } ca_union2; + union { + u_int32_t cau_dircount; /* count of sub dirs (for posix nlink) */ + u_int32_t cau_firstlink; /* first hardlink link (files only) */ + } ca_union3; + union { + u_int8_t ca_finderinfo[32]; /* Opaque Finder information */ + struct { + FndrFileInfo ca_finderfileinfo; + struct FndrExtendedFileInfo ca_finderextendedfileinfo; + }; + struct { + FndrDirInfo ca_finderdirinfo; + struct FndrExtendedDirInfo ca_finderextendeddirinfo; + }; + }; +}; + +/* Aliases for common fields */ +#define ca_rdev ca_union1.cau_rdev +#define ca_linkref ca_union1.cau_linkref +#define ca_blocks ca_union2.cau_blocks +#define 
ca_entries ca_union2.cau_entries +#define ca_dircount ca_union3.cau_dircount +#define ca_firstlink ca_union3.cau_firstlink +#define ca_bsdflags ca_flags + +/* + * Catalog Node Fork (runtime) + * + * NOTE: this is not the same as a struct HFSPlusForkData + * + * NOTE: if cf_new_size > cf_size, then a write is in progress and is extending + * the EOF; the new EOF will be cf_new_size. Writes and pageouts may validly + * write up to cf_new_size, but reads should only read up to cf_size. When + * an extending write is not in progress, cf_new_size is zero. + */ + +struct cat_fork { + off_t cf_size; /* fork's logical size in bytes */ + off_t cf_new_size; /* fork's logical size after write completes */ + union { + u_int32_t cfu_clump; /* fork's clump size in bytes (sys files only) */ + u_int64_t cfu_bytesread; /* bytes read from this fork */ + } cf_union; + u_int32_t cf_vblocks; /* virtual (unalloated) blocks */ + u_int32_t cf_blocks; /* total blocks used by this fork */ + struct HFSPlusExtentDescriptor cf_extents[8]; /* initial set of extents */ + + /* + * NOTE: If you change this structure, make sure you change you change + * hfs_fork_copy. + */ +}; + +#define cf_clump cf_union.cfu_clump +#define cf_bytesread cf_union.cfu_bytesread + +#define HFS_MAXDIRHINTS 32 +#define HFS_DIRHINT_TTL 45 + +#define HFS_INDEX_MASK 0x03ffffff +#define HFS_INDEX_BITS 26 + +/* Finder Info's file type and creator for directory hard link alias */ +enum { + kHFSAliasType = 0x66647270, /* 'fdrp' */ + kHFSAliasCreator = 0x4D414353 /* 'MACS' */ +}; + +/* + * Directory Hint + * Used to hold state across directory enumerations. 
+ * + */ +struct directoryhint { + TAILQ_ENTRY(directoryhint) dh_link; /* chain */ + int dh_index; /* index into directory (zero relative) */ + u_int32_t dh_threadhint; /* node hint of a directory's thread record */ + u_int32_t dh_time; + struct cat_desc dh_desc; /* entry's descriptor */ +}; +typedef struct directoryhint directoryhint_t; + +/* + * The size of cat_cookie_t must match the size of + * the nreserve struct (in BTreeNodeReserve.c). + */ +typedef struct cat_cookie_t { +#if defined(__LP64__) + char opaque[40]; +#else + char opaque[24]; +#endif +} cat_cookie_t; + +/* Universal catalog key */ +union CatalogKey { + HFSPlusCatalogKey hfsPlus; +}; +typedef union CatalogKey CatalogKey; + +/* Universal catalog data record */ +union CatalogRecord { + int16_t recordType; + HFSPlusCatalogFolder hfsPlusFolder; + HFSPlusCatalogFile hfsPlusFile; + HFSPlusCatalogThread hfsPlusThread; +}; +typedef union CatalogRecord CatalogRecord; + +/* + * Catalog Node Entry + * + * A cat_entry is used for bulk enumerations (hfs_readdirattr). + */ +struct cat_entry { + struct cat_desc ce_desc; + struct cat_attr ce_attr; + off_t ce_datasize; + off_t ce_rsrcsize; + u_int32_t ce_datablks; + u_int32_t ce_rsrcblks; +}; + +/* + * Catalog Node Entry List + * + * A cat_entrylist is a list of Catalog Node Entries. 
+ */ +struct cat_entrylist { + u_int32_t maxentries; /* number of entries requested */ + u_int32_t realentries; /* number of valid entries returned */ + u_int32_t skipentries; /* number of entries skipped (reserved HFS+ files) */ + struct cat_entry entry[1]; /* array of entries */ +}; + +#define CE_LIST_SIZE(entries) \ + sizeof (*ce_list) + (((entries) - 1) * sizeof (struct cat_entry)) + + +typedef struct cat_preflightid { + cnid_t fileid; + LIST_ENTRY(cat_preflightid) id_hash; +} cat_preflightid_t; + +void hfs_idhash_init (struct hfsmount *hfsmp); +void hfs_idhash_destroy (struct hfsmount *hfsmp); + +int cat_binarykeycompare( HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey ); +int CompareExtendedCatalogKeys( HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey ); +void cat_releasedesc( struct cat_desc *descp ); +int cat_lookup(struct hfsmount *hfsmp, struct cat_desc *descp, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, + struct cat_fork *forkp, cnid_t *desc_cnid); +int cat_idlookup(struct hfsmount *hfsmp, cnid_t cnid, int allow_system_files, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, struct cat_fork *forkp); +int cat_lookupmangled(struct hfsmount *hfsmp, struct cat_desc *descp, int wantrsrc, + struct cat_desc *outdescp, struct cat_attr *attrp, struct cat_fork *forkp); +int cat_findname(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *outdescp); +int cat_getdirentries(struct hfsmount *hfsmp, u_int32_t entrycnt, directoryhint_t *dirhint, + ReadDirBuff_s* psReadDirBuffer, int flags, int *items, bool *eofflag, UVFSDirEntry* psDotDotEntry); +int cat_getentriesattr(struct hfsmount *hfsmp, directoryhint_t *dirhint, struct cat_entrylist *ce_list, + int *reachedeof); +bool IsEntryAJnlFile(struct hfsmount *hfsmp, cnid_t cnid); +int cat_preflight(struct hfsmount *hfsmp, uint32_t ops, cat_cookie_t *cookie); +void cat_postflight(struct hfsmount *hfsmp, cat_cookie_t *cookie); +int cat_rename ( struct hfsmount * 
hfsmp, struct cat_desc * from_cdp, struct cat_desc * todir_cdp, + struct cat_desc * to_cdp, struct cat_desc * out_cdp ); +int cat_delete(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp); +int cat_update(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp, + const struct cat_fork *dataforkp, const struct cat_fork *rsrcforkp); +int cat_acquire_cnid (struct hfsmount *hfsmp, cnid_t *new_cnid); +int cat_create(struct hfsmount *hfsmp, cnid_t new_fileid, struct cat_desc *descp, struct cat_attr *attrp, struct cat_desc *out_descp); +int cat_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid); +int cat_check_link_ancestry(struct hfsmount *hfsmp, cnid_t cnid, cnid_t pointed_at_cnid); + +// ------------------------------ Hard-Link Related ------------------------------ + +#define HFS_IGNORABLE_LINK 0x00000001 + +int cat_deletelink(struct hfsmount *hfsmp, struct cat_desc *descp); +int cat_update_siblinglinks(struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t prevlinkid, cnid_t nextlinkid); +int cat_lookup_lastlink(struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t *lastlink, struct cat_desc *cdesc); +int cat_resolvelink(struct hfsmount *hfsmp, u_int32_t linkref, int isdirlink, struct HFSPlusCatalogFile *recp); +int cat_lookuplink(struct hfsmount *hfsmp, struct cat_desc *descp, cnid_t *linkfileid, cnid_t *prevlinkid, cnid_t *nextlinkid); +void cat_convertattr(struct hfsmount *hfsmp, CatalogRecord * recp, struct cat_attr *attrp, struct cat_fork *datafp, struct cat_fork *rsrcfp); +int cat_createlink(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp, cnid_t nextlinkid, cnid_t *linkfileid); +#endif /* lf_hfs_catalog_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_chash.c b/livefiles_hfs_plugin/lf_hfs_chash.c new file mode 100644 index 0000000..70d99ca --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_chash.c @@ -0,0 +1,537 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. 
+ * + * lf_hfs_chash.c + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#include "lf_hfs_chash.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_locks.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_vfsutils.h" + +#define DESIRED_VNODES (128) /* number of vnodes desired */ +#define CNODEHASH(hfsmp, inum) (&hfsmp->hfs_cnodehashtbl[(inum) & hfsmp->hfs_cnodehash]) + +void +hfs_chash_wait(struct hfsmount *hfsmp, struct cnode *cp) +{ + SET(cp->c_hflag, H_WAITING); + pthread_cond_wait(&cp->c_cacsh_cond, &hfsmp->hfs_chash_mutex); +} + +void +hfs_chash_broadcast_and_unlock(struct hfsmount *hfsmp, struct cnode *cp) +{ + pthread_cond_signal(&cp->c_cacsh_cond); + hfs_chash_unlock(hfsmp); +} + +void +hfs_chash_raise_OpenLookupCounter(struct cnode *cp) +{ + if (!cp || cp->uOpenLookupRefCount == UINT32_MAX) + { + LFHFS_LOG(LEVEL_ERROR, + "hfs_chash_raise_OpenLookupCounter:" + "cp[%p] is NULL or reached max Open Lookup Counter", cp); + hfs_assert(0); + } + cp->uOpenLookupRefCount++; +} + +void +hfs_chash_lower_OpenLookupCounter(struct cnode *cp) +{ + if (cp->uOpenLookupRefCount == 0) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_chash_lower_OpenLookupCounter: reached min Open Lookup Counter \n"); + hfs_assert(0); + } + cp->uOpenLookupRefCount--; +} + +/* + * Initialize cnode hash table. 
+ */ +void +hfs_chashinit() +{ +} + +void hfs_chash_lock(struct hfsmount *hfsmp) +{ + lf_lck_mtx_lock(&hfsmp->hfs_chash_mutex); +} + +void hfs_chash_lock_spin(struct hfsmount *hfsmp) +{ + lf_lck_mtx_lock_spin(&hfsmp->hfs_chash_mutex); +} + + +void hfs_chash_unlock(struct hfsmount *hfsmp) +{ + lf_lck_mtx_unlock(&hfsmp->hfs_chash_mutex); +} + +void +hfs_chashinit_finish(struct hfsmount *hfsmp) +{ + lf_lck_mtx_init(&hfsmp->hfs_chash_mutex); + hfsmp->hfs_cnodehashtbl = hashinit(DESIRED_VNODES / 4, &hfsmp->hfs_cnodehash); +} + +void +hfs_delete_chash(struct hfsmount *hfsmp) +{ + struct cnode *cp; + hfs_chash_lock_spin(hfsmp); + + for (ino_t inum = 0; inum < (DESIRED_VNODES/4); inum++) + { + for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) { + LFHFS_LOG(LEVEL_ERROR, "hfs_delete_chash: Cnode for file [%s], cnid: [%d] with open count [%d] left in the cache \n", cp->c_desc.cd_nameptr, cp->c_desc.cd_cnid, cp->uOpenLookupRefCount); + } + } + + + hfs_chash_unlock(hfsmp); + lf_lck_mtx_destroy(&hfsmp->hfs_chash_mutex); + hfs_free(hfsmp->hfs_cnodehashtbl); +} + +/* + * Use the device, fileid pair to find the incore cnode. + * If no cnode if found one is created + * + * If it is in core, but locked, wait for it. + * + * If the cnode is C_DELETED, then return NULL since that + * inum is no longer valid for lookups (open-unlinked file). + * + * If the cnode is C_DELETED but also marked C_RENAMED, then that means + * the cnode was renamed over and a new entry exists in its place. The caller + * should re-drive the lookup to get the newer entry. In that case, we'll still + * return NULL for the cnode, but also return GNV_CHASH_RENAMED in the output flags + * of this function to indicate the caller that they should re-drive. 
+ */ +struct cnode* +hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock, int *out_flags, int *hflags) +{ + struct cnode *cp; + struct cnode *ncp = NULL; + vnode_t vp; + + /* + * Go through the hash list + * If a cnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again. + */ +loop: + hfs_chash_lock_spin(hfsmp); +loop_with_lock: + for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) + { + if (cp->c_fileid != inum) + { + continue; + } + /* + * Wait if cnode is being created, attached to or reclaimed. + */ + if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) + { + hfs_chash_wait(hfsmp, cp); + goto loop_with_lock; + } + + vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp; + if (vp == NULL) + { + /* + * The desired vnode isn't there so tag the cnode. + */ + SET(cp->c_hflag, H_ATTACH); + *hflags |= H_ATTACH; + } + + if (ncp) + { + /* + * someone else won the race to create + * this cnode and add it to the hash + * just dump our allocation + */ + hfs_free(ncp); + ncp = NULL; + } + + if (!skiplock) + { + if (hfs_lock(cp, HFS_TRY_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS)) + { + SET(cp->c_hflag, H_WAITING); + hfs_chash_broadcast_and_unlock(hfsmp,cp); + usleep(100); + goto loop; + } + } + vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp; + + /* + * Skip cnodes that are not in the name space anymore + * we need to check with the cnode lock held because + * we may have blocked acquiring the vnode ref or the + * lock on the cnode which would allow the node to be + * unlinked. + * + * Don't return a cnode in this case since the inum + * is no longer valid for lookups. 
+ */ + if (((cp->c_flag & (C_NOEXISTS | C_DELETED)) && !wantrsrc) || + (cp->uOpenLookupRefCount == 0) || + (vp->uValidNodeMagic1 == VALID_NODE_BADMAGIC) || + (vp->uValidNodeMagic2 == VALID_NODE_BADMAGIC)) + { + int renamed = 0; + if (cp->c_flag & C_RENAMED) + renamed = 1; + if (!skiplock) + { + hfs_unlock(cp); + } + + if (vp != NULL) + { + vnode_rele(vp); + } + else + { + hfs_chashwakeup(hfsmp, cp, H_ATTACH); + *hflags &= ~H_ATTACH; + } + + vp = NULL; + cp = NULL; + if (renamed) + { + *out_flags = GNV_CHASH_RENAMED; + } + } + + if (cp) hfs_chash_raise_OpenLookupCounter(cp); + hfs_chash_broadcast_and_unlock(hfsmp,cp); + *vpp = vp; + return (cp); + } + + /* + * Allocate a new cnode + */ + if (skiplock && !wantrsrc) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_chash_getcnode: should never get here when skiplock is set \n"); + hfs_assert(0); + } + + if (ncp == NULL) + { + hfs_chash_unlock(hfsmp); + + ncp = hfs_mallocz(sizeof(struct cnode)); + if (ncp == NULL) + { + return ncp; + } + /* + * since we dropped the chash lock, + * we need to go back and re-verify + * that this node hasn't come into + * existence... 
+ */ + goto loop; + } + + bzero(ncp, sizeof(*ncp)); + + SET(ncp->c_hflag, H_ALLOC); + *hflags |= H_ALLOC; + ncp->c_fileid = (cnid_t) inum; + TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */ + TAILQ_INIT(&ncp->c_originlist); + + lf_lck_rw_init(&ncp->c_rwlock); + lf_cond_init(&ncp->c_cacsh_cond); + + if (!skiplock) + { + (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + } + + /* Insert the new cnode with it's H_ALLOC flag set */ + LIST_INSERT_HEAD(CNODEHASH(hfsmp, inum), ncp, c_hash); + hfs_chash_raise_OpenLookupCounter(ncp); + hfs_chash_unlock(hfsmp); + *vpp = NULL; + return (ncp); +} + +void +hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int hflags) +{ + hfs_chash_lock_spin(hfsmp); + + CLR(cp->c_hflag, hflags); + + if (ISSET(cp->c_hflag, H_WAITING)) { + CLR(cp->c_hflag, H_WAITING); + pthread_cond_broadcast(&cp->c_cacsh_cond); + } + + hfs_chash_unlock(hfsmp); +} + +/* + * Remove a cnode from the hash table and wakeup any waiters. + */ +void +hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp) +{ + hfs_chash_lock_spin(hfsmp); + + LIST_REMOVE(cp, c_hash); + cp->c_hash.le_next = NULL; + cp->c_hash.le_prev = NULL; + + CLR(cp->c_hflag, H_ATTACH | H_ALLOC); + if (ISSET(cp->c_hflag, H_WAITING)) + { + CLR(cp->c_hflag, H_WAITING); + pthread_cond_broadcast(&cp->c_cacsh_cond); + } + hfs_chash_unlock(hfsmp); +} + +/* + * Use the device, inum pair to find the incore cnode. + * + * If it is in core, but locked, wait for it. + */ +struct vnode * +hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc, int skiplock, int allow_deleted) +{ + struct cnode *cp; + struct vnode *vp; + + /* + * Go through the hash list + * If a cnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again. 
+ */ +loop: + hfs_chash_lock_spin(hfsmp); + + for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) { + if (cp->c_fileid != inum) + continue; + /* Wait if cnode is being created or reclaimed. */ + if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) { + SET(cp->c_hflag, H_WAITING); + hfs_chash_broadcast_and_unlock(hfsmp,cp); + usleep(100); + goto loop; + } + /* Obtain the desired vnode. */ + vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp; + if (vp == NULL) + { + goto exit; + } + + if (!skiplock) + { + if (hfs_lock(cp, HFS_TRY_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS)) + { + SET(cp->c_hflag, H_WAITING); + hfs_chash_broadcast_and_unlock(hfsmp,cp); + usleep(100); + goto loop; + } + } + vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp; + + /* + * Skip cnodes that are not in the name space anymore + * we need to check with the cnode lock held because + * we may have blocked acquiring the vnode ref or the + * lock on the cnode which would allow the node to be + * unlinked + */ + if (!allow_deleted) { + if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { + if (!skiplock) hfs_unlock(cp); + goto exit; + } + } + + hfs_chash_raise_OpenLookupCounter(cp); + hfs_chash_broadcast_and_unlock(hfsmp,cp); + return (vp); + } +exit: + hfs_chash_unlock(hfsmp); + return (NULL); +} + +int +hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int existence_only, + int (*callout)(const cnode_t *cp, void *), void * arg) +{ + struct cnode *cp; + int result = ENOENT; + + /* + * Go through the hash list + * If a cnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again. + */ + hfs_chash_lock(hfsmp); + + for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) { + if (cp->c_fileid != inum) + continue; + + /* + * Under normal circumstances, we would want to return ENOENT if a cnode is in + * the hash and it is marked C_NOEXISTS or C_DELETED. 
However, if the CNID + * namespace has wrapped around, then we have the possibility of collisions. + * In that case, we may use this function to validate whether or not we + * should trust the nextCNID value in the hfs mount point. + * + * If we didn't do this, then it would be possible for a cnode that is no longer backed + * by anything on-disk (C_NOEXISTS) to still exist in the hash along with its + * vnode. The cat_create routine could then create a new entry in the catalog + * re-using that CNID. Then subsequent hfs_getnewvnode calls will repeatedly fail + * trying to look it up/validate it because it is marked C_NOEXISTS. So we want + * to prevent that from happening as much as possible. + */ + if (existence_only) { + result = 0; + break; + } + + /* Skip cnodes that have been removed from the catalog */ + if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { + result = EACCES; + break; + } + + /* Skip cnodes being created or reclaimed. */ + if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) { + result = callout(cp, arg); + } + break; + } + hfs_chash_unlock(hfsmp); + + return (result); +} + +/* Search a cnode in the hash. This function does not return cnode which + * are getting created, destroyed or in transition. Note that this function + * does not acquire the cnode hash mutex, and expects the caller to acquire it. + * On success, returns pointer to the cnode found. On failure, returns NULL. + */ +static +struct cnode * +hfs_chash_search_cnid(struct hfsmount *hfsmp, cnid_t cnid) +{ + struct cnode *cp; + + for (cp = CNODEHASH(hfsmp, cnid)->lh_first; cp; cp = cp->c_hash.le_next) { + if (cp->c_fileid == cnid) { + break; + } + } + + /* If cnode is being created or reclaimed, return error. */ + if (cp && ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) { + cp = NULL; + } + + return cp; +} + +/* Search a cnode corresponding to given device and ID in the hash. If the + * found cnode has kHFSHasChildLinkBit cleared, set it. 
If the cnode is not + * found, no new cnode is created and error is returned. + * + * Return values - + * -1 : The cnode was not found. + * 0 : The cnode was found, and the kHFSHasChildLinkBit was already set. + * 1 : The cnode was found, the kHFSHasChildLinkBit was not set, and the + * function had to set that bit. + */ +int +hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid) +{ + int retval = -1; + struct cnode *cp; + + hfs_chash_lock_spin(hfsmp); + + cp = hfs_chash_search_cnid(hfsmp, cnid); + if (cp) { + if (cp->c_attr.ca_recflags & kHFSHasChildLinkMask) { + retval = 0; + } else { + cp->c_attr.ca_recflags |= kHFSHasChildLinkMask; + retval = 1; + } + } + hfs_chash_unlock(hfsmp); + + return retval; +} + +/* + * Remove a cnode from the hash table. + * Need to lock cache from caller + */ +int +hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp) +{ + hfs_chash_lock_spin(hfsmp); + + /* Check if a vnode is getting attached */ + if (ISSET(cp->c_hflag, H_ATTACH)) { + hfs_chash_unlock(hfsmp); + return (EBUSY); + } + if (cp->c_hash.le_next || cp->c_hash.le_prev) { + LIST_REMOVE(cp, c_hash); + cp->c_hash.le_next = NULL; + cp->c_hash.le_prev = NULL; + } + + hfs_chash_unlock(hfsmp); + + return (0); +} + +/* + * mark a cnode as in transition + */ +void +hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp) +{ + hfs_chash_lock_spin(hfsmp); + + SET(cp->c_hflag, H_TRANSIT); + + hfs_chash_unlock(hfsmp); +} diff --git a/livefiles_hfs_plugin/lf_hfs_chash.h b/livefiles_hfs_plugin/lf_hfs_chash.h new file mode 100644 index 0000000..548ae0c --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_chash.h @@ -0,0 +1,30 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_chash.h + * livefiles_hfs + * + * Created by Or Haimovich on 20/3/18. 
+ */ + +#ifndef lf_hfs_chash_h +#define lf_hfs_chash_h +#include "lf_hfs_common.h" +#include "lf_hfs.h" + +struct cnode* hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock, int *out_flags, int *hflags); +void hfs_chash_lock(struct hfsmount *hfsmp); +void hfs_chash_lock_spin(struct hfsmount *hfsmp); +void hfs_chash_lock_convert(struct hfsmount *hfsmp); +void hfs_chash_unlock(struct hfsmount *hfsmp); +void hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int hflags); +void hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp); +struct vnode* hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc, int skiplock, int allow_deleted); +int hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int existence_only, int (*callout)(const cnode_t *cp, void *), void * arg); +int hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid); +int hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp); +void hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp); +void hfs_chash_lower_OpenLookupCounter(struct cnode *cp); +void hfs_chash_raise_OpenLookupCounter(struct cnode *cp); +void hfs_chash_wait(struct hfsmount *hfsmp, struct cnode *cp); + +#endif /* lf_hfs_chash_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_cnode.c b/livefiles_hfs_plugin/lf_hfs_cnode.c new file mode 100644 index 0000000..be2582d --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_cnode.c @@ -0,0 +1,2040 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_cnode.c + * livefiles_hfs + * + * Created by Or Haimovich on 20/3/18. 
 */

#include "lf_hfs_cnode.h"
#include "lf_hfs.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_chash.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_vnops.h"
#include "lf_hfs_logger.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_btrees_internal.h"
#include "lf_hfs_readwrite_ops.h"
/* NOTE(review): duplicate include of lf_hfs_utils.h (already included above). */
#include "lf_hfs_utils.h"
/*
 * NOTE(review): the target of the following #include was lost when this patch
 * was extracted (an angle-bracketed system header was stripped) — restore it
 * from the original file.
 */
#include
#include "lf_hfs_xattr.h"
#include "lf_hfs_link.h"
#include "lf_hfs_generic_buf.h"

/*
 * Free an in-core cnode: release its catalog-descriptor name buffer (if it
 * owns one), destroy its locks, and free the structure itself.
 */
static void
hfs_reclaim_cnode(struct cnode *cp)
{
    /*
     * If the descriptor has a name then release it
     */
    if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0))
    {
        cp->c_desc.cd_flags &= ~CD_HASBUF;
        cp->c_desc.cd_namelen = 0;
        hfs_free((void*)cp->c_desc.cd_nameptr);
        cp->c_desc.cd_nameptr = NULL;
    }

    /*
     * We only call this function if we are in hfs_vnop_reclaim and
     * attempting to reclaim a cnode with only one live fork. Because the vnode
     * went through reclaim, any future attempts to use this item will have to
     * go through lookup again, which will need to create a new vnode. Thus,
     * destroying the locks below is safe.
     */

    lf_lck_rw_destroy(&cp->c_rwlock);
    lf_cond_destroy(&cp->c_cacsh_cond);   /* (sic: field is spelled "cacsh") */
    lf_lck_rw_destroy(&cp->c_truncatelock);

    hfs_free(cp);
}

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 * The cnode of the parent vnode 'dvp' may or may not be locked, depending on
 * the circumstances. The cnode in question (if acquiring the resource fork),
 * may also already be locked at the time we enter this function.
 *
 * Note that there are both input and output flag arguments to this function.
 * If one of the input flags (specifically, GNV_USE_VP), is set, then
 * hfs_getnewvnode will use the parameter *vpp, which is traditionally only
 * an output parameter, as both an input and output parameter. It will use
 * the vnode provided in the output, and pass it to vnode_create with the
 * proper flavor so that a new vnode is _NOT_ created on our behalf when
 * we dispatch to VFS. This may be important in various HFS vnode creation
 * routines, such a create or get-resource-fork, because we risk deadlock if
 * jetsam is involved.
 *
 * Deadlock potential exists if jetsam is synchronously invoked while we are waiting
 * for a vnode to be recycled in order to give it the identity we want. If jetsam
 * happens to target a process for termination that is blocked in-kernel, waiting to
 * acquire the cnode lock on our parent 'dvp', while our current thread has it locked,
 * neither side will make forward progress and the watchdog timer will eventually fire.
 * To prevent this, a caller of hfs_getnewvnode may choose to proactively force
 * any necessary vnode reclamation/recycling while it is not holding any locks and
 * thus not prone to deadlock. If this is the case, GNV_USE_VP will be set and
 * the parameter will be used as described above.
 *
 * !!! !!!!
 * In circumstances when GNV_USE_VP is set, this function _MUST_ clean up and either consume
 * or dispose of the provided vnode. We funnel all errors to a single return value so that
 * if provided_vp is still non-NULL, then we will dispose of the vnode. This will occur in
 * all error cases of this function -- anywhere we zero/NULL out the *vpp parameter. It may
 * also occur if the current thread raced with another to create the same vnode, and we
 * find the entry already present in the cnode hash.
 * !!! !!!
 */
int
hfs_getnewvnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, struct cat_desc *descp, int flags, struct cat_attr *attrp, struct cat_fork *forkp, struct vnode **vpp, int *out_flags)
{
    struct mount *mp = HFSTOVFS(hfsmp);
    struct vnode *vp = NULL;
    struct vnode **cvpp;                  /* points at cp->c_vp or cp->c_rsrc_vp */
    struct vnode *tvp = NULL;             /* the "other" fork's vnode, if any */
    struct cnode *cp = NULL;
    struct filefork *fp = NULL;
    struct vnode *provided_vp = NULL;     /* caller-supplied vnode (GNV_USE_VP) */
    struct vnode_fsparam vfsp = {0};
    enum vtype vtype = IFTOVT(attrp->ca_mode);
    int retval = 0;
    int hflags = 0;                       /* chash state bits to undo on bail-out */
    int issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
    int wantrsrc = flags & GNV_WANTRSRC;; /* NOTE(review): stray second ';' — harmless empty statement */
    int need_update_identity = 0;

    /* Zero out the out_flags */
    *out_flags = 0;

    if (flags & GNV_USE_VP)
    {
        /* Store the provided VP for later use */
        provided_vp = *vpp;
    }

    /* Zero out the vpp regardless of provided input */
    *vpp = NULL;

    if (attrp->ca_fileid == 0)
    {
        retval = ENOENT;
        goto gnv_exit;
    }

    /* Sanity checks: reject impossible types and fork geometry (block/size mismatch). */
    if ( (vtype == VBAD) ||
       ( (vtype != VDIR && forkp &&
       ( (attrp->ca_blocks < forkp->cf_blocks) || (howmany((uint64_t)forkp->cf_size, hfsmp->blockSize) > forkp->cf_blocks) ||
       ( (vtype == VLNK) && ((uint64_t)forkp->cf_size > MAXPATHLEN) ) ) ) ) )
    {
        /* Mark the FS as corrupt and bail out */
        hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED);
        retval = EINVAL;
        goto gnv_exit;
    }

    /*
     * Get a cnode (new or existing)
     */
    cp = hfs_chash_getcnode(hfsmp, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK), out_flags, &hflags);

    /*
     * If the id is no longer valid for lookups we'll get back a NULL cp.
     */
    if (cp == NULL)
    {
        retval = ENOENT;
        goto gnv_exit;
    }

    /*
     * We may have been provided a vnode via
     * GNV_USE_VP. In this case, we have raced with
     * a 2nd thread to create the target vnode. The provided
     * vnode that was passed in will be dealt with at the
     * end of the function, as we don't zero out the field
     * until we're ready to pass responsibility to VFS.
     */


    /*
     * If we get a cnode/vnode pair out of hfs_chash_getcnode, then update the
     * descriptor in the cnode as needed if the cnode represents a hardlink.
     * We want the caller to get the most up-to-date copy of the descriptor
     * as possible. However, we only do anything here if there was a valid vnode.
     * If there isn't a vnode, then the cnode is brand new and needs to be initialized
     * as it doesn't have a descriptor or cat_attr yet.
     *
     * If we are about to replace the descriptor with the user-supplied one, then validate
     * that the descriptor correctly acknowledges this item is a hardlink. We could be
     * subject to a race where the calling thread invoked cat_lookup, got a valid lookup
     * result but the file was not yet a hardlink. With sufficient delay between there
     * and here, we might accidentally copy in the raw inode ID into the descriptor in the
     * call below. If the descriptor's CNID is the same as the fileID then it must
     * not yet have been a hardlink when the lookup occurred.
     */

    if (!(cp->c_flag & (C_DELETED | C_NOEXISTS)))
    {
        //
        // If the bytes of the filename in the descp do not match the bytes in the
        // cnp (and we're not looking up the resource fork), then we want to update
        // the vnode identity to contain the bytes that HFS stores so that when an
        // fsevent gets generated, it has the correct filename. otherwise daemons
        // that match filenames produced by fsevents with filenames they have stored
        // elsewhere (e.g. bladerunner, backupd, mds), the filenames will not match.
        // See: FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
        // for more details.
        //
        if (*vpp && cnp && cnp->cn_nameptr && descp && descp->cd_nameptr && strncmp((const char *)cnp->cn_nameptr, (const char *)descp->cd_nameptr, descp->cd_namelen) != 0)
        {
            vnode_update_identity (*vpp, dvp, (const char *)descp->cd_nameptr, descp->cd_namelen, 0, VNODE_UPDATE_NAME);
        }

        if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0)
        {
            /* If cnode is uninitialized, its c_attr will be zeroed out; cnids wont match. */
            if ((descp->cd_cnid == cp->c_attr.ca_fileid) && (attrp->ca_linkcount != cp->c_attr.ca_linkcount))
            {

                if ((flags & GNV_SKIPLOCK) == 0)
                {
                    /*
                     * Then we took the lock. Drop it before calling
                     * vnode_put, which may invoke hfs_vnop_inactive and need to take
                     * the cnode lock again.
                     */
                    hfs_unlock(cp);
                }

                /*
                 * Emit ERECYCLE and GNV_CAT_ATTRCHANGED to
                 * force a re-drive in the lookup routine.
                 * Drop the iocount on the vnode obtained from
                 * chash_getcnode if needed.
                 */
                if (*vpp != NULL)
                {
                    /*
                     * NOTE(review): upstream kernel HFS drops the iocount with
                     * vnode_put() here; freeing the vnode outright assumes no
                     * other reference exists — confirm against the livefiles
                     * vnode lifecycle.
                     */
                    hfs_free(*vpp);
                    *vpp = NULL;
                }

                /*
                 * If we raced with VNOP_RECLAIM for this vnode, the hash code could
                 * have observed it after the c_vp or c_rsrc_vp fields had been torn down;
                 * the hash code peeks at those fields without holding the cnode lock because
                 * it needs to be fast. As a result, we may have set H_ATTACH in the chash
                 * call above. Since we're bailing out, unset whatever flags we just set, and
                 * wake up all waiters for this cnode.
                 */
                if (hflags)
                {
                    hfs_chashwakeup(hfsmp, cp, hflags);
                }

                *out_flags = GNV_CAT_ATTRCHANGED;
                retval = ERECYCLE;
                goto gnv_exit;
            }
            else
            {
                /*
                 * Otherwise, CNID != fileid. Go ahead and copy in the new descriptor.
                 *
                 * Replacing the descriptor here is fine because we looked up the item without
                 * a vnode in hand before. If a vnode existed, its identity must be attached to this
                 * item. We are not susceptible to the lookup fastpath issue at this point.
                 */
                replace_desc(cp, descp);

                /*
                 * This item was a hardlink, and its name needed to be updated. By replacing the
                 * descriptor above, we've now updated the cnode's internal representation of
                 * its link ID/CNID, parent ID, and its name. However, VFS must now be alerted
                 * to the fact that this vnode now has a new parent, since we cannot guarantee
                 * that the new link lived in the same directory as the alternative name for
                 * this item.
                 */
                if ((*vpp != NULL) && (cnp || cp->c_desc.cd_nameptr))
                {
                    /* we could be requesting the rsrc of a hardlink file... */
                    if (cp->c_desc.cd_nameptr)
                    {
                        // Update the identity with what we have stored on disk as the name of this file.
                        vnode_update_identity (*vpp, dvp, (const char *)cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen, 0, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
                    }
                    else if (cnp)
                    {
                        vnode_update_identity (*vpp, dvp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
                    }
                }
            }
        }
    }

    /*
     * At this point, we have performed hardlink and open-unlinked checks
     * above. We have now validated the state of the vnode that was given back
     * to us from the cnode hash code and find it safe to return.
     */
    if (*vpp != NULL)
    {
        retval = 0;
        goto gnv_exit;
    }

    /*
     * If this is a new cnode then initialize it.
     */
    if (ISSET(cp->c_hflag, H_ALLOC))
    {
        lf_lck_rw_init(&cp->c_truncatelock);

        /* Make sure its still valid (ie exists on disk). */
        if (!(flags & GNV_CREATE))
        {
            int error = 0;
            if (!hfs_valid_cnode (hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid, attrp, &error))
            {
                hfs_chash_abort(hfsmp, cp);
                if ((flags & GNV_SKIPLOCK) == 0)
                {
                    hfs_unlock(cp);
                }

                hfs_reclaim_cnode(cp);
                *vpp = NULL;
                /*
                 * If we hit this case, that means that the entry was there in the catalog when
                 * we did a cat_lookup earlier. Think hfs_lookup. However, in between the time
                 * that we checked the catalog and the time we went to get a vnode/cnode for it,
                 * it had been removed from the namespace and the vnode totally reclaimed. As a result,
                 * it's not there in the catalog during the check in hfs_valid_cnode and we bubble out
                 * an ENOENT. To indicate to the caller that they should really double-check the
                 * entry (it could have been renamed over and gotten a new fileid), we mark a bit
                 * in the output flags.
                 */
                if (error == ENOENT)
                {
                    *out_flags = GNV_CAT_DELETED;
                    retval = ENOENT;
                    goto gnv_exit;
                }

                /*
                 * Also, we need to protect the cat_attr acquired during hfs_lookup and passed into
                 * this function as an argument because the catalog may have changed w.r.t hardlink
                 * link counts and the firstlink field. If that validation check fails, then let
                 * lookup re-drive itself to get valid/consistent data with the same failure condition below.
                 */
                if (error == ERECYCLE)
                {
                    *out_flags = GNV_CAT_ATTRCHANGED;
                    retval = ERECYCLE;
                    goto gnv_exit;
                }
            }
        }
        bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
        bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

        /* The name was inherited so clear descriptor state... */
        descp->cd_nameptr = NULL;
        descp->cd_namelen = 0;
        descp->cd_flags &= ~CD_HASBUF;

        /* Tag hardlinks */
        if ( (vtype == VREG || vtype == VDIR || vtype == VSOCK || vtype == VFIFO) &&
             (descp->cd_cnid != attrp->ca_fileid || ISSET(attrp->ca_recflags, kHFSHasLinkChainMask) ) )
        {
            cp->c_flag |= C_HARDLINK;
        }

        /*
         * Fix-up dir link counts.
         *
         * Earlier versions of Leopard used ca_linkcount for posix
         * nlink support (effectively the sub-directory count + 2).
         * That is now accomplished using the ca_dircount field with
         * the corresponding kHFSHasFolderCountMask flag.
         *
         * For directories the ca_linkcount is the true link count,
         * tracking the number of actual hardlinks to a directory.
         *
         * We only do this if the mount has HFS_FOLDERCOUNT set;
         * at the moment, we only set that for HFSX volumes.
         */
        if ( (hfsmp->hfs_flags & HFS_FOLDERCOUNT) && (vtype == VDIR) &&
             (!(attrp->ca_recflags & kHFSHasFolderCountMask)) && (cp->c_attr.ca_linkcount > 1) )
        {
            if (cp->c_attr.ca_entries == 0)
            {
                cp->c_attr.ca_dircount = 0;
            }
            else
            {
                cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;
            }

            cp->c_attr.ca_linkcount = 1;
            cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
            if ( !(hfsmp->hfs_flags & HFS_READ_ONLY) )
            {
                cp->c_flag |= C_MODIFIED;
            }
        }

        /* Mark the output flag that we're vending a new cnode */
        *out_flags |= GNV_NEW_CNODE;
    }

    if (vtype == VDIR)
    {
        if (cp->c_vp != NULL)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_getnewvnode: orphaned vnode (data)");
            /* NOTE(review): other corruption checks in this file use hfs_assert(); consider matching. */
            assert(0);
        }
        cvpp = &cp->c_vp;
    }
    else
    {
        /*
         * Allocate and initialize a file fork...
         */
        fp = hfs_malloc(sizeof(struct filefork));
        if (fp == NULL)
        {
            retval = ENOMEM;
            goto gnv_exit;
        }
        memset(fp,0,sizeof(struct filefork));

        fp->ff_cp = cp;
        if (forkp)
        {
            bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
        }
        else
        {
            bzero(&fp->ff_data, sizeof(struct cat_fork));
        }
        rl_init(&fp->ff_invalidranges);
        fp->ff_sysfileinfo = 0;

        if (wantrsrc)
        {
            if (cp->c_rsrcfork != NULL)
            {
                LFHFS_LOG(LEVEL_ERROR, "hfs_getnewvnode: orphaned rsrc fork");
                hfs_assert(0);
            }
            if (cp->c_rsrc_vp != NULL)
            {
                LFHFS_LOG(LEVEL_ERROR, "hfs_getnewvnode: orphaned vnode (rsrc)");
                hfs_assert(0);
            }
            cp->c_rsrcfork = fp;
            cvpp = &cp->c_rsrc_vp;
            if ( (tvp = cp->c_vp) != NULL )
            {
                cp->c_flag |= C_NEED_DVNODE_PUT;
            }
        }
        else
        {
            if (cp->c_datafork != NULL)
            {
                LFHFS_LOG(LEVEL_ERROR, "hfs_getnewvnode: orphaned data fork");
                hfs_assert(0);
            }
            if (cp->c_vp != NULL)
            {
                LFHFS_LOG(LEVEL_ERROR, "hfs_getnewvnode: orphaned vnode (data)");
                hfs_assert(0);
            }

            cp->c_datafork = fp;
            cvpp = &cp->c_vp;
            if ( (tvp = cp->c_rsrc_vp) != NULL)
            {
                cp->c_flag |= C_NEED_RVNODE_PUT;
            }
        }
    }
#if LF_HFS_FULL_VNODE_SUPPORT
    if (tvp != NULL)
    {
        /*
         * grab an iocount on the vnode we weren't
         * interested in (i.e. we want the resource fork
         * but the cnode already has the data fork)
         * to prevent it from being
         * recycled by us when we call vnode_create
         * which will result in a deadlock when we
         * try to take the cnode lock in hfs_vnop_fsync or
         * hfs_vnop_reclaim... vnode_get can be called here
         * because we already hold the cnode lock which will
         * prevent the vnode from changing identity until
         * we drop it.. vnode_get will not block waiting for
         * a change of state... however, it will return an
         * error if the current iocount == 0 and we've already
         * started to terminate the vnode... we don't need/want to
         * grab an iocount in the case since we can't cause
         * the fileystem to be re-entered on this thread for this vp
         *
         * the matching vnode_put will happen in hfs_unlock
         * after we've dropped the cnode lock
         */
        if ( vnode_get(tvp) != 0)
        {
            cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
        }
    }
#endif

    vfsp.vnfs_mp = mp;
    vfsp.vnfs_vtype = vtype;
    vfsp.vnfs_str = "hfs";
    if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR))
    {
        vfsp.vnfs_dvp = NULL; /* no parent for me! */
        vfsp.vnfs_cnp = NULL; /* no name for me! */
    }
    else
    {
        vfsp.vnfs_dvp = dvp;
        if (cnp)
        {
            vfsp.vnfs_cnp = hfs_malloc(sizeof(struct componentname));
            if (vfsp.vnfs_cnp == NULL)
            {
                if (fp)
                {
                    hfs_free(fp);
                }
                retval = ENOMEM;
                goto gnv_exit;
            }

            memcpy((void*) vfsp.vnfs_cnp, (void*)cnp, sizeof(struct componentname));
            vfsp.vnfs_cnp->cn_nameptr = lf_hfs_utils_allocate_and_copy_string( (char*) cnp->cn_nameptr, cnp->cn_namelen );

        } else {
            // Incase of ScanID of hardlinks, take the filename from the cnode
            if (cp && cp->c_desc.cd_nameptr) {
                vfsp.vnfs_cnp = hfs_malloc(sizeof(struct componentname));
                if (vfsp.vnfs_cnp == NULL) {
                    if (fp) hfs_free(fp);
                    retval = ENOMEM;
                    goto gnv_exit;
                }
                bzero(vfsp.vnfs_cnp, sizeof(struct componentname));
                vfsp.vnfs_cnp->cn_nameptr = lf_hfs_utils_allocate_and_copy_string( (char*) cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen );
                vfsp.vnfs_cnp->cn_namelen = cp->c_desc.cd_namelen;
            }
        }
    }

    vfsp.vnfs_fsnode = cp;
    vfsp.vnfs_rdev = 0;

    if (forkp)
    {
        vfsp.vnfs_filesize = forkp->cf_size;
    }
    else
    {
        vfsp.vnfs_filesize = 0;
    }

    if (cnp && cnp->cn_nameptr && cp->c_desc.cd_nameptr && strncmp((const char *)cnp->cn_nameptr, (const char *)cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen) != 0)
    {
        //
        // We don't want VFS to add an entry for this vnode because the name in the
        // cnp does not match the bytes stored on disk for this file. Instead we'll
        // update the identity later after the vnode is created and we'll do so with
        // the correct bytes for this filename. For more details, see:
        // FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
        //
        need_update_identity = 1;
    }


    /* Tag system files */
    vfsp.vnfs_marksystem = issystemfile;

    /* Tag root directory */
    if (descp->cd_cnid == kHFSRootFolderID)
    {
        vfsp.vnfs_markroot = 1;
    }
    else
    {
        vfsp.vnfs_markroot = 0;
    }

    /*
     * If provided_vp was non-NULL, then it is an already-allocated (but not
     * initialized) vnode. We simply need to initialize it to this identity.
     * If it was NULL, then assume that we need to call vnode_create with the
     * normal arguments/types.
     */
    if (provided_vp)
    {
        vp = provided_vp;
        /*
         * After we assign the value of provided_vp into 'vp' (so that it can be
         * mutated safely by vnode_initialize), we can NULL it out. At this point, the disposal
         * and handling of the provided vnode will be the responsibility of VFS, which will
         * clean it up and vnode_put it properly if vnode_initialize fails.
         */
        provided_vp = NULL;
        retval = vnode_initialize (sizeof(struct vnode_fsparam), &vfsp, &vp);
        /* See error handling below for resolving provided_vp */
    }
    else
    {
        /* Do a standard vnode_create */
        retval = vnode_create (sizeof(struct vnode_fsparam), &vfsp, &vp);
    }

    /*
     * We used a local variable to hold the result of vnode_create/vnode_initialize so that
     * on error cases in vnode_create we won't accidentally harm the cnode's fields
     */

    if (retval)
    {
        /* Clean up if we encountered an error */
        if (fp) {
            if (fp == cp->c_datafork)
                cp->c_datafork = NULL;
            else
                cp->c_rsrcfork = NULL;

            hfs_free(fp);
        }
        /*
         * If this is a newly created cnode or a vnode reclaim
         * occurred during the attachment, then cleanup the cnode.
         */
        if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL))
        {
            hfs_chash_abort(hfsmp, cp);

            if ((flags & GNV_SKIPLOCK) == 0)
            {
                hfs_unlock(cp);
            }
            hfs_reclaim_cnode(cp);
        }
        else
        {
            hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);
            if ((flags & GNV_SKIPLOCK) == 0)
            {
                hfs_unlock(cp);
            }
        }
        *vpp = NULL;
        goto gnv_exit;
    }

    /* If no error, then assign the value into the cnode's fields */
    *cvpp = vp;

    if (cp->c_flag & C_HARDLINK)
    {
        //TBD - this set is for vfs -> since we have the C_HARDLINK
        // currently disable this set.
        //vnode_setmultipath(vp);
    }

    if (vp && need_update_identity)
    {
        //
        // As above, update the name of the vnode if the bytes stored in hfs do not match
        // the bytes in the cnp. See this radar:
        // FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
        // for more details.
        //
        vnode_update_identity (vp, dvp, (const char *)cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen, 0, VNODE_UPDATE_NAME);
    }
    /*
     * Tag resource fork vnodes as needing an VNOP_INACTIVE
     * so that any deferred removes (open unlinked files)
     * have the chance to process the resource fork.
     */
    if (vp && VNODE_IS_RSRC(vp))
    {
        vnode_rele(vp);
    }
    hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);

    SET_NODE_AS_VALID(vp);
    *vpp = vp;
    retval = 0;

gnv_exit:
    if (provided_vp)
    {
        /* Release our empty vnode if it was not used */
        vnode_rele (provided_vp);
    }
    return retval;
}

/*
 * Check ordering of two cnodes. Return true if they are are in-order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
    if (cp1 == cp2)
        return (0);
    if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
        return (1);
    if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
        return (0);
    /*
     * Locking order is cnode address order.
     */
    return (cp1 < cp2);
}

/*
 * Acquire 4 cnode locks.
 * - locked in cnode address order (lesser address first).
+ * - all or none of the locks are taken + * - only one lock taken per cnode (dup cnodes are skipped) + * - some of the cnode pointers may be null + */ +int +hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, + struct cnode *cp4, enum hfs_locktype locktype, struct cnode **error_cnode) +{ + struct cnode * a[3]; + struct cnode * b[3]; + struct cnode * list[4]; + struct cnode * tmp; + int i, j, k; + int error; + if (error_cnode) { + *error_cnode = NULL; + } + + if (hfs_isordered(cp1, cp2)) + { + a[0] = cp1; a[1] = cp2; + } + else { + a[0] = cp2; a[1] = cp1; + } + if (hfs_isordered(cp3, cp4)) { + b[0] = cp3; b[1] = cp4; + } else { + b[0] = cp4; b[1] = cp3; + } + a[2] = (struct cnode *)0xffffffff; /* sentinel value */ + b[2] = (struct cnode *)0xffffffff; /* sentinel value */ + + /* + * Build the lock list, skipping over duplicates + */ + for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) { + tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++]; + if (k == 0 || tmp != list[k-1]) + list[k++] = tmp; + } + + /* + * Now we can lock using list[0 - k]. + * Skip over NULL entries. + */ + for (i = 0; i < k; ++i) { + if (list[i]) + if ((error = hfs_lock(list[i], locktype, HFS_LOCK_DEFAULT))) { + /* Only stuff error_cnode if requested */ + if (error_cnode) { + *error_cnode = list[i]; + } + /* Drop any locks we acquired. */ + while (--i >= 0) { + if (list[i]) + hfs_unlock(list[i]); + } + return (error); + } + } + return (0); +} + +/* + * Unlock a group of cnodes. 
 */
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
    /* 'list' remembers which cnodes were already unlocked so dups are skipped. */
    struct cnode * list[4];
    int i, k = 0;

    if (cp1) {
        hfs_unlock(cp1);
        list[k++] = cp1;
    }
    if (cp2) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp2)
                goto skip1;
        }
        hfs_unlock(cp2);
        list[k++] = cp2;
    }
skip1:
    if (cp3) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp3)
                goto skip2;
        }
        hfs_unlock(cp3);
        list[k++] = cp3;
    }
skip2:
    if (cp4) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp4)
                return;
        }
        hfs_unlock(cp4);
    }
}

/*
 * Lock a cnode.
 * N.B. If you add any failure cases, *make* sure hfs_lock_always works
 */
int
hfs_lock(struct cnode *cp, enum hfs_locktype locktype, enum hfs_lockflags flags)
{
    pthread_t thread = pthread_self();

    if (cp->c_lockowner == thread)
    {
        /*
         * Only the extents and bitmap files support lock recursion
         * here. The other system files support lock recursion in
         * hfs_systemfile_lock. Eventually, we should change to
         * handle recursion solely in hfs_systemfile_lock.
         */
        if ((cp->c_fileid == kHFSExtentsFileID) || (cp->c_fileid == kHFSAllocationFileID))
        {
            cp->c_syslockcount++;
        }
        else
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_lock: locking against myself!");
            hfs_assert(0);
        }
    }
    else if (locktype == HFS_SHARED_LOCK)
    {
        lf_lck_rw_lock_shared(&cp->c_rwlock);
        /* Shared holders are anonymous: ownership is recorded as HFS_SHARED_OWNER. */
        cp->c_lockowner = HFS_SHARED_OWNER;
    }
    else if (locktype == HFS_TRY_EXCLUSIVE_LOCK)
    {
        /*
         * NOTE(review): this treats a zero (errno-style) return from
         * lf_lck_rw_try_lock() as success; confirm the wrapper's return
         * convention in lf_hfs_locks.c — a boolean-true-on-success wrapper
         * would invert this test.
         */
        if (!lf_lck_rw_try_lock(&cp->c_rwlock, LCK_RW_TYPE_EXCLUSIVE))
        {
            cp->c_lockowner = thread;

            /* Only the extents and bitmap files support lock recursion. */
            if ((cp->c_fileid == kHFSExtentsFileID) || (cp->c_fileid == kHFSAllocationFileID))
            {
                cp->c_syslockcount = 1;
            }
        }
        else
        {
            return (1);
        }
    }
    else
    { /* HFS_EXCLUSIVE_LOCK */
        lf_lck_rw_lock_exclusive(&cp->c_rwlock);
        cp->c_lockowner = thread;
        /* Only the extents and bitmap files support lock recursion. */
        if ((cp->c_fileid == kHFSExtentsFileID) || (cp->c_fileid == kHFSAllocationFileID))
        {
            cp->c_syslockcount = 1;
        }
    }

    /*
     * Skip cnodes for regular files that no longer exist
     * (marked deleted, catalog entry gone).
     */
    if (((flags & HFS_LOCK_ALLOW_NOEXISTS) == 0) && ((cp->c_desc.cd_flags & CD_ISMETA) == 0) && (cp->c_flag & C_NOEXISTS))
    {
        hfs_unlock(cp);
        return (ENOENT);
    }
    return (0);
}

/*
 * Unlock a cnode.
 */
void
hfs_unlock(struct cnode *cp)
{
    u_int32_t c_flag = 0;

    /*
     * Only the extents and bitmap file's support lock recursion.
     */
    if ((cp->c_fileid == kHFSExtentsFileID) || (cp->c_fileid == kHFSAllocationFileID))
    {
        if (--cp->c_syslockcount > 0)
        {
            return;
        }
    }

    pthread_t thread = pthread_self();

    if (cp->c_lockowner == thread)
    {
        /* Exclusive holder: snapshot the pending-work flags before releasing. */
        c_flag = cp->c_flag;

        // If we have the truncate lock, we must defer the puts
        if (cp->c_truncatelockowner == thread)
        {
            if (ISSET(c_flag, C_NEED_DVNODE_PUT)
                && !cp->c_need_dvnode_put_after_truncate_unlock)
            {
                CLR(c_flag, C_NEED_DVNODE_PUT);
                cp->c_need_dvnode_put_after_truncate_unlock = true;
            }
            if (ISSET(c_flag, C_NEED_RVNODE_PUT)
                && !cp->c_need_rvnode_put_after_truncate_unlock)
            {
                CLR(c_flag, C_NEED_RVNODE_PUT);
                cp->c_need_rvnode_put_after_truncate_unlock = true;
            }
        }

        CLR(cp->c_flag, (C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE | C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT));

        /* Clear ownership strictly before dropping the rwlock. */
        cp->c_lockowner = NULL;
        lf_lck_rw_unlock_exclusive(&cp->c_rwlock);
    }
    else
    {
        cp->c_lockowner = NULL;
        lf_lck_rw_unlock_shared(&cp->c_rwlock);
    }

#if LF_HFS_FULL_VNODE_SUPPORT
    /*
     * Perform any vnode post processing after cnode lock is dropped.
     *
     * NOTE(review): this conditionally-compiled block references 'vp' and
     * 'rvp', which are not declared in this function — it cannot compile if
     * LF_HFS_FULL_VNODE_SUPPORT is ever enabled; the vnode captures need to
     * be restored (compare the commented-out logic in hfs_unlock_truncate).
     */
    if (vp)
    {
        if (c_flag & C_NEED_DATA_SETSIZE)
        {
            ubc_setsize(vp, VTOF(vp)->ff_size);
        }
        if (c_flag & C_NEED_DVNODE_PUT)
        {
            vnode_put(vp);
        }
    }
    if (rvp)
    {
        if (c_flag & C_NEED_RSRC_SETSIZE)
        {
            ubc_setsize(rvp, VTOF(rvp)->ff_size);
        }
        if (c_flag & C_NEED_RVNODE_PUT)
        {
            vnode_put(rvp);
        }
    }
#endif
}

/*
 * hfs_valid_cnode
 *
 * This function is used to validate data that is stored in-core against what is contained
 * in the catalog. Common uses include validating that the parent-child relationship still exist
 * for a specific directory entry (guaranteeing it has not been renamed into a different spot) at
 * the point of the check.
 */
int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid, struct cat_attr *cattr, int *error)
{
    struct cat_attr attr;
    struct cat_desc cndesc;
    int stillvalid = 0;

    /* System files are always valid */
    if (cnid < kHFSFirstUserCatalogNodeID)
    {
        *error = 0;
        return (1);
    }

    /* XXX optimization: check write count in dvp */
    int lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    if (dvp && cnp)
    {
        int lookup = 0;
        struct cat_fork fork;
        bzero(&cndesc, sizeof(cndesc));
        cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
        cndesc.cd_namelen = cnp->cn_namelen;
        cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
        cndesc.cd_hint = VTOC(dvp)->c_childhint;

        /*
         * We have to be careful when calling cat_lookup. The result argument
         * 'attr' may get different results based on whether or not you ask
         * for the filefork to be supplied as output. This is because cat_lookupbykey
         * will attempt to do basic validation/smoke tests against the resident
         * extents if there are no overflow extent records, but it needs someplace
         * in memory to store the on-disk fork structures.
         *
         * Since hfs_lookup calls cat_lookup with a filefork argument, we should
         * do the same here, to verify that block count differences are not
         * due to calling the function with different styles. cat_lookupbykey
         * will request the volume be fsck'd if there is true on-disk corruption
         * where the number of blocks does not match the number generated by
         * summing the number of blocks in the resident extents.
         */
        lookup = cat_lookup (hfsmp, &cndesc, 0, NULL, &attr, &fork, NULL);

        if ((lookup == 0) && (cnid == attr.ca_fileid))
        {
            stillvalid = 1;
            *error = 0;
        }
        else
        {
            *error = ENOENT;
        }
        /*
         * In hfs_getnewvnode, we may encounter a time-of-check vs. time-of-vnode creation
         * race. Specifically, if there is no vnode/cnode pair for the directory entry
         * being looked up, we have to go to the catalog. But since we don't hold any locks (aside
         * from the dvp in 'shared' mode) there is nothing to protect us against the catalog record
         * changing in between the time we do the cat_lookup there and the time we re-grab the
         * catalog lock above to do another cat_lookup.
         *
         * However, we need to check more than just the CNID and parent-child name relationships above.
         * Hardlinks can suffer the same race in the following scenario: Suppose we do a
         * cat_lookup, and find a leaf record and a raw inode for a hardlink. Now, we have
         * the cat_attr in hand (passed in above). But in between then and now, the vnode was
         * created by a competing hfs_getnewvnode call, and is manipulated and reclaimed before we get
         * a chance to do anything. This is possible if there are a lot of threads thrashing around
         * with the cnode hash. In this case, if we don't check/validate the cat_attr in-hand, we will
         * blindly stuff it into the cnode, which will make the in-core data inconsistent with what is
         * on disk. So validate the cat_attr below, if required. This race cannot happen if the cnode/vnode
         * already exists, as it does in the case of rename and delete.
         */
        if (stillvalid && cattr != NULL)
        {
            if (cattr->ca_linkcount != attr.ca_linkcount)
            {
                stillvalid = 0;
                *error = ERECYCLE;
                goto notvalid;
            }

            if (cattr->ca_union1.cau_linkref != attr.ca_union1.cau_linkref)
            {
                stillvalid = 0;
                *error = ERECYCLE;
                goto notvalid;
            }

            if (cattr->ca_union3.cau_firstlink != attr.ca_union3.cau_firstlink)
            {
                stillvalid = 0;
                *error = ERECYCLE;
                goto notvalid;
            }
            if (cattr->ca_union2.cau_blocks != attr.ca_union2.cau_blocks)
            {
                stillvalid = 0;
                *error = ERECYCLE;
                goto notvalid;
            }
        }
    }
    else
    {
        /* No parent/name available: fall back to a lookup by file ID alone. */
        if (cat_idlookup(hfsmp, cnid, 0, 0, NULL, NULL, NULL) == 0)
        {
            stillvalid = 1;
            *error = 0;
        }
        else
        {
            *error = ENOENT;
        }
    }

notvalid:
    hfs_systemfile_unlock(hfsmp, lockflags);

    return (stillvalid);
}

/*
 * Protect a cnode against a truncation.
 *
 * Used mainly by read/write since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * shared. The locktype argument is the same as supplied to
 * hfs_lock.
 */
void
hfs_lock_truncate(struct cnode *cp, enum hfs_locktype locktype, enum hfs_lockflags flags)
{
    pthread_t thread = pthread_self();

    if (cp->c_truncatelockowner == thread) {
        /*
         * Ignore grabbing the lock if it the current thread already
         * holds exclusive lock.
         *
         * This is needed on the hfs_vnop_pagein path where we need to ensure
         * the file does not change sizes while we are paging in. However,
         * we may already hold the lock exclusive due to another
         * VNOP from earlier in the call stack. So if we already hold
         * the truncate lock exclusive, allow it to proceed, but ONLY if
         * it's in the recursive case.
         */
        if ((flags & HFS_LOCK_SKIP_IF_EXCLUSIVE) == 0)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_lock_truncate: cnode %p locked!", cp);
            hfs_assert(0);
        }
    } else if (locktype == HFS_SHARED_LOCK) {
        lf_lck_rw_lock_shared(&cp->c_truncatelock);
        cp->c_truncatelockowner = HFS_SHARED_OWNER;
    } else { /* HFS_EXCLUSIVE_LOCK */
        lf_lck_rw_lock_exclusive(&cp->c_truncatelock);
        cp->c_truncatelockowner = thread;
    }
}

/*
 * Unlock the truncate lock, which protects against size changes.
 *
 * If HFS_LOCK_SKIP_IF_EXCLUSIVE flag was set, it means that a previous
 * hfs_lock_truncate() might have skipped grabbing a lock because
 * the current thread was already holding the lock exclusive and
 * we may need to return from this function without actually unlocking
 * the truncate lock.
 */
void
hfs_unlock_truncate(struct cnode *cp, enum hfs_lockflags flags)
{
    pthread_t thread = pthread_self();

    /*
     * If HFS_LOCK_SKIP_IF_EXCLUSIVE is set in the flags AND the current
     * lock owner of the truncate lock is our current thread, then
     * we must have skipped taking the lock earlier by in
     * hfs_lock_truncate() by setting HFS_LOCK_SKIP_IF_EXCLUSIVE in the
     * flags (as the current thread was current lock owner).
     *
     * If HFS_LOCK_SKIP_IF_EXCLUSIVE is not set (most of the time) then
     * we check the lockowner field to infer whether the lock was taken
     * exclusively or shared in order to know what underlying lock
     * routine to call.
     */
    if (flags & HFS_LOCK_SKIP_IF_EXCLUSIVE) {
        if (cp->c_truncatelockowner == thread) {
            return;
        }
    }

    /* HFS_LOCK_EXCLUSIVE */
    if (thread == cp->c_truncatelockowner) {
//        vnode_t vp = NULL, rvp = NULL;

        /*
         * If there are pending set sizes, the cnode lock should be dropped
         * first.
         */
        hfs_assert(!(cp->c_lockowner == thread
                     && ISSET(cp->c_flag, C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE)));

//        if (cp->c_need_dvnode_put_after_truncate_unlock) {
//            vp = cp->c_vp;
//            cp->c_need_dvnode_put_after_truncate_unlock = false;
//        }
//        if (cp->c_need_rvnode_put_after_truncate_unlock) {
//            rvp = cp->c_rsrc_vp;
//            cp->c_need_rvnode_put_after_truncate_unlock = false;
//        }

        cp->c_truncatelockowner = NULL;
        lf_lck_rw_unlock_exclusive(&cp->c_truncatelock);
//
//        // Do the puts now
//        if (vp)
//            vnode_put(vp);
//        if (rvp)
//            vnode_put(rvp);
    } else
    { /* HFS_LOCK_SHARED */
        lf_lck_rw_unlock_shared(&cp->c_truncatelock);
    }
}

/*
 * Lock a pair of cnodes.
 */
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfs_locktype locktype)
{
    struct cnode *first, *last;
    int error;

    /*
     * If cnodes match then just lock one.
     */
    if (cp1 == cp2)
    {
        return hfs_lock(cp1, locktype, HFS_LOCK_DEFAULT);
    }

    /*
     * Lock in cnode address order.
     */
    if (cp1 < cp2)
    {
        first = cp1;
        last = cp2;
    }
    else
    {
        first = cp2;
        last = cp1;
    }

    if ( (error = hfs_lock(first, locktype, HFS_LOCK_DEFAULT)))
    {
        return (error);
    }
    if ( (error = hfs_lock(last, locktype, HFS_LOCK_DEFAULT)))
    {
        /* All-or-nothing: back out the first lock on failure. */
        hfs_unlock(first);
        return (error);
    }
    return (0);
}

/*
 * Unlock a pair of cnodes.
 */
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
    hfs_unlock(cp1);
    if (cp2 != cp1)
        hfs_unlock(cp2);
}

/*
 * Increase the gen count by 1; if it wraps around to 0, increment by
 * two. The cnode *must* be locked exclusively by the caller.
 *
 * You may think holding the lock is unnecessary because we only need
 * to change the counter, but consider this sequence of events: thread
 * A calls hfs_incr_gencount and the generation counter is 2 upon
 * entry.
A context switch occurs and thread B increments the counter + * to 3, thread C now gets the generation counter (for whatever + * purpose), and then another thread makes another change and the + * generation counter is incremented again---it's now 4. Now thread A + * continues and it sets the generation counter back to 3. So you can + * see, thread C would miss the change that caused the generation + * counter to increment to 4 and for this reason the cnode *must* + * always be locked exclusively. + */ +uint32_t hfs_incr_gencount (struct cnode *cp) +{ + u_int8_t *finfo = NULL; + u_int32_t gcount = 0; + + /* overlay the FinderInfo to the correct pointer, and advance */ + finfo = (u_int8_t*)cp->c_finderinfo; + finfo = finfo + 16; + + /* + * FinderInfo is written out in big endian... make sure to convert it to host + * native before we use it. + * + * NOTE: the write_gen_counter is stored in the same location in both the + * FndrExtendedFileInfo and FndrExtendedDirInfo structs (it's the + * last 32-bit word) so it is safe to have one code path here. + */ + if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode)) + { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + gcount = extinfo->write_gen_counter; + + /* Was it zero to begin with (file originated in 10.8 or earlier?) */ + if (gcount == 0) + { + gcount++; + } + + /* now bump it */ + gcount++; + + /* Did it wrap around ? */ + if (gcount == 0) + { + gcount++; + } + extinfo->write_gen_counter = OSSwapHostToBigInt32 (gcount); + + SET(cp->c_flag, C_MINOR_MOD); + } + else + { + gcount = 0; + } + + return gcount; +} + +void hfs_write_gencount (struct cat_attr *attrp, uint32_t gencount) +{ + u_int8_t *finfo = NULL; + + /* overlay the FinderInfo to the correct pointer, and advance */ + finfo = (u_int8_t*)attrp->ca_finderinfo; + finfo = finfo + 16; + + /* + * Make sure to write it out as big endian, since that's how + * finder info is defined. 
+ * + * Generation count is only supported for files. + */ + if (S_ISREG(attrp->ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + extinfo->write_gen_counter = OSSwapHostToBigInt32(gencount); + } + + /* If it were neither directory/file, then we'd bail out */ + return; +} + +void hfs_clear_might_be_dirty_flag(cnode_t *cp) +{ + /* + * If we're about to touch both mtime and ctime, we can clear the + * C_MIGHT_BE_DIRTY_FROM_MAPPING since we can guarantee that + * subsequent page-outs can only be for data made dirty before + * now. + */ + CLR(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING); +} + +/* + * Touch cnode times based on c_touch_xxx flags + * + * cnode must be locked exclusive + * + * This will also update the volume modify time + */ +void +hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp) +{ + + if (ISSET(hfsmp->hfs_flags, HFS_READ_ONLY) || ISSET(cp->c_flag, C_NOEXISTS)) { + cp->c_touch_acctime = FALSE; + cp->c_touch_chgtime = FALSE; + cp->c_touch_modtime = FALSE; + CLR(cp->c_flag, C_NEEDS_DATEADDED); + return; + } + + if (cp->c_touch_acctime || cp->c_touch_chgtime || + cp->c_touch_modtime || (cp->c_flag & C_NEEDS_DATEADDED)) { + struct timeval tv; + int touchvol = 0; + + if (cp->c_touch_modtime && cp->c_touch_chgtime) + hfs_clear_might_be_dirty_flag(cp); + + microtime(&tv); + + if (cp->c_touch_acctime) { + /* + * When the access time is the only thing changing, we + * won't necessarily write it to disk immediately. We + * only do the atime update at vnode recycle time, when + * fsync is called or when there's another reason to write + * to the metadata. 
+ */ + cp->c_atime = tv.tv_sec; + cp->c_touch_acctime = FALSE; + } + if (cp->c_touch_modtime) { + cp->c_touch_modtime = FALSE; + time_t new_time = tv.tv_sec; + if (cp->c_mtime != new_time) { + cp->c_mtime = new_time; + cp->c_flag |= C_MINOR_MOD; + touchvol = 1; + } + } + if (cp->c_touch_chgtime) { + cp->c_touch_chgtime = FALSE; + if (cp->c_ctime != tv.tv_sec) { + cp->c_ctime = tv.tv_sec; + cp->c_flag |= C_MINOR_MOD; + touchvol = 1; + } + } + + if (cp->c_flag & C_NEEDS_DATEADDED) { + hfs_write_dateadded (&(cp->c_attr), tv.tv_sec); + cp->c_flag |= C_MINOR_MOD; + /* untwiddle the bit */ + cp->c_flag &= ~C_NEEDS_DATEADDED; + touchvol = 1; + } + + /* Touch the volume modtime if needed */ + if (touchvol) { + hfs_note_header_minor_change(hfsmp); + HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec; + } + } +} + +/* + * Per HI and Finder requirements, HFS should add in the + * date/time that a particular directory entry was added + * to the containing directory. + * This is stored in the extended Finder Info for the + * item in question. + * + * Note that this field is also set explicitly in the hfs_vnop_setxattr code. + * We must ignore user attempts to set this part of the finderinfo, and + * so we need to save a local copy of the date added, write in the user + * finderinfo, then stuff the value back in. + */ +void hfs_write_dateadded (struct cat_attr *attrp, uint64_t dateadded) +{ + u_int8_t *finfo = NULL; + + /* overlay the FinderInfo to the correct pointer, and advance */ + finfo = (u_int8_t*)attrp->ca_finderinfo; + finfo = finfo + 16; + + /* + * Make sure to write it out as big endian, since that's how + * finder info is defined. + * + * NOTE: This is a Unix-epoch timestamp, not a HFS/Traditional Mac timestamp. 
+ */ + if (S_ISREG(attrp->ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + extinfo->date_added = OSSwapHostToBigInt32(dateadded); + attrp->ca_recflags |= kHFSHasDateAddedMask; + } + else if (S_ISDIR(attrp->ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo; + extinfo->date_added = OSSwapHostToBigInt32(dateadded); + attrp->ca_recflags |= kHFSHasDateAddedMask; + } + /* If it were neither directory/file, then we'd bail out */ + return; +} + +static u_int32_t +hfs_get_dateadded_internal(const uint8_t *finderinfo, mode_t mode) +{ + const uint8_t *finfo = NULL; + u_int32_t dateadded = 0; + + /* overlay the FinderInfo to the correct pointer, and advance */ + finfo = finderinfo + 16; + + /* + * FinderInfo is written out in big endian... make sure to convert it to host + * native before we use it. + */ + if (S_ISREG(mode)) { + const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo; + dateadded = OSSwapBigToHostInt32 (extinfo->date_added); + } + else if (S_ISDIR(mode)) { + const struct FndrExtendedDirInfo *extinfo = (const struct FndrExtendedDirInfo *)finfo; + dateadded = OSSwapBigToHostInt32 (extinfo->date_added); + } + + return dateadded; +} + +u_int32_t +hfs_get_dateadded(struct cnode *cp) +{ + if ((cp->c_attr.ca_recflags & kHFSHasDateAddedMask) == 0) { + /* Date added was never set. Return 0. */ + return (0); + } + + return (hfs_get_dateadded_internal((u_int8_t*)cp->c_finderinfo, + cp->c_attr.ca_mode)); +} + +static bool +hfs_cnode_isinuse(struct cnode *cp, uint32_t uRefCount) +{ + return (cp->uOpenLookupRefCount > uRefCount); +} + +/* + * hfs_cnode_teardown + * + * This is an internal function that is invoked from both hfs_vnop_inactive + * and hfs_vnop_reclaim. As VNOP_INACTIVE is not necessarily called from vnodes + * being recycled and reclaimed, it is important that we do any post-processing + * necessary for the cnode in both places. 
Important tasks include things such as + * releasing the blocks from an open-unlinked file when all references to it have dropped, + * and handling resource forks separately from data forks. + * + * Note that we take only the vnode as an argument here (rather than the cnode). + * Recall that each cnode supports two forks (rsrc/data), and we can always get the right + * cnode from either of the vnodes, but the reverse is not true -- we can't determine which + * vnode we need to reclaim if only the cnode is supplied. + * + * This function is idempotent and safe to call from both hfs_vnop_inactive and hfs_vnop_reclaim + * if both are invoked right after the other. In the second call, most of this function's if() + * conditions will fail, since they apply generally to cnodes still marked with C_DELETED. + * As a quick check to see if this function is necessary, determine if the cnode is already + * marked C_NOEXISTS. If it is, then it is safe to skip this function. The only tasks that + * remain for cnodes marked in such a fashion is to teardown their fork references and + * release all directory hints and hardlink origins. However, both of those are done + * in hfs_vnop_reclaim. hfs_update, by definition, is not necessary if the cnode's catalog + * entry is no longer there. + * + * 'reclaim' argument specifies whether or not we were called from hfs_vnop_reclaim. If we are + * invoked from hfs_vnop_reclaim, we can not call functions that cluster_push since the UBC info + * is totally gone by that point. + * + * Assumes that both truncate and cnode locks for 'cp' are held. 
+ */ +static int +hfs_cnode_teardown (struct vnode *vp, int reclaim) +{ + int forkcount = 0; + enum vtype v_type = vp->sFSParams.vnfs_vtype; + struct cnode* cp = VTOC(vp); + int error = 0; + bool started_tr = false; + struct hfsmount *hfsmp = VTOHFS(vp); + int truncated = 0; + cat_cookie_t cookie; + int cat_reserve = 0; + int lockflags = 0; + int ea_error = 0; + + if (cp->c_datafork) { + ++forkcount; + } + if (cp->c_rsrcfork) { + ++forkcount; + } + + /* + * Remove any directory hints or cached origins + */ + if (v_type == VDIR) { + hfs_reldirhints(cp, 0); + } + if (cp->c_flag & C_HARDLINK) { + hfs_relorigins(cp); + } + /* + * -- Handle open unlinked files -- + * + * If the vnode is in use, it means a force unmount is in progress + * in which case we defer cleaning up until either we come back + * through here via hfs_vnop_reclaim, at which point the UBC + * information will have been torn down and the vnode might no + * longer be in use, or if it's still in use, it will get cleaned + * up when next remounted. + */ + if (ISSET(cp->c_flag, C_DELETED) && !hfs_cnode_isinuse(cp, 0)) { + /* + * This check is slightly complicated. We should only truncate data + * in very specific cases for open-unlinked files. This is because + * we want to ensure that the resource fork continues to be available + * if the caller has the data fork open. However, this is not symmetric; + * someone who has the resource fork open need not be able to access the data + * fork once the data fork has gone inactive. + * + * If we're the last fork, then we have cleaning up to do. + * + * A) last fork, and vp == c_vp + * Truncate away own fork data. If rsrc fork is not in core, truncate it too. + * + * B) last fork, and vp == c_rsrc_vp + * Truncate ourselves, assume data fork has been cleaned due to C). + * + * If we're not the last fork, then things are a little different: + * + * C) not the last fork, vp == c_vp + * Truncate ourselves. 
Once the file has gone out of the namespace, + * it cannot be further opened. Further access to the rsrc fork may + * continue, however. + * + * D) not the last fork, vp == c_rsrc_vp + * Don't enter the block below, just clean up vnode and push it out of core. + */ + + if ((v_type == VREG || v_type == VLNK) && + ((forkcount == 1) || (!VNODE_IS_RSRC(vp)))) { + + /* Truncate away our own fork data. (Case A, B, C above) */ + if (VTOF(vp) && VTOF(vp)->ff_blocks != 0) { + /* + * SYMLINKS only: + * + * Encapsulate the entire change (including truncating the link) in + * nested transactions if we are modifying a symlink, because we know that its + * file length will be at most 4k, and we can fit both the truncation and + * any relevant bitmap changes into a single journal transaction. We also want + * the kill_block code to execute in the same transaction so that any dirty symlink + * blocks will not be written. Otherwise, rely on + * hfs_truncate doing its own transactions to ensure that we don't blow up + * the journal. + */ + if (!started_tr && (v_type == VLNK)) { + if (hfs_start_transaction(hfsmp) != 0) { + error = EINVAL; + goto out; + } + else { + started_tr = true; + } + } + + /* + * At this point, we have decided that this cnode is + * suitable for full removal. We are about to deallocate + * its blocks and remove its entry from the catalog. + * If it was a symlink, then it's possible that the operation + * which created it is still in the current transaction group + * due to coalescing. Take action here to kill the data blocks + * of the symlink out of the journal before moving to + * deallocate the blocks. We need to be in the middle of + * a transaction before calling buf_iterate like this. + * + * Note: we have to kill any potential symlink buffers out of + * the journal prior to deallocating their blocks. This is so + * that we don't race with another thread that may be doing an + * an allocation concurrently and pick up these blocks. 
It could + * generate I/O against them which could go out ahead of our journal + * transaction. + */ + + if (hfsmp->jnl && vnode_islnk(vp)) { + lf_hfs_generic_buf_write_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp); + } + + /* + * This truncate call (and the one below) is fine from VNOP_RECLAIM's + * context because we're only removing blocks, not zero-filling new + * ones. The C_DELETED check above makes things much simpler. + */ + error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 0); + if (error) { + goto out; + } + truncated = 1; + + /* (SYMLINKS ONLY): Close/End our transaction after truncating the file record */ + if (started_tr) { + hfs_end_transaction(hfsmp); + started_tr = false; + } + + } + + /* + * Truncate away the resource fork, if we represent the data fork and + * it is the last fork. That means, by definition, the rsrc fork is not in + * core. To avoid bringing a vnode into core for the sole purpose of deleting the + * data in the resource fork, we call cat_lookup directly, then hfs_release_storage + * to get rid of the resource fork's data. Note that because we are holding the + * cnode lock, it is impossible for a competing thread to create the resource fork + * vnode from underneath us while we do this. + * + * This is invoked via case A above only. 
+ */ + if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) { + struct cat_lookup_buffer *lookup_rsrc = NULL; + struct cat_desc *desc_ptr = NULL; + + lookup_rsrc = hfs_mallocz(sizeof(struct cat_lookup_buffer)); + + if (cp->c_desc.cd_namelen == 0) { + /* Initialize the rsrc descriptor for lookup if necessary*/ + MAKE_DELETED_NAME (lookup_rsrc->lookup_name, HFS_TEMPLOOKUP_NAMELEN, cp->c_fileid); + + lookup_rsrc->lookup_desc.cd_nameptr = (const uint8_t*) lookup_rsrc->lookup_name; + lookup_rsrc->lookup_desc.cd_namelen = strlen (lookup_rsrc->lookup_name); + lookup_rsrc->lookup_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + lookup_rsrc->lookup_desc.cd_cnid = cp->c_cnid; + + desc_ptr = &lookup_rsrc->lookup_desc; + } + else { + desc_ptr = &cp->c_desc; + } + + lockflags = hfs_systemfile_lock (hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + + error = cat_lookup (hfsmp, desc_ptr, 1, (struct cat_desc *) NULL, (struct cat_attr*) NULL, &lookup_rsrc->lookup_fork.ff_data, NULL); + + hfs_systemfile_unlock (hfsmp, lockflags); + + if (error) { + hfs_free(lookup_rsrc); + goto out; + } + + /* + * Make the filefork in our temporary struct look like a real + * filefork. Fill in the cp, sysfileinfo and rangelist fields.. + */ + rl_init (&lookup_rsrc->lookup_fork.ff_invalidranges); + lookup_rsrc->lookup_fork.ff_cp = cp; + + /* + * If there were no errors, then we have the catalog's fork information + * for the resource fork in question. Go ahead and delete the data in it now. + */ + + error = hfs_release_storage (hfsmp, NULL, &lookup_rsrc->lookup_fork, cp->c_fileid); + hfs_free(lookup_rsrc); + + if (error) { + goto out; + } + + /* + * This fileid's resource fork extents have now been fully deleted on-disk + * and this CNID is no longer valid. At this point, we should be able to + * zero out cp->c_blocks to indicate there is no data left in this file. 
+ */ + cp->c_blocks = 0; + } + } + + /* + * If we represent the last fork (or none in the case of a dir), + * and the cnode has become open-unlinked... + * + * We check c_blocks here because it is possible in the force + * unmount case for the data fork to be in use but the resource + * fork to not be in use in which case we will truncate the + * resource fork, but not the data fork. It will get cleaned + * up upon next mount. + */ + if (forkcount <= 1 && !cp->c_blocks) { + /* + * If it has EA's, then we need to get rid of them. + * + * Note that this must happen outside of any other transactions + * because it starts/ends its own transactions and grabs its + * own locks. This is to prevent a file with a lot of attributes + * from creating a transaction that is too large (which panics). + */ + if (ISSET(cp->c_attr.ca_recflags, kHFSHasAttributesMask)) + { + ea_error = hfs_removeallattr(hfsmp, cp->c_fileid, &started_tr); + if (ea_error) + goto out; + } + + /* + * Remove the cnode's catalog entry and release all blocks it + * may have been using. + */ + + /* + * Mark cnode in transit so that no one can get this + * cnode from cnode hash. + */ + // hfs_chash_mark_in_transit(hfsmp, cp); + // XXXdbg - remove the cnode from the hash table since it's deleted + // otherwise someone could go to sleep on the cnode and not + // be woken up until this vnode gets recycled which could be + // a very long time... + hfs_chashremove(hfsmp, cp); + + cp->c_flag |= C_NOEXISTS; // XXXdbg + cp->c_rdev = 0; + + if (!started_tr) { + if (hfs_start_transaction(hfsmp) != 0) { + error = EINVAL; + goto out; + } + started_tr = true; + } + + /* + * Reserve some space in the Catalog file. 
+ */ + if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie))) { + goto out; + } + cat_reserve = 1; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); + + if (cp->c_blocks > 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_inactive: deleting non-empty%sfile %d, " + "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ", + (int)cp->c_fileid, (int)cp->c_blocks); + } + + // + // release the name pointer in the descriptor so that + // cat_delete() will use the file-id to do the deletion. + // in the case of hard links this is imperative (in the + // case of regular files the fileid and cnid are the + // same so it doesn't matter). + // + cat_releasedesc(&cp->c_desc); + + /* + * The descriptor name may be zero, + * in which case the fileid is used. + */ + error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr); + + if (error && truncated && (error != ENXIO)) { + LFHFS_LOG(LEVEL_ERROR, "hfs_inactive: couldn't delete a truncated file!"); + } + + /* Update HFS Private Data dir */ + if (error == 0) { + hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--; + if (vnode_isdir(vp)) { + DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]); + } + (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS], + &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL); + } + + hfs_systemfile_unlock(hfsmp, lockflags); + + if (error) { + goto out; + } + + /* Already set C_NOEXISTS at the beginning of this block */ + cp->c_flag &= ~C_DELETED; + cp->c_touch_chgtime = TRUE; + cp->c_touch_modtime = TRUE; + + if (error == 0) + hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0); + } + } // if + + hfs_update(vp, reclaim ? HFS_UPDATE_FORCE : 0); + + /* + * Since we are about to finish what might be an inactive call, propagate + * any remaining modified or touch bits from the cnode to the vnode. This + * serves as a hint to vnode recycling that we shouldn't recycle this vnode + * synchronously. 
+ * + * For now, if the node *only* has a dirty atime, we don't mark + * the vnode as dirty. VFS's asynchronous recycling can actually + * lead to worse performance than having it synchronous. When VFS + * is fixed to be more performant, we can be more honest about + * marking vnodes as dirty when it's only the atime that's dirty. + */ +#if LF_HFS_FULL_VNODE_SUPPORT + //TBD - need to decide how we mark a file as dirty + if (hfs_is_dirty(cp) == HFS_DIRTY || ISSET(cp->c_flag, C_DELETED)) { + vnode_setdirty(vp); + } else { + vnode_cleardirty(vp); + } +#endif + +out: + if (cat_reserve) + cat_postflight(hfsmp, &cookie); + + if (started_tr) { + hfs_end_transaction(hfsmp); + started_tr = false; + } + + return error; +} + + +/* + * Reclaim a cnode so that it can be used for other purposes. + */ +int +hfs_vnop_reclaim(struct vnode *vp) +{ + struct cnode* cp = VTOC(vp); + struct filefork *fp = NULL; + struct filefork *altfp = NULL; + struct hfsmount *hfsmp = VTOHFS(vp); + int reclaim_cnode = 0; + int err = 0; + + /* + * We don't take the truncate lock since by the time reclaim comes along, + * all dirty pages have been synced and nobody should be competing + * with us for this thread. + */ + hfs_chash_mark_in_transit(hfsmp, cp); + + hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + lf_hfs_generic_buf_cache_LockBufCache(); + + //In case we have other open lookups + //We need to decrease the counter and exit + if (cp->uOpenLookupRefCount > 1) + { + hfs_chash_lower_OpenLookupCounter(cp); + hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_TRANSIT); + lf_hfs_generic_buf_cache_UnLockBufCache(); + hfs_unlock(cp); + return err; + } + + if (cp->uOpenLookupRefCount == 0) assert(0); + + hfs_chash_lower_OpenLookupCounter(cp); + lf_hfs_generic_buf_cache_remove_vnode(vp); + + lf_hfs_generic_buf_cache_UnLockBufCache(); + + /* + * Sync to disk any remaining data in the cnode/vnode. This includes + * a call to hfs_update if the cnode has outbound data. 
+ * + * If C_NOEXISTS is set on the cnode, then there's nothing teardown needs to do + * because the catalog entry for this cnode is already gone. + */ + INVALIDATE_NODE(vp); + + if (!ISSET(cp->c_flag, C_NOEXISTS)) { + err = hfs_cnode_teardown(vp, 1); + if (err) + { + return err; + } + } + + if (vp->sFSParams.vnfs_cnp) + { + if (vp->sFSParams.vnfs_cnp->cn_nameptr) + hfs_free(vp->sFSParams.vnfs_cnp->cn_nameptr); + hfs_free(vp->sFSParams.vnfs_cnp); + } + + + /* + * Find file fork for this vnode (if any) + * Also check if another fork is active + */ + if (cp->c_vp == vp) { + fp = cp->c_datafork; + altfp = cp->c_rsrcfork; + + cp->c_datafork = NULL; + cp->c_vp = NULL; + } else if (cp->c_rsrc_vp == vp) { + fp = cp->c_rsrcfork; + altfp = cp->c_datafork; + + cp->c_rsrcfork = NULL; + cp->c_rsrc_vp = NULL; + } else { + LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_reclaim: vp points to wrong cnode (vp=%p cp->c_vp=%p cp->c_rsrc_vp=%p)\n", vp, cp->c_vp, cp->c_rsrc_vp); + hfs_assert(0); + } + + /* + * On the last fork, remove the cnode from its hash chain. + */ + if (altfp == NULL) { + /* If we can't remove it then the cnode must persist! */ + if (hfs_chashremove(hfsmp, cp) == 0) + reclaim_cnode = 1; + /* + * Remove any directory hints + */ + if (vnode_isdir(vp)) { + hfs_reldirhints(cp, 0); + } + + if(cp->c_flag & C_HARDLINK) { + hfs_relorigins(cp); + } + } + /* Release the file fork and related data */ + if (fp) + { + /* Dump cached symlink data */ + if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) { + hfs_free(fp->ff_symlinkptr); + } + rl_remove_all(&fp->ff_invalidranges); + hfs_free(fp); + } + + /* + * If there was only one active fork then we can release the cnode. + */ + if (reclaim_cnode) { + hfs_unlock(cp); + hfs_chashwakeup(hfsmp, cp, H_ALLOC); + hfs_reclaim_cnode(cp); + } + else + { + /* + * cnode in use. If it is a directory, it could have + * no live forks. Just release the lock. 
+ */ + hfs_unlock(cp); + } + + hfs_free(vp); + vp = NULL; + return (0); +} diff --git a/livefiles_hfs_plugin/lf_hfs_cnode.h b/livefiles_hfs_plugin/lf_hfs_cnode.h new file mode 100644 index 0000000..cb6860d --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_cnode.h @@ -0,0 +1,367 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_cnode.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#ifndef lf_hfs_cnode_h +#define lf_hfs_cnode_h + +#include "lf_hfs_locks.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_rangelist.h" +#include "lf_hfs_vnode.h" +#include + +enum hfs_locktype { + HFS_SHARED_LOCK = 1, + HFS_EXCLUSIVE_LOCK = 2, + HFS_TRY_EXCLUSIVE_LOCK = 3 +}; + +/* Option flags for cnode and truncate lock functions */ +enum hfs_lockflags { + HFS_LOCK_DEFAULT = 0x0, /* Default flag, no options provided */ + HFS_LOCK_ALLOW_NOEXISTS = 0x1, /* Allow locking of all cnodes, including cnode marked deleted with no catalog entry */ + HFS_LOCK_SKIP_IF_EXCLUSIVE = 0x2, /* Skip locking if the current thread already holds the lock exclusive */ + + // Used when you do not want to check return from hfs_lock + HFS_LOCK_ALWAYS = HFS_LOCK_ALLOW_NOEXISTS, +}; +#define HFS_SHARED_OWNER (void *)0xffffffff + +#define ZFTIMELIMIT (5 * 60) + +/* Zero-fill file and push regions out to disk */ +enum { + // Use this flag if you're going to sync later + HFS_FILE_DONE_NO_SYNC = 1, +}; + +/* + * The filefork is used to represent an HFS file fork (data or resource). + * Reading or writing any of these fields requires holding cnode lock. 
+ */ +struct filefork { + struct cnode *ff_cp; /* cnode associated with this fork */ + struct rl_head ff_invalidranges; /* Areas of disk that should read back as zeroes */ + union { + void *ffu_sysfileinfo; /* additional info for system files */ + char *ffu_symlinkptr; /* symbolic link pathname */ + } ff_union; + struct cat_fork ff_data; /* fork data (size, extents) */ +}; +typedef struct filefork filefork_t; + +/* Aliases for common fields */ +#define ff_size ff_data.cf_size +#define ff_new_size ff_data.cf_new_size +#define ff_clumpsize ff_data.cf_clump +#define ff_bytesread ff_data.cf_bytesread +#define ff_extents ff_data.cf_extents + +/* + * Note that the blocks fields are protected by the cnode lock, *not* + * the truncate lock. + */ +#define ff_blocks ff_data.cf_blocks +#define ff_unallocblocks ff_data.cf_vblocks + +#define ff_symlinkptr ff_union.ffu_symlinkptr +#define ff_sysfileinfo ff_union.ffu_sysfileinfo + +/* The btree code still needs these... */ +#define fcbEOF ff_size +#define fcbExtents ff_extents +#define fcbBTCBPtr ff_sysfileinfo + +typedef u_int8_t atomicflag_t; + +/* + * Hardlink Origin (for hardlinked directories). + */ +struct linkorigin { + TAILQ_ENTRY(linkorigin) lo_link; /* chain */ + void * lo_thread; /* thread that performed the lookup */ + cnid_t lo_cnid; /* hardlink's cnid */ + cnid_t lo_parentcnid; /* hardlink's parent cnid */ +}; +typedef struct linkorigin linkorigin_t; + +#define MAX_CACHED_ORIGINS 10 +#define MAX_CACHED_FILE_ORIGINS 8 + +/* + * The cnode is used to represent each active (or recently active) + * file or directory in the HFS filesystem. + * + * Reading or writing any of these fields requires holding c_lock. 
+ */ +struct cnode { + pthread_rwlock_t c_rwlock; /* cnode's lock */ + pthread_t c_lockowner; /* cnode's lock owner (exclusive case only) */ + pthread_rwlock_t c_truncatelock; /* protects file from truncation during read/write */ + pthread_t c_truncatelockowner; /* truncate lock owner (exclusive case only) */ + pthread_cond_t c_cacsh_cond; /* cond for cnode cacsh*/ + + LIST_ENTRY(cnode) c_hash; /* cnode's hash chain */ + u_int32_t c_flag; /* cnode's runtime flags */ + u_int32_t c_hflag; /* cnode's flags for maintaining hash - protected by global hash lock */ + struct vnode *c_vp; /* vnode for data fork or dir */ + struct vnode *c_rsrc_vp; /* vnode for resource fork */ + u_int32_t c_childhint; /* catalog hint for children (small dirs only) */ + u_int32_t c_dirthreadhint; /* catalog hint for directory's thread rec */ + struct cat_desc c_desc; /* cnode's descriptor */ + struct cat_attr c_attr; /* cnode's attributes */ + TAILQ_HEAD(hfs_originhead, linkorigin) c_originlist; /* hardlink origin cache */ + TAILQ_HEAD(hfs_hinthead, directoryhint) c_hintlist; /* readdir directory hint list */ + int16_t c_dirhinttag; /* directory hint tag */ + union { + int16_t cu_dirhintcnt; /* directory hint count */ + int16_t cu_syslockcount; /* system file use only */ + } c_union; + u_int32_t c_dirchangecnt; /* changes each insert/delete (in-core only) */ + struct filefork *c_datafork; /* cnode's data fork */ + struct filefork *c_rsrcfork; /* cnode's rsrc fork */ + atomicflag_t c_touch_acctime; + atomicflag_t c_touch_chgtime; + atomicflag_t c_touch_modtime; + + // The following flags are protected by the truncate lock + union { + struct { + bool c_need_dvnode_put_after_truncate_unlock : 1; + bool c_need_rvnode_put_after_truncate_unlock : 1; + }; + uint8_t c_tflags; + }; + + /* + * Where we're using a journal, we keep track of the last + * transaction that we did an update in. If a minor modification + * is made, we'll still push it if we're still on the same + * transaction. 
+ */ + uint32_t c_update_txn; + + volatile uint32_t uOpenLookupRefCount; + +}; +typedef struct cnode cnode_t; + +/* Aliases for common cnode fields */ +#define c_cnid c_desc.cd_cnid +#define c_hint c_desc.cd_hint +#define c_parentcnid c_desc.cd_parentcnid +#define c_encoding c_desc.cd_encoding + +#define c_fileid c_attr.ca_fileid +#define c_mode c_attr.ca_mode +#define c_linkcount c_attr.ca_linkcount +#define c_uid c_attr.ca_uid +#define c_gid c_attr.ca_gid +#define c_rdev c_attr.ca_union1.cau_rdev +#define c_atime c_attr.ca_atime +#define c_mtime c_attr.ca_mtime +#define c_ctime c_attr.ca_ctime +#define c_itime c_attr.ca_itime +#define c_btime c_attr.ca_btime +#define c_bsdflags c_attr.ca_bsdflags +#define c_finderinfo c_attr.ca_finderinfo +#define c_blocks c_attr.ca_union2.cau_blocks +#define c_entries c_attr.ca_union2.cau_entries +#define c_zftimeout c_childhint + +#define c_dirhintcnt c_union.cu_dirhintcnt +#define c_syslockcount c_union.cu_syslockcount + +/* hash maintenance flags kept in c_hflag and protected by hfs_chash_mutex */ +#define H_ALLOC 0x00001 /* CNode is being allocated */ +#define H_ATTACH 0x00002 /* CNode is being attached to by another vnode */ +#define H_TRANSIT 0x00004 /* CNode is getting recycled */ +#define H_WAITING 0x00008 /* CNode is being waited for */ + +/* + * Runtime cnode flags (kept in c_flag) + */ +#define C_NEED_RVNODE_PUT 0x0000001 /* Need to do a vnode_put on c_rsrc_vp after the unlock */ +#define C_NEED_DVNODE_PUT 0x0000002 /* Need to do a vnode_put on c_vp after the unlock */ +#define C_ZFWANTSYNC 0x0000004 /* fsync requested and file has holes */ +#define C_FROMSYNC 0x0000008 /* fsync was called from sync */ + +#define C_MODIFIED 0x0000010 /* CNode has been modified */ +#define C_NOEXISTS 0x0000020 /* CNode has been deleted, catalog entry is gone */ +#define C_DELETED 0x0000040 /* CNode has been marked to be deleted */ +#define C_HARDLINK 0x0000080 /* CNode is a hard link (file or dir) */ + +/* + * A minor modification is 
one where the volume would not be inconsistent if + * the change was not pushed to disk. For example, changes to times. + */ +#define C_MINOR_MOD 0x0000100 /* CNode has a minor modification */ + +#define C_HASXATTRS 0x0000200 /* cnode has extended attributes */ +/* + * For C_SSD_STATIC: SSDs may want to deal with the file payload data in a + * different manner knowing that the content is not likely to be modified. This is + * purely advisory at the HFS level, and is not maintained after the cnode goes out of core. + */ +#define C_SSD_STATIC 0x0000800 /* Assume future writes contain static content */ + +#define C_NEED_DATA_SETSIZE 0x0001000 /* Do a ubc_setsize(0) on c_vp after the unlock */ +#define C_NEED_RSRC_SETSIZE 0x0002000 /* Do a ubc_setsize(0) on c_rsrc_vp after the unlock */ +#define C_DIR_MODIFICATION 0x0004000 /* Directory is being modified, wait for lookups */ +#define C_ALWAYS_ZEROFILL 0x0008000 /* Always zero-fill the file on an fsync */ + +#define C_RENAMED 0x0010000 /* cnode was deleted as part of rename; C_DELETED should also be set */ +#define C_NEEDS_DATEADDED 0x0020000 /* cnode needs date-added written to the finderinfo bit */ +#define C_BACKINGSTORE 0x0040000 /* cnode is a backing store for an existing or currently-mounting filesystem */ + +/* + * This flag indicates the cnode might be dirty because it + * was mapped writable so if we get any page-outs, update + * the modification and change times. + */ +#define C_MIGHT_BE_DIRTY_FROM_MAPPING 0x0080000 + +/* + * Convert between cnode pointers and vnode pointers + */ +#define VTOC(vp) ((struct cnode *) (vp)->sFSParams.vnfs_fsnode) + +#define CTOV(cp,rsrc) (((rsrc) && S_ISREG((cp)->c_mode)) ? \ + (cp)->c_rsrc_vp : (cp)->c_vp) + +/* + * Convert between vnode pointers and file forks + * + * Note: no CTOF since that is ambiguous + */ + +#define FTOC(fp) ((fp)->ff_cp) + +#define VTOF(vp) ((vp) == VTOC((vp))->c_rsrc_vp ? 
\ + VTOC((vp))->c_rsrcfork : \ + VTOC((vp))->c_datafork) + +#define VCTOF(vp, cp) ((vp) == (cp)->c_rsrc_vp ? \ + (cp)->c_rsrcfork : \ + (cp)->c_datafork) + +#define FTOV(fp) ((fp) == FTOC(fp)->c_rsrcfork ? \ + FTOC(fp)->c_rsrc_vp : \ + FTOC(fp)->c_vp) +/* + * Test for a resource fork + */ +#define FORK_IS_RSRC(fp) ((fp) == FTOC(fp)->c_rsrcfork) + +#define VNODE_IS_RSRC(vp) ((vp) == VTOC((vp))->c_rsrc_vp) + +/* + * The following is the "invisible" bit from the fdFlags field + * in the FndrFileInfo. + */ +enum { kFinderInvisibleMask = 1 << 14 }; + +/* + * HFS cnode hash functions. + */ +void hfs_chashinit(void); +void hfs_chashinit_finish(struct hfsmount *hfsmp); +void hfs_delete_chash(struct hfsmount *hfsmp); + +/* Get new default vnode */ +int hfs_getnewvnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, struct cat_desc *descp, int flags, struct cat_attr *attrp, struct cat_fork *forkp, struct vnode **vpp, int *out_flags); + +#define ATIME_ONDISK_ACCURACY 300 + +static inline bool hfs_should_save_atime(cnode_t *cp) +{ + /* + * We only write atime updates to disk if the delta is greater + * than ATIME_ONDISK_ACCURACY. 
+ */ + return (cp->c_atime < cp->c_attr.ca_atimeondisk || cp->c_atime - cp->c_attr.ca_atimeondisk > ATIME_ONDISK_ACCURACY); +} + +typedef enum { + HFS_NOT_DIRTY = 0, + HFS_DIRTY = 1, + HFS_DIRTY_ATIME = 2 +} hfs_dirty_t; + + +static inline hfs_dirty_t hfs_is_dirty(cnode_t *cp) +{ + if (ISSET(cp->c_flag, C_NOEXISTS)) + return HFS_NOT_DIRTY; + + if (ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED) + || cp->c_touch_chgtime || cp->c_touch_modtime) { + return HFS_DIRTY; + } + + if (cp->c_touch_acctime || hfs_should_save_atime(cp)) + return HFS_DIRTY_ATIME; + + return HFS_NOT_DIRTY; +} + +/* + * Catalog Lookup struct (runtime) + * + * This is used so that when we need to malloc a container for a catalog + * lookup operation, we can acquire memory for everything in one fell swoop + * as opposed to putting many of these objects on the stack. The cat_fork + * data structure can take up 100+bytes easily, and that can add to stack + * overhead. + * + * As a result, we use this to easily pass around the memory needed for a + * lookup operation. + */ +#define HFS_TEMPLOOKUP_NAMELEN 32 + +struct cat_lookup_buffer { + struct cat_desc lookup_desc; + struct cat_attr lookup_attr; + struct filefork lookup_fork; + struct componentname lookup_cn; + char lookup_name[HFS_TEMPLOOKUP_NAMELEN]; /* for open-unlinked paths only */ +}; + +/* Input flags for hfs_getnewvnode */ + +#define GNV_WANTRSRC 0x01 /* Request the resource fork vnode. */ +#define GNV_SKIPLOCK 0x02 /* Skip taking the cnode lock (when getting resource fork). */ +#define GNV_CREATE 0x04 /* The vnode is for a newly created item. 
*/ +#define GNV_NOCACHE 0x08 /* Delay entering this item in the name cache */ +#define GNV_USE_VP 0x10 /* Use the vnode provided in *vpp instead of creating a new one */ + +/* Output flags for hfs_getnewvnode */ + +#define GNV_CHASH_RENAMED 0x01 /* The cnode was renamed in-flight */ +#define GNV_CAT_DELETED 0x02 /* The cnode was deleted from the catalog */ +#define GNV_NEW_CNODE 0x04 /* We are vending out a newly initialized cnode */ +#define GNV_CAT_ATTRCHANGED 0x08 /* Something in struct cat_attr changed in between cat_lookups */ + + +int hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid, struct cat_attr *cattr, int *error); +int hfs_lock(struct cnode *cp, enum hfs_locktype locktype, enum hfs_lockflags flags); +void hfs_unlock(struct cnode *cp); +void hfs_lock_truncate(struct cnode *cp, enum hfs_locktype locktype, enum hfs_lockflags flags); +void hfs_unlock_truncate(struct cnode *cp, enum hfs_lockflags flags); +int hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfs_locktype locktype); +void hfs_unlockpair(struct cnode *cp1, struct cnode *cp2); +int hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4, enum hfs_locktype locktype, struct cnode **error_cnode); +void hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4); +uint32_t hfs_incr_gencount (struct cnode *cp); +void hfs_clear_might_be_dirty_flag(cnode_t *cp); +void hfs_write_dateadded (struct cat_attr *attrp, uint64_t dateadded); +u_int32_t hfs_get_dateadded(struct cnode *cp); +void hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp); +void hfs_write_gencount (struct cat_attr *attrp, uint32_t gencount); +int hfs_vnop_reclaim(struct vnode *vp); +#endif /* lf_hfs_cnode_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_common.h b/livefiles_hfs_plugin/lf_hfs_common.h new file mode 100644 index 0000000..2150b09 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_common.h @@ -0,0 +1,149 @@ 
+/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_common.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. + */ + +#ifndef lf_hfs_common_h +#define lf_hfs_common_h + +#include +#include +#include +#include +#import +#include +#include +#include + +#define LF_HFS_CHECK_UNMAPPED 0 +#define LF_HFS_QOUTA_SUPPORT 0 +#define LF_HFS_FULL_VNODE_SUPPORT 0 +#define LF_HFS_NATIVE_SEARCHFS_SUPPORT 0 + +#define min MIN +#define max MAX + +/* pseudo-errors returned inside kernel to modify return to process */ +#define ERESTART (-1) /* restart syscall */ +#define EJUSTRETURN (-2) /* don't modify regs, just return */ +#define ERECYCLE (-5) /* restart lookup under heavy vnode pressure/recycling */ +#define EREDRIVEOPEN (-6) +#define EKEEPLOOKING (-7) + +typedef struct +{ + int iFD; // File descriptor as received from usbstoraged + +} FileSystemRecord_s; + +#define VPTOFSRECORD(vp) (vp->sFSParams.vnfs_mp->psHfsmount->hfs_devvp->psFSRecord) + +#define VNODE_TO_IFD(vp) ((vp->bIsMountVnode)? (vp->psFSRecord->iFD) : ((VPTOFSRECORD(vp))->iFD)) + +/* Macros to clear/set/test flags. */ +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) + +#undef ROUND_DOWN +#define ROUND_DOWN(_x, _m) (((_x) / (_m)) * (_m)) + +#undef ROUND_UP +#define ROUND_UP(_x, _m) ROUND_DOWN((_x) + (_m) - 1, (_m)) + +struct HFSUniStr255 { + u_int16_t length; /* number of unicode characters */ + u_int16_t unicode[255]; /* unicode characters */ +} __attribute__((aligned(2), packed)); +typedef struct HFSUniStr255 HFSUniStr255; +typedef const HFSUniStr255 *ConstHFSUniStr255Param; + +struct hfsmount; + +#define B_LOCKED 0x00000010 + +#define RDONLY 0x00000200 /* lookup with read-only semantics */ +#define HASBUF 0x00000400 /* has allocated pathname buffer */ +#define SAVENAME 0x00000800 /* save pathname buffer */ +#define SAVESTART 0x00001000 /* save starting directory */ +#define ISDOTDOT 0x00002000 /* current component name is .. 
*/ +#define MAKEENTRY 0x00004000 /* entry is to be added to name cache */ +#define ISLASTCN 0x00008000 /* this is last component of pathname */ +#define ISSYMLINK 0x00010000 /* symlink needs interpretation */ +#define ISWHITEOUT 0x00020000 /* found whiteout */ +#define DOWHITEOUT 0x00040000 /* do whiteouts */ +#define WILLBEDIR 0x00080000 /* new files will be dirs; allow trailing / */ +#define ISUNICODE 0x00100000 /* current component name is unicode*/ +#define ISOPEN 0x00200000 /* caller is opening; return a real vnode. */ +#define NOCROSSMOUNT 0x00400000 /* do not cross mount points */ +#define NOMACCHECK 0x00800000 /* do not perform MAC checks */ +#define AUDITVNODE1 0x04000000 /* audit the looked up vnode information */ +#define AUDITVNODE2 0x08000000 /* audit the looked up vnode information */ +#define TRAILINGSLASH 0x10000000 /* path ended in a slash */ +#define NOCAPCHECK 0x20000000 /* do not perform capability checks */ +#define PARAMASK 0x3ffffe00 /* mask of parameter descriptors */ + +/* + * component name operations (for VNOP_LOOKUP) + */ +#define LOOKUP 0 /* perform name lookup only */ +#define CREATE 1 /* setup for file creation */ +#define DELETE 2 /* setup for file deletion */ +#define RENAME 3 /* setup for file renaming */ +#define OPMASK 3 /* mask for operation */ + +#define ALL_UVFS_MODES (UVFS_FA_MODE_OTH(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_GRP(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX)) + +#define UF_NODUMP 0x00000001 /* do not dump file */ + +#define VALID_NODE_MAGIC (0xC0BEC0BE) +#define VALID_NODE_BADMAGIC (0xDEADDABA) +#define INVALIDATE_NODE(psNodeRecord) \ + do { \ + if ( psNodeRecord != NULL ) { \ + ((vnode_t)psNodeRecord)->uValidNodeMagic1 = VALID_NODE_BADMAGIC; \ + ((vnode_t)psNodeRecord)->uValidNodeMagic2 = VALID_NODE_BADMAGIC; \ + } \ + } while(0) +#define SET_NODE_AS_VALID(psNodeRecord) \ + do { \ + if ( psNodeRecord != NULL ) { \ + ((vnode_t)psNodeRecord)->uValidNodeMagic1 = VALID_NODE_MAGIC; \ + 
((vnode_t)psNodeRecord)->uValidNodeMagic2 = VALID_NODE_MAGIC; \ + } \ + } while(0) +#define VERIFY_NODE_IS_VALID(psNodeRecord) \ + do { \ + if ((psNodeRecord) && \ + ((vnode_t)psNodeRecord)->uValidNodeMagic1 == VALID_NODE_BADMAGIC && \ + ((vnode_t)psNodeRecord)->uValidNodeMagic2 == VALID_NODE_BADMAGIC ) { \ + LFHFS_LOG( LEVEL_ERROR, "[%s] Got stale node", __FUNCTION__ ); \ + return ESTALE; \ + } \ + if ((psNodeRecord == NULL) || \ + ((vnode_t)psNodeRecord)->uValidNodeMagic1 != VALID_NODE_MAGIC || \ + ((vnode_t)psNodeRecord)->uValidNodeMagic2 != VALID_NODE_MAGIC ) { \ + LFHFS_LOG( LEVEL_ERROR, "[%s] Got invalid node", __FUNCTION__ ); \ + return EINVAL; \ + } \ + } while(0) +#define VERIFY_NODE_IS_VALID_FOR_RECLAIM(psNodeRecord) \ + do { \ + if ((psNodeRecord == NULL) || \ + ( ((vnode_t)psNodeRecord)->uValidNodeMagic1 != VALID_NODE_MAGIC && \ + ((vnode_t)psNodeRecord)->uValidNodeMagic1 != VALID_NODE_BADMAGIC ) || \ + ( ((vnode_t)psNodeRecord)->uValidNodeMagic2 != VALID_NODE_MAGIC && \ + ((vnode_t)psNodeRecord)->uValidNodeMagic2 != VALID_NODE_BADMAGIC )) { \ + LFHFS_LOG( LEVEL_ERROR, "Got invalid node for reclaim" ); \ + return EINVAL; \ + } \ + } while(0) + +#define ALL_UVFS_MODES (UVFS_FA_MODE_OTH(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_GRP(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX)) +#define UVFS_READ_ONLY (UVFS_FA_MODE_OTH(UVFS_FA_MODE_R | UVFS_FA_MODE_X) | UVFS_FA_MODE_GRP(UVFS_FA_MODE_R | UVFS_FA_MODE_X) | UVFS_FA_MODE_USR(UVFS_FA_MODE_R | UVFS_FA_MODE_X)) + + +#endif /* lf_hfs_common_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_defs.h b/livefiles_hfs_plugin/lf_hfs_defs.h new file mode 100644 index 0000000..4bfecf5 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_defs.h @@ -0,0 +1,99 @@ +// +// lf_hfs_defs.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_defs_h +#define lf_hfs_defs_h + +#include +#include "lf_hfs_vnode.h" + +typedef const unsigned char * ConstUTF8Param; +typedef struct vnode* FileReference; +typedef const UniChar * ConstUniCharArrayPtr; + +enum { + dskFulErr = -34, /*disk full*/ + bdNamErr = -37, /*there may be no bad names in the final system!*/ + paramErr = -50, /*error in user parameter list*/ + memFullErr = -108, /*Not enough room in heap zone*/ + fileBoundsErr = -1309, /*file's EOF, offset, mark or size is too big*/ + kTECUsedFallbacksStatus = -8783, + +}; + +enum { + fsRtParID = 1, + fsRtDirID = 2 +}; + +enum { + /* Mac OS encodings*/ + kTextEncodingMacRoman = 0L, + kTextEncodingMacJapanese = 1, + kTextEncodingMacChineseTrad = 2, + kTextEncodingMacKorean = 3, + kTextEncodingMacArabic = 4, + kTextEncodingMacHebrew = 5, + kTextEncodingMacGreek = 6, + kTextEncodingMacCyrillic = 7, + kTextEncodingMacDevanagari = 9, + kTextEncodingMacGurmukhi = 10, + kTextEncodingMacGujarati = 11, + kTextEncodingMacOriya = 12, + kTextEncodingMacBengali = 13, + kTextEncodingMacTamil = 14, + kTextEncodingMacTelugu = 15, + kTextEncodingMacKannada = 16, + kTextEncodingMacMalayalam = 17, + kTextEncodingMacSinhalese = 18, + kTextEncodingMacBurmese = 19, + kTextEncodingMacKhmer = 20, + kTextEncodingMacThai = 21, + kTextEncodingMacLaotian = 22, + kTextEncodingMacGeorgian = 23, + kTextEncodingMacArmenian = 24, + kTextEncodingMacChineseSimp = 25, + kTextEncodingMacTibetan = 26, + kTextEncodingMacMongolian = 27, + kTextEncodingMacEthiopic = 28, + kTextEncodingMacCentralEurRoman = 29, + kTextEncodingMacVietnamese = 30, + kTextEncodingMacExtArabic = 31, /* The following use script code 0, smRoman*/ + kTextEncodingMacSymbol = 33, + kTextEncodingMacDingbats = 34, + kTextEncodingMacTurkish = 35, + kTextEncodingMacCroatian = 36, + kTextEncodingMacIcelandic = 37, + kTextEncodingMacRomanian = 38, + kTextEncodingMacUnicode = 0x7E, + + kTextEncodingMacFarsi = 0x8C, /* Like MacArabic but uses Farsi digits */ /* 
The following use script code 7, smCyrillic */ + kTextEncodingMacUkrainian = 0x98, /* The following use script code 32, smUnimplemented */ +}; + +#if DEBUG +void RequireFileLock(FileReference vp, int shareable); +#define REQUIRE_FILE_LOCK(vp,s) RequireFileLock((vp),(s)) +#else +#define REQUIRE_FILE_LOCK(vp,s) +#endif + +#define BlockMoveData(src, dest, len) bcopy((src), (dest), (len)) + +#define ClearMemory(start, length) bzero((start), (size_t)(length)); + +enum { + /* Finder Flags */ + kHasBeenInited = 0x0100, + kHasCustomIcon = 0x0400, + kIsStationery = 0x0800, + kNameLocked = 0x1000, + kHasBundle = 0x2000, + kIsInvisible = 0x4000, + kIsAlias = 0x8000 +}; +#endif /* lf_hfs_defs_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_dirops_handler.c b/livefiles_hfs_plugin/lf_hfs_dirops_handler.c new file mode 100644 index 0000000..befe576 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_dirops_handler.c @@ -0,0 +1,435 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_fileops_handler.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. 
+ */ + +#include "lf_hfs_dirops_handler.h" +#include "lf_hfs_fileops_handler.h" +#include "lf_hfs_vnode.h" +#include "lf_hfs_lookup.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_attrlist.h" +#include "lf_hfs_vfsops.h" + +//---------------------------------- Functions Decleration --------------------------------------- +static int DIROPS_VerifyCookieAndVerifier(uint64_t uCookie, vnode_t psParentVnode, uint64_t uVerifier); +//---------------------------------- Functions Implementation ------------------------------------ + +static int +DIROPS_VerifyCookieAndVerifier(uint64_t uCookie, vnode_t psParentVnode, uint64_t uVerifier) +{ + int iError = 0; + struct cnode* dcp = VTOC(psParentVnode); + + if ( uCookie == 0 ) + { + if ( uVerifier != UVFS_DIRCOOKIE_VERIFIER_INITIAL ) + { + iError = UVFS_READDIR_VERIFIER_MISMATCHED; + goto exit; + } + } + else if (uCookie == UVFS_DIRCOOKIE_EOF) + { + iError = UVFS_READDIR_EOF_REACHED; + goto exit; + } + else if ( uVerifier != psParentVnode->sExtraData.sDirData.uDirVersion ) + { + iError = UVFS_READDIR_VERIFIER_MISMATCHED; + goto exit; + } + + cnid_t uChildIndex = (cnid_t)(uCookie & HFS_INDEX_MASK); + if (uChildIndex > (dcp->c_entries + 2)) + { /* searching pass the last item */ + iError = UVFS_READDIR_BAD_COOKIE; + } + +exit: + return iError; +} + +int DIROPS_RemoveInternal( UVFSFileNode psDirNode, const char *pcUTF8Name ) +{ + int iErr = 0; + vnode_t psParentVnode = (vnode_t)psDirNode; + UVFSFileNode psFileNode = {0}; + + if (!vnode_isdir(psParentVnode)) + { + return ENOTDIR; + } + + iErr = DIROPS_LookupInternal( psDirNode, pcUTF8Name, &psFileNode ); + if ( iErr != 0 ) + { + goto exit; + } + vnode_t psVnode = (vnode_t)psFileNode; + + if (vnode_isdir(psVnode)) + { + return EISDIR; + } + + struct componentname sCompName = {0}; + sCompName.cn_nameiop = DELETE; + sCompName.cn_flags = ISLASTCN; + sCompName.cn_pnbuf = (char *)pcUTF8Name; + sCompName.cn_pnlen = 
(int)strlen(pcUTF8Name); + sCompName.cn_nameptr = (char *)pcUTF8Name; + sCompName.cn_namelen = (int)strlen(pcUTF8Name); + sCompName.cn_hash = 0; + sCompName.cn_consume = (int)strlen(pcUTF8Name); + + iErr = hfs_vnop_remove(psParentVnode,psVnode, &sCompName, VNODE_REMOVE_NODELETEBUSY | VNODE_REMOVE_SKIP_NAMESPACE_EVENT ); + + LFHFS_Reclaim(psFileNode); + +exit: + return iErr; +} + +int DIROPS_LookupInternal( UVFSFileNode psDirNode, const char *pcUTF8Name, UVFSFileNode *ppsOutNode ) +{ + int iErr = 0; + // We are not supporting "." and ".." lookup. + if ( (strcmp( (char*)pcUTF8Name, "." ) == 0) || (strcmp( (char*)pcUTF8Name, ".." ) == 0) ) + { + *ppsOutNode = NULL; + iErr = EPERM; + goto exit; + } + + vnode_t psVnode = (vnode_t)psDirNode; + + if (!vnode_isdir(psVnode)) + { + iErr = ENOTDIR; + goto exit; + } + + struct componentname sCompName = {0}; + sCompName.cn_nameiop = LOOKUP; + sCompName.cn_flags = ISLASTCN; + sCompName.cn_pnbuf = (char *)pcUTF8Name; + sCompName.cn_pnlen = (int)strlen(pcUTF8Name); + sCompName.cn_nameptr = (char *)pcUTF8Name; + sCompName.cn_namelen = (int)strlen(pcUTF8Name); + sCompName.cn_hash = 0; + sCompName.cn_consume = (int)strlen(pcUTF8Name); + + iErr = hfs_vnop_lookup( psVnode, (vnode_t*)ppsOutNode, &sCompName ); + +exit: + return iErr; +} + +int +LFHFS_MkDir ( UVFSFileNode psDirNode, const char *pcName, const UVFSFileAttributes *psFileAttr, UVFSFileNode *ppsOutNode ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_MkDir\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + int iError = 0; + vnode_t psParentVnode = (vnode_t)psDirNode; + + if (!vnode_isdir(psParentVnode)) + { + iError = ENOTDIR; + goto exit; + } + + //@param cnp Name information for new directory. 
+ struct componentname sNewDirComponentName = {0}; + sNewDirComponentName.cn_nameptr = (char*) pcName; + sNewDirComponentName.cn_namelen = (int) strlen(pcName); + + iError = hfs_vnop_mkdir(psParentVnode,(vnode_t*)ppsOutNode, &sNewDirComponentName, (UVFSFileAttributes *) psFileAttr); + +exit: + return iError; +} + +int +LFHFS_RmDir ( UVFSFileNode psDirNode, const char *pcUTF8Name ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_RmDir\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + int iErr = 0; + vnode_t psParentVnode = (vnode_t)psDirNode; + + if (!vnode_isdir(psParentVnode)) + { + iErr = ENOTDIR; + goto exit; + } + + UVFSFileNode psFileNode = {0}; + struct componentname sCompName = {0}; + + iErr = DIROPS_LookupInternal( psDirNode, pcUTF8Name, &psFileNode ); + if ( iErr != 0 ) + { + goto exit; + } + + vnode_t psVnode = (vnode_t)psFileNode; + + sCompName.cn_nameiop = DELETE; + sCompName.cn_flags = ISLASTCN; + sCompName.cn_pnbuf = (char *)pcUTF8Name; + sCompName.cn_pnlen = (int)strlen(pcUTF8Name); + sCompName.cn_nameptr = (char *)pcUTF8Name; + sCompName.cn_namelen = (int)strlen(pcUTF8Name); + sCompName.cn_hash = 0; + sCompName.cn_consume = (int)strlen(pcUTF8Name); + + iErr = hfs_vnop_rmdir(psParentVnode, psVnode, &sCompName); + + hfs_vnop_reclaim(psVnode); + +exit: + return iErr; +} + +int +LFHFS_Remove ( UVFSFileNode psDirNode, const char *pcUTF8Name, __unused UVFSFileNode victimNode) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Remove\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + int iErr = DIROPS_RemoveInternal( psDirNode, pcUTF8Name ); + return iErr; +} + +int +LFHFS_Lookup ( UVFSFileNode psDirNode, const char *pcUTF8Name, UVFSFileNode *ppsOutNode ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Lookup\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + return DIROPS_LookupInternal( psDirNode, pcUTF8Name, ppsOutNode ); +} + +int +LFHFS_ReadDir ( UVFSFileNode psDirNode, void* pvBuf, size_t iBufLen, uint64_t uCookie, size_t *iReadBytes, uint64_t *puVerifier ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ReadDir\n"); + 
VERIFY_NODE_IS_VALID(psDirNode); + + int iError = 0; + *iReadBytes = 0; + struct vnode* psParentVnode = (struct vnode*) psDirNode; + + if (iReadBytes == NULL || puVerifier == NULL) + { + return EINVAL; + } + *iReadBytes = 0; + + // Make sure the UVFSFileNode is a directory. + if ( !IS_DIR(psParentVnode) ) + { + LFHFS_LOG(LEVEL_ERROR, "HFS_ReadDir node is not a directory.\n", ENOTDIR); + return ENOTDIR; + } + + // Make sure there is a place for at least one entry with maximal allowed name + uint64_t uMaxRecLen = UVFS_DIRENTRY_RECLEN(MAX_UTF8_NAME_LENGTH); + if ( iBufLen < uMaxRecLen ) + { + return EINVAL; + } + + iError = DIROPS_VerifyCookieAndVerifier(uCookie,psParentVnode, *puVerifier); + if ( iError != 0 ) + { + goto exit; + } + + + *puVerifier = psParentVnode->sExtraData.sDirData.uDirVersion; + + //Setting readDir Args + int iEofflag; + int iNumdirent; + int flags = VNODE_READDIR_EXTENDED|VNODE_READDIR_REQSEEKOFF; + ReadDirBuff_s sReadDirBuffer = {0}; + sReadDirBuffer.pvBuffer = pvBuf; + sReadDirBuffer.uBufferResid = sReadDirBuffer.uBufferSize = iBufLen; + + iError = hfs_vnop_readdir( psParentVnode, &iEofflag, &iNumdirent, &sReadDirBuffer, uCookie, flags); + + if (iError) + goto exit; + + if (iNumdirent == 0) + { + if (iEofflag) + { + iError = UVFS_READDIR_EOF_REACHED; + } + else + { + iError = EINVAL; + } + } + + *iReadBytes = sReadDirBuffer.uBufferSize - sReadDirBuffer.uBufferResid; +exit: + return iError; +} + +int +LFHFS_ReadDirAttr( UVFSFileNode psDirNode, void *pvBuf, size_t iBufLen, uint64_t uCookie, size_t *iReadBytes, uint64_t *puVerifier ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ReadDirAttr\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + int iError = 0; + *iReadBytes = 0; + struct vnode* psParentVnode = (struct vnode*) psDirNode; + + if (iReadBytes == NULL || puVerifier == NULL) + { + return EINVAL; + } + *iReadBytes = 0; + + // Make sure the UVFSFileNode is a directory. 
+ if ( !IS_DIR(psParentVnode) ) + { + LFHFS_LOG(LEVEL_ERROR, "HFS_ReadDir node is not a directory.\n", ENOTDIR); + return ENOTDIR; + } + + // Make sure there is a place for at least one entry with maximal allowed name + uint64_t uMaxRecLen = UVFS_DIRENTRY_RECLEN(MAX_UTF8_NAME_LENGTH); + if ( iBufLen < uMaxRecLen ) + { + return EINVAL; + } + + iError = DIROPS_VerifyCookieAndVerifier(uCookie, psParentVnode, *puVerifier); + if ( iError != 0 ) + { + goto exit; + } + + *puVerifier = psParentVnode->sExtraData.sDirData.uDirVersion; + + + //Setting readDirAttr Args + int iEofflag; + int iNumdirent; + ReadDirBuff_s sReadDirBuffer = {0}; + sReadDirBuffer.pvBuffer = pvBuf; + sReadDirBuffer.uBufferResid = sReadDirBuffer.uBufferSize = iBufLen; + + iError = hfs_vnop_readdirattr( psParentVnode, &iEofflag, &iNumdirent, &sReadDirBuffer, uCookie); + + if (iError) + goto exit; + + if (iNumdirent == 0) + { + if (iEofflag) + { + iError = UVFS_READDIR_EOF_REACHED; + } + else + { + iError = EINVAL; + } + } + + *iReadBytes = sReadDirBuffer.uBufferSize - sReadDirBuffer.uBufferResid; + +exit: + return iError; +} + +int +LFHFS_ScanDir(UVFSFileNode psDirNode, scandir_matching_request_t* psMatchingCriteria, scandir_matching_reply_t* psMatchingResult) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ScanDir\n"); + VERIFY_NODE_IS_VALID(psDirNode); + + int iErr = 0; + struct vnode* psParentVnode = (struct vnode*) psDirNode; + // Make sure the UVFSFileNode is a directory. 
+ if ( !IS_DIR(psParentVnode) ) + { + LFHFS_LOG(LEVEL_ERROR, "LFHFS_ScanDir node is not a directory.\n", ENOTDIR); + return ENOTDIR; + } + + iErr = DIROPS_VerifyCookieAndVerifier(psMatchingCriteria->smr_start_cookie, psParentVnode, psMatchingCriteria->smr_verifier); + if ( iErr != 0 ) + { + goto exit; + } + + psMatchingResult->smr_result_type = 0; + psMatchingResult->smr_verifier = psParentVnode->sExtraData.sDirData.uDirVersion; + ScanDirRequest_s psScanDirRequest = {.psMatchingCriteria = psMatchingCriteria, .psMatchingResult = psMatchingResult}; + + iErr = hfs_scandir( psParentVnode, &psScanDirRequest); + +exit: + return iErr; +} + +int LFHFS_ScanIDs(UVFSFileNode psNode, + __unused uint64_t uRequestedAttributes, + const uint64_t* puFileIDArray, + unsigned int iFileIDCount, + scanids_match_block_t fMatchCallback) +{ + int error = 0; + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ScanIDs\n"); + VERIFY_NODE_IS_VALID(psNode); + struct vnode* psVnode = (struct vnode*) psNode; + + char* pcName = malloc(sizeof(char)*MAX_UTF8_NAME_LENGTH); + if (pcName == NULL) + return ENOMEM; + + for (uint32_t uIDCounter = 0; uIDCounter < iFileIDCount; uIDCounter++) + { + memset(pcName,0,sizeof(char)*MAX_UTF8_NAME_LENGTH); + //if we got to the rootParentID just continue + if ((cnid_t)puFileIDArray[uIDCounter] == kHFSRootParentID) + continue; + + UVFSFileAttributes sFileAttrs; + error = hfs_GetInfoByID(VTOHFS(psVnode), (cnid_t)puFileIDArray[uIDCounter], &sFileAttrs, pcName); + if (error == ENOENT) { + error = 0; + continue; + } + + if (!error) { + if ((cnid_t)puFileIDArray[uIDCounter] == kHFSRootFolderID) { + sFileAttrs.fa_parentid = sFileAttrs.fa_fileid; + } + LFHFS_LOG(LEVEL_DEBUG, "scan found item %llu parent %llu", + sFileAttrs.fa_parentid, sFileAttrs.fa_fileid); + fMatchCallback((int) uIDCounter, &sFileAttrs, pcName); + } else { + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ScanIDs: hfs_GetInfoByID failed with error %u\n",error); + break; + } + } + + free(pcName); + return error; +} diff --git 
a/livefiles_hfs_plugin/lf_hfs_dirops_handler.h b/livefiles_hfs_plugin/lf_hfs_dirops_handler.h new file mode 100644 index 0000000..f346cbb --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_dirops_handler.h @@ -0,0 +1,28 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_dirops_handler.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#ifndef lf_hfs_dirpos_handler_h +#define lf_hfs_dirpos_handler_h + +#include "lf_hfs_common.h" +#include "lf_hfs_catalog.h" + +#define MAX_UTF8_NAME_LENGTH (NAME_MAX*3+1) + +int LFHFS_MkDir ( UVFSFileNode psDirNode, const char *pcName, const UVFSFileAttributes *psFileAttr, UVFSFileNode *ppsOutNode ); +int LFHFS_RmDir ( UVFSFileNode psDirNode, const char *pcUTF8Name ); +int LFHFS_Remove ( UVFSFileNode psDirNode, const char *pcUTF8Name, UVFSFileNode victimNode); +int LFHFS_Lookup ( UVFSFileNode psDirNode, const char *pcUTF8Name, UVFSFileNode *ppsOutNode ); +int LFHFS_ReadDir ( UVFSFileNode psDirNode, void* pvBuf, size_t iBufLen, uint64_t uCookie, size_t *iReadBytes, uint64_t *puVerifier ); +int LFHFS_ReadDirAttr ( UVFSFileNode psDirNode, void *pvBuf, size_t iBufLen, uint64_t uCookie, size_t *iReadBytes, uint64_t *puVerifier ); +int LFHFS_ScanDir ( UVFSFileNode psDirNode, scandir_matching_request_t* psMatchingCriteria, scandir_matching_reply_t* psMatchingResult ); +int LFHFS_ScanIDs ( UVFSFileNode psNode, __unused uint64_t uRequestedAttributes, const uint64_t* puFileIDArray, unsigned int iFileIDCount, scanids_match_block_t fMatchCallback); + +int DIROPS_RemoveInternal( UVFSFileNode psDirNode, const char *pcUTF8Name ); +int DIROPS_LookupInternal( UVFSFileNode psDirNode, const char *pcUTF8Name, UVFSFileNode *ppsOutNode ); +#endif /* lf_hfs_dirpos_handler_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_endian.c b/livefiles_hfs_plugin/lf_hfs_endian.c new file mode 100644 index 0000000..9e4a7d0 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_endian.c @@ -0,0 +1,872 @@ +// +// lf_hfs_endian.c +// 
livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include + + +#include "lf_hfs_endian.h" +#include "lf_hfs_btrees_private.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_generic_buf.h" + +#define DEBUG_BTNODE_SWAP 0 + +/* + * Internal swapping routines + * + * These routines handle swapping the records of leaf and index nodes. The + * layout of the keys and records varies depending on the kind of B-tree + * (determined by fileID). + * + * The direction parameter must be kSwapBTNodeBigToHost or kSwapBTNodeHostToBig. + * The kSwapBTNodeHeaderRecordOnly "direction" is not valid for these routines. + */ +int hfs_swap_HFSPlusBTInternalNode (BlockDescriptor *src, HFSCatalogNodeID fileID, enum HFSBTSwapDirection direction); +void hfs_swap_HFSPlusForkData (HFSPlusForkData *src); + +/* + * hfs_swap_HFSPlusForkData + */ +void +hfs_swap_HFSPlusForkData ( + HFSPlusForkData *src + ) +{ + int i; + + src->logicalSize = SWAP_BE64 (src->logicalSize); + + src->clumpSize = SWAP_BE32 (src->clumpSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + src->extents[i].startBlock = SWAP_BE32 (src->extents[i].startBlock); + src->extents[i].blockCount = SWAP_BE32 (src->extents[i].blockCount); + } +} + +/* + * hfs_swap_BTNode + * + * NOTE: This operation is not naturally symmetric. + * We have to determine which way we're swapping things. 
+ */ +int +hfs_swap_BTNode ( + BlockDescriptor *src, + vnode_t vp, + enum HFSBTSwapDirection direction, + u_int8_t allow_empty_node + ) +{ + + GenericLFBuf *psBuf = src->blockHeader; + lf_hfs_generic_buf_lock(psBuf); + + switch(direction) { + case kSwapBTNodeBigToHost: + lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_LITTLE_ENDIAN); + break; + case kSwapBTNodeHostToBig: + lf_hfs_generic_buf_clear_cache_flag(psBuf, GEN_BUF_LITTLE_ENDIAN); + break; + case kSwapBTNodeHeaderRecordOnly: + break; + default: + panic("invalid direction"); + } + + + BTNodeDescriptor *srcDesc = src->buffer; + u_int16_t *srcOffs = NULL; + BTreeControlBlockPtr btcb = (BTreeControlBlockPtr)VTOF(vp)->fcbBTCBPtr; + u_int16_t i; /* index to match srcDesc->numRecords */ + int error = 0; + + #if DEBUG_BTNODE_SWAP + printf("hfs_swap_BTNode: direction %u (%s), psVnode %p, blockNum %llu uPhyCluster %llu\n", direction, (direction==0)?"RD":(direction==1)?"WR":"NA", vp, src->blockNum, psBuf->uPhyCluster); + uint32_t *pData = src->buffer; + printf("hfs_swap_BTNode: %p before: 0x%x, 0x%x, 0x%x, 0x%x\n", pData, pData[0], pData[1], pData[2], pData[3]); + #endif + +#ifdef ENDIAN_DEBUG + if (direction == kSwapBTNodeBigToHost) { + LFHFS_LOG(LEVEL_DEBUG, "hfs: BE -> Native Swap\n"); + } else if (direction == kSwapBTNodeHostToBig) { + LFHFS_LOG(LEVEL_DEBUG, "hfs: Native -> BE Swap\n"); + } else if (direction == kSwapBTNodeHeaderRecordOnly) { + LFHFS_LOG(LEVEL_DEBUG, "hfs: Not swapping descriptors\n"); + } else { + LFHFS_LOG(LEVEL_ERROR, "hfs_swap_BTNode: This is impossible"); + hfs_assert(0); + } +#endif + + /* + * If we are doing a swap from on-disk to in-memory, then swap the node + * descriptor and record offsets before we need to use them. + */ + if (direction == kSwapBTNodeBigToHost) { + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + + /* + * When first opening a BTree, we have to read the header node before the + * control block is initialized. 
In this case, totalNodes will be zero, + * so skip the bounds checking. Also, we should ignore the header node when + * checking for invalid forwards and backwards links, since the header node's + * links can point back to itself legitimately. + */ + if (btcb->totalNodes != 0) { + if (srcDesc->fLink >= btcb->totalNodes) { + LFHFS_LOG( LEVEL_ERROR, "hfs_swap_BTNode: invalid forward link (0x%08x >= 0x%08x)\n", srcDesc->fLink, btcb->totalNodes); + error = fsBTInvalidHeaderErr; + goto fail; + } + if (srcDesc->bLink >= btcb->totalNodes) { + LFHFS_LOG( LEVEL_ERROR, "hfs_swap_BTNode: invalid backward link (0x%08x >= 0x%08x)\n", srcDesc->bLink, btcb->totalNodes); + error = fsBTInvalidHeaderErr; + goto fail; + } + + if ((src->blockNum != 0) && (srcDesc->fLink == (u_int32_t) src->blockNum)) { + LFHFS_LOG( LEVEL_ERROR, "hfs_swap_BTNode: invalid forward link (0x%08x == 0x%08x)\n", srcDesc->fLink, (u_int32_t) src->blockNum); + error = fsBTInvalidHeaderErr; + goto fail; + } + if ((src->blockNum != 0) && (srcDesc->bLink == (u_int32_t) src->blockNum)) { + LFHFS_LOG( LEVEL_ERROR, "hfs_swap_BTNode: invalid backward link (0x%08x == 0x%08x)\n", srcDesc->bLink, (u_int32_t) src->blockNum); + error = fsBTInvalidHeaderErr; + goto fail; + } + + + } + + /* + * Check srcDesc->kind. Don't swap it because it's only one byte. + */ + if (srcDesc->kind < kBTLeafNode || srcDesc->kind > kBTMapNode) { + LFHFS_LOG(LEVEL_ERROR , "hfs_swap_BTNode: invalid node kind (%d)\n", srcDesc->kind); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Check srcDesc->height. Don't swap it because it's only one byte. + */ + if (srcDesc->height > kMaxTreeDepth) { + LFHFS_LOG(LEVEL_ERROR , "hfs_swap_BTNode: invalid node height (%d)\n", srcDesc->height); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* Don't swap srcDesc->reserved */ + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + + /* + * Swap the node offsets (including the free space one!). 
+ */ + srcOffs = (u_int16_t *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (u_int16_t)))); + + /* + * Sanity check that the record offsets are within the node itself. + */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize) || + (char *)srcOffs < ((char *)src->buffer + sizeof(BTNodeDescriptor))) { + LFHFS_LOG(LEVEL_ERROR , "hfs_swap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Swap and sanity check each of the record offsets. + */ + for (i = 0; i <= srcDesc->numRecords; i++) { + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + + /* + * Sanity check: must be even, and within the node itself. + * + * We may be called to swap an unused node, which contains all zeroes. + * Unused nodes are expected only when allow_empty_node is true. + * If it is false and record offset is zero, return error. + */ + if ((srcOffs[i] & 1) || ( + (allow_empty_node == false) && (srcOffs[i] == 0)) || + (srcOffs[i] < sizeof(BTNodeDescriptor) && srcOffs[i] != 0) || + (srcOffs[i] > (src->blockSize - 2 * (srcDesc->numRecords + 1)))) { + LFHFS_LOG(LEVEL_ERROR , "hfs_swap_BTNode: offset #%d invalid (0x%04X) (blockSize 0x%x numRecords %d)\n", + i, srcOffs[i], (int32_t)src->blockSize, srcDesc->numRecords); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Make sure the offsets are strictly increasing. Note that we're looping over + * them backwards, hence the order in the comparison. 
+ */ + if ((i != 0) && (srcOffs[i] >= srcOffs[i-1])) { + LFHFS_LOG(LEVEL_ERROR , "hfs_swap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n", + i, i-1, srcOffs[i], srcOffs[i-1]); + + error = fsBTInvalidHeaderErr; + goto fail; + } + } + } + + /* + * Swap the records (ordered by frequency of access) + */ + if ((srcDesc->kind == kBTIndexNode) || + (srcDesc->kind == kBTLeafNode)) { + + if (VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) { + error = hfs_swap_HFSPlusBTInternalNode (src, VTOC(vp)->c_fileid, direction); + } + + if (error) goto fail; + + } else if (srcDesc-> kind == kBTMapNode) { + /* Don't swap the bitmaps, they'll be done in the bitmap routines */ + + } else if (srcDesc-> kind == kBTHeaderNode) { + /* The header's offset is hard-wired because we cannot trust the offset pointers. */ + BTHeaderRec *srcHead = (BTHeaderRec *)((char *)src->buffer + sizeof(BTNodeDescriptor)); + + srcHead->treeDepth = SWAP_BE16 (srcHead->treeDepth); + + srcHead->rootNode = SWAP_BE32 (srcHead->rootNode); + srcHead->leafRecords = SWAP_BE32 (srcHead->leafRecords); + srcHead->firstLeafNode = SWAP_BE32 (srcHead->firstLeafNode); + srcHead->lastLeafNode = SWAP_BE32 (srcHead->lastLeafNode); + + srcHead->nodeSize = SWAP_BE16 (srcHead->nodeSize); + srcHead->maxKeyLength = SWAP_BE16 (srcHead->maxKeyLength); + + srcHead->totalNodes = SWAP_BE32 (srcHead->totalNodes); + srcHead->freeNodes = SWAP_BE32 (srcHead->freeNodes); + + srcHead->clumpSize = SWAP_BE32 (srcHead->clumpSize); + srcHead->attributes = SWAP_BE32 (srcHead->attributes); + + /* Don't swap srcHead->reserved1 */ + /* Don't swap srcHead->btreeType; it's only one byte */ + /* Don't swap srcHead->reserved2 */ + /* Don't swap srcHead->reserved3 */ + /* Don't swap bitmap */ + } + + /* + * If we are doing a swap from in-memory to on-disk, then swap the node + * descriptor and record offsets after we're done using them. + */ + if (direction == kSwapBTNodeHostToBig) { + /* + * Sanity check and swap the forward and backward links. 
+ * Ignore the header node since its forward and backwards links can legitimately + * point to itself. + */ + if (srcDesc->fLink >= btcb->totalNodes) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid forward link (0x%08X)\n", srcDesc->fLink); + error = fsBTInvalidHeaderErr; + goto fail; + } + if ((src->blockNum != 0) && (srcDesc->fLink == (u_int32_t) src->blockNum)) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid forward link (0x%08x == 0x%08x)\n", + srcDesc->fLink, (u_int32_t) src->blockNum); + error = fsBTInvalidHeaderErr; + goto fail; + } + + if (srcDesc->bLink >= btcb->totalNodes) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid backward link (0x%08X)\n", srcDesc->bLink); + error = fsBTInvalidHeaderErr; + goto fail; + } + if ((src->blockNum != 0) && (srcDesc->bLink == (u_int32_t) src->blockNum)) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid backward link (0x%08x == 0x%08x)\n", + srcDesc->bLink, (u_int32_t) src->blockNum); + error = fsBTInvalidHeaderErr; + goto fail; + } + + + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + + /* + * Check srcDesc->kind. Don't swap it because it's only one byte. + */ + if (srcDesc->kind < kBTLeafNode || srcDesc->kind > kBTMapNode) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid node kind (%d)\n", srcDesc->kind); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Check srcDesc->height. Don't swap it because it's only one byte. + */ + if (srcDesc->height > kMaxTreeDepth) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid node height (%d)\n", srcDesc->height); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* Don't swap srcDesc->reserved */ + + /* + * Swap the node offsets (including the free space one!). + */ + srcOffs = (u_int16_t *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (u_int16_t)))); + + /* + * Sanity check that the record offsets are within the node itself. 
+ */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize) || + (char *)srcOffs < ((char *)src->buffer + sizeof(BTNodeDescriptor))) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Swap and sanity check each of the record offsets. + */ + for (i = 0; i <= srcDesc->numRecords; i++) { + /* + * Sanity check: must be even, and within the node itself. + * + * We may be called to swap an unused node, which contains all zeroes. + * This can happen when the last record from a node gets deleted. + * This is why we allow the record offset to be zero. + * Unused nodes are expected only when allow_empty_node is true + * (the caller should set it to true for kSwapBTNodeBigToHost). + */ + if ((srcOffs[i] & 1) || + ((allow_empty_node == false) && (srcOffs[i] == 0)) || + (srcOffs[i] < sizeof(BTNodeDescriptor) && srcOffs[i] != 0) || + (srcOffs[i] > (src->blockSize - 2 * (srcDesc->numRecords + 1)))) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: offset #%d invalid (0x%04X) (blockSize 0x%lx numRecords %d)\n", + i, srcOffs[i], src->blockSize, srcDesc->numRecords); + error = fsBTInvalidHeaderErr; + goto fail; + } + + /* + * Make sure the offsets are strictly increasing. Note that we're looping over + * them backwards, hence the order in the comparison. + */ + if ((i < srcDesc->numRecords) && (srcOffs[i+1] >= srcOffs[i])) { + LFHFS_LOG(LEVEL_ERROR, "hfs_UNswap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n", + i+1, i, srcOffs[i+1], srcOffs[i]); + error = fsBTInvalidHeaderErr; + goto fail; + } + + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + } + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + } + +fail: + lf_hfs_generic_buf_unlock(psBuf); + if (error) { + /* + * Log some useful information about where the corrupt node is. 
+ */ + LFHFS_LOG( LEVEL_ERROR, "lf_hfs: node=%lld fileID=%u volume=%s\n", src->blockNum, VTOC(vp)->c_fileid, VTOVCB(vp)->vcbVN); + hfs_mark_inconsistent(VTOVCB(vp), HFS_INCONSISTENCY_DETECTED); + } + #if DEBUG_BTNODE_SWAP + printf("hfs_swap_BTNode: after: 0x%x, 0x%x, 0x%x, 0x%x\n", pData[0], pData[1], pData[2], pData[3]); + #endif + + return (error); +} + +int +hfs_swap_HFSPlusBTInternalNode ( + BlockDescriptor *src, + HFSCatalogNodeID fileID, + enum HFSBTSwapDirection direction + ) +{ + BTNodeDescriptor *srcDesc = src->buffer; + u_int16_t *srcOffs = (u_int16_t *)((char *)src->buffer + (src->blockSize - (srcDesc->numRecords * sizeof (u_int16_t)))); + char *nextRecord; /* Points to start of record following current one */ + + /* + * i is an int32 because it needs to be negative to index the offset to free space. + * srcDesc->numRecords is a u_int16_t and is unlikely to become 32-bit so this should be ok. + */ + + int32_t i; + u_int32_t j; + + if (fileID == kHFSExtentsFileID) { + HFSPlusExtentKey *srcKey; + HFSPlusExtentDescriptor *srcRec; + size_t recordSize; /* Size of the data part of the record, or node number for index nodes */ + + if (srcDesc->kind == kBTIndexNode) + recordSize = sizeof(u_int32_t); + else + recordSize = sizeof(HFSPlusExtentDescriptor); + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusExtentKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* + * Make sure the key and data are within the buffer. Since both key + * and data are fixed size, this is relatively easy. Note that this + * relies on the keyLength being a constant; we verify the keyLength + * below. 
+ */ + if ((char *)srcKey + sizeof(HFSPlusExtentKey) + recordSize > nextRecord) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + srcRec = (HFSPlusExtentDescriptor *)((char *)srcKey + srcKey->keyLength + sizeof(srcKey->keyLength)); + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + + /* Don't swap srcKey->forkType; it's only one byte */ + /* Don't swap srcKey->pad */ + + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + srcKey->startBlock = SWAP_BE32 (srcKey->startBlock); + + if (srcDesc->kind == kBTIndexNode) { + /* For index nodes, the record data is just a child node number. */ + *((u_int32_t *)srcRec) = SWAP_BE32 (*((u_int32_t *)srcRec)); + } else { + /* Swap the extent data */ + for (j = 0; j < kHFSPlusExtentDensity; j++) { + srcRec[j].startBlock = SWAP_BE32 (srcRec[j].startBlock); + srcRec[j].blockCount = SWAP_BE32 (srcRec[j].blockCount); + } + } + } + + } else if (fileID == kHFSCatalogFileID) { + HFSPlusCatalogKey *srcKey; + int16_t *srcPtr; + u_int16_t keyLength; + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusCatalogKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. 
We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + (uintptr_t)(srcOffs[i-1]); + + /* + * Make sure we can safely dereference the keyLength and parentID fields. + */ + if ((char *)srcKey + offsetof(HFSPlusCatalogKey, nodeName.unicode[0]) > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* + * Swap and sanity check the key length + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + keyLength = srcKey->keyLength; /* Put it in a local (native order) because we use it several times */ + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16 (keyLength); + + /* Sanity check the key length */ + if (keyLength < kHFSPlusCatalogKeyMinimumLength || keyLength > kHFSPlusCatalogKeyMaximumLength) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, keyLength); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* + * Make sure that we can safely dereference the record's type field or + * an index node's child node number. + */ + srcPtr = (int16_t *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength)); + if ((char *)srcPtr + sizeof(u_int32_t) > nextRecord) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + srcKey->parentID = SWAP_BE32 (srcKey->parentID); + + /* + * Swap and sanity check the key's node name + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + /* Make sure name length is consistent with key length */ + if (keyLength < sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) + + srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0])) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog record #%d keyLength=%d expected=%lu\n", + srcDesc->numRecords-i, keyLength, sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) + + srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0])); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + for (j = 0; j < srcKey->nodeName.length; j++) { + srcKey->nodeName.unicode[j] = SWAP_BE16 (srcKey->nodeName.unicode[j]); + } + if (direction == kSwapBTNodeHostToBig) + srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + + /* + * For index nodes, the record data is just the child's node number. + * Skip over swapping the various types of catalog record. + */ + if (srcDesc->kind == kBTIndexNode) { + *((u_int32_t *)srcPtr) = SWAP_BE32 (*((u_int32_t *)srcPtr)); + continue; + } + + /* Make sure the recordType is in native order before using it. */ + if (direction == kSwapBTNodeBigToHost) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + + if (srcPtr[0] == kHFSPlusFolderRecord) { + HFSPlusCatalogFolder *srcRec = (HFSPlusCatalogFolder *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + srcRec->flags = SWAP_BE16 (srcRec->flags); + srcRec->valence = SWAP_BE32 (srcRec->valence); + srcRec->folderID = SWAP_BE32 (srcRec->folderID); + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 (srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags; it's only one byte */ + /* Don't swap srcRec->bsdInfo.ownerFlags; it's only one byte */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + /* Don't swap srcRec->userInfo */ + /* Don't swap srcRec->finderInfo */ + srcRec->folderCount = SWAP_BE32 (srcRec->folderCount); + + } else if (srcPtr[0] == kHFSPlusFileRecord) { + HFSPlusCatalogFile *srcRec = (HFSPlusCatalogFile *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + srcRec->flags = SWAP_BE16 (srcRec->flags); + + srcRec->fileID = SWAP_BE32 (srcRec->fileID); + + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 (srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags; it's only one byte */ + /* Don't swap srcRec->bsdInfo.ownerFlags; it's only one byte */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + /* If kHFSHasLinkChainBit is set, reserved1 is hl_FirstLinkID. + * In all other context, it is expected to be zero. + */ + srcRec->reserved1 = SWAP_BE32 (srcRec->reserved1); + + /* Don't swap srcRec->userInfo */ + /* Don't swap srcRec->finderInfo */ + /* Don't swap srcRec->reserved2 */ + + hfs_swap_HFSPlusForkData (&srcRec->dataFork); + hfs_swap_HFSPlusForkData (&srcRec->resourceFork); + + } else if ((srcPtr[0] == kHFSPlusFolderThreadRecord) || + (srcPtr[0] == kHFSPlusFileThreadRecord)) { + + /* + * Make sure there is room for parentID and name length. + */ + HFSPlusCatalogThread *srcRec = (HFSPlusCatalogThread *)srcPtr; + if ((char *) &srcRec->nodeName.unicode[0] > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* Don't swap srcRec->reserved */ + + srcRec->parentID = SWAP_BE32 (srcRec->parentID); + + if (direction == kSwapBTNodeBigToHost) + srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + + /* + * Make sure there is room for the name in the buffer. + * Then swap the characters of the name itself. + */ + if ((char *) &srcRec->nodeName.unicode[srcRec->nodeName.length] > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + for (j = 0; j < srcRec->nodeName.length; j++) { + srcRec->nodeName.unicode[j] = SWAP_BE16 (srcRec->nodeName.unicode[j]); + } + + if (direction == kSwapBTNodeHostToBig) + srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + + } else { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* We can swap the record type now that we're done using it. */ + if (direction == kSwapBTNodeHostToBig) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + } + + } else if (fileID == kHFSAttributesFileID) { + HFSPlusAttrKey *srcKey; + HFSPlusAttrRecord *srcRec; + u_int16_t keyLength; + u_int32_t attrSize = 0; + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusAttrKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. 
We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* Make sure there is room in the buffer for a minimal key */ + if ((char *) &srcKey->attrName[1] > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* Swap the key length field */ + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16(srcKey->keyLength); + keyLength = srcKey->keyLength; /* Keep a copy in native order */ + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16(srcKey->keyLength); + + /* + * Make sure that we can safely dereference the record's type field or + * an index node's child node number. + */ + srcRec = (HFSPlusAttrRecord *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength)); + if ((char *)srcRec + sizeof(u_int32_t) > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr key #%d too big (%d)\n", srcDesc->numRecords-i-1, keyLength); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + srcKey->fileID = SWAP_BE32(srcKey->fileID); + srcKey->startBlock = SWAP_BE32(srcKey->startBlock); + + /* + * Swap and check the attribute name + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->attrNameLen = SWAP_BE16(srcKey->attrNameLen); + /* Sanity check the attribute name length */ + if (srcKey->attrNameLen > kHFSMaxAttrNameLen || keyLength < (kHFSPlusAttrKeyMinimumLength + sizeof(u_int16_t)*srcKey->attrNameLen)) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr key #%d keyLength=%d attrNameLen=%d\n", srcDesc->numRecords-i-1, keyLength, srcKey->attrNameLen); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + for (j = 0; j < srcKey->attrNameLen; j++) + srcKey->attrName[j] = SWAP_BE16(srcKey->attrName[j]); + if (direction == kSwapBTNodeHostToBig) + srcKey->attrNameLen = SWAP_BE16(srcKey->attrNameLen); + + /* + * For index nodes, the record data is just the child's node number. + * Skip over swapping the various types of attribute record. + */ + if (srcDesc->kind == kBTIndexNode) { + *((u_int32_t *)srcRec) = SWAP_BE32 (*((u_int32_t *)srcRec)); + continue; + } + + /* Swap the record data */ + if (direction == kSwapBTNodeBigToHost) + srcRec->recordType = SWAP_BE32(srcRec->recordType); + switch (srcRec->recordType) { + case kHFSPlusAttrInlineData: + /* Is there room for the inline data header? */ + if ((char *) &srcRec->attrData.attrData[0] > nextRecord) { + + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* We're not swapping the reserved fields */ + + /* Swap the attribute size */ + if (direction == kSwapBTNodeHostToBig) + attrSize = srcRec->attrData.attrSize; + srcRec->attrData.attrSize = SWAP_BE32(srcRec->attrData.attrSize); + if (direction == kSwapBTNodeBigToHost) + attrSize = srcRec->attrData.attrSize; + + /* Is there room for the inline attribute data? */ + if ((char *) &srcRec->attrData.attrData[attrSize] > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big (attrSize=%u)\n", srcDesc->numRecords-i-1, attrSize); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* Not swapping the attribute data itself */ + break; + + case kHFSPlusAttrForkData: + /* Is there room for the fork data record? */ + if ((char *)srcRec + sizeof(HFSPlusAttrForkData) > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr fork data #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* We're not swapping the reserved field */ + + hfs_swap_HFSPlusForkData(&srcRec->forkData.theFork); + break; + + case kHFSPlusAttrExtents: + /* Is there room for an extent record? */ + if ((char *)srcRec + sizeof(HFSPlusAttrExtents) > nextRecord) { + LFHFS_LOG((direction == kSwapBTNodeHostToBig) ? 
LEVEL_ERROR : LEVEL_DEBUG, "hfs_swap_HFSPlusBTInternalNode: attr extents #%d too big\n", srcDesc->numRecords-i-1); + if (direction == kSwapBTNodeHostToBig) { + hfs_assert(0); + } + return fsBTInvalidNodeErr; + } + + /* We're not swapping the reserved field */ + + for (j = 0; j < kHFSPlusExtentDensity; j++) { + srcRec->overflowExtents.extents[j].startBlock = + SWAP_BE32(srcRec->overflowExtents.extents[j].startBlock); + srcRec->overflowExtents.extents[j].blockCount = + SWAP_BE32(srcRec->overflowExtents.extents[j].blockCount); + } + break; + } + if (direction == kSwapBTNodeHostToBig) + srcRec->recordType = SWAP_BE32(srcRec->recordType); + } + } + else { + LFHFS_LOG(LEVEL_ERROR, "hfs_swap_HFSPlusBTInternalNode: fileID %u is not a system B-tree\n", fileID); + hfs_assert(0); + } + + + return (0); +} + diff --git a/livefiles_hfs_plugin/lf_hfs_endian.h b/livefiles_hfs_plugin/lf_hfs_endian.h new file mode 100644 index 0000000..6094bb2 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_endian.h @@ -0,0 +1,50 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_endian.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#ifndef lf_hfs_endian_h +#define lf_hfs_endian_h + +#include +#include "lf_hfs_btrees_internal.h" + + +/*********************/ +/* BIG ENDIAN Macros */ +/*********************/ +#define SWAP_BE16(__a) OSSwapBigToHostInt16 (__a) +#define SWAP_BE32(__a) OSSwapBigToHostInt32 (__a) +#define SWAP_BE64(__a) OSSwapBigToHostInt64 (__a) + + +/* + * Constants for the "unswap" argument to hfs_swap_BTNode: + */ +enum HFSBTSwapDirection { + kSwapBTNodeBigToHost = 0, + kSwapBTNodeHostToBig = 1, + + /* + * kSwapBTNodeHeaderRecordOnly is used to swap just the header record + * of a header node from big endian (on disk) to host endian (in memory). + * It does not swap the node descriptor (forward/backward links, record + * count, etc.). It assumes the header record is at offset 0x000E. 
+ * + * Since HFS Plus doesn't have fixed B-tree node sizes, we have to read + * the header record to determine the actual node size for that tree + * before we can set up the B-tree control block. We read it initially + * as 512 bytes, then re-read it once we know the correct node size. Since + * we may not have read the entire header node the first time, we can't + * swap the record offsets, other records, or do most sanity checks. + */ + kSwapBTNodeHeaderRecordOnly = 3 +}; + +int hfs_swap_BTNode (BlockDescriptor *src, vnode_t vp, enum HFSBTSwapDirection direction, u_int8_t allow_empty_node); + + +#endif /* lf_hfs_endian_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.c b/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.c new file mode 100644 index 0000000..2a680f8 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.c @@ -0,0 +1,1764 @@ +// +// lf_hfs_file_extent_mapping.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include "lf_hfs.h" +#include "lf_hfs_format.h" +#include "lf_hfs_file_extent_mapping.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_file_mgr_internal.h" +#include "lf_hfs_btrees_internal.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_utils.h" + + +enum +{ + kDataForkType = 0, + kResourceForkType = 0xFF, + + kPreviousRecord = -1 +}; + +static OSErr FindExtentRecord( + const ExtendedVCB *vcb, + u_int8_t forkType, + u_int32_t fileID, + u_int32_t startBlock, + Boolean allowPrevious, + HFSPlusExtentKey *foundKey, + HFSPlusExtentRecord foundData, + u_int32_t *foundHint); + +static OSErr DeleteExtentRecord( + const ExtendedVCB *vcb, + u_int8_t forkType, + u_int32_t fileID, + u_int32_t startBlock); + +static OSErr CreateExtentRecord( + ExtendedVCB *vcb, + HFSPlusExtentKey *key, + HFSPlusExtentRecord extents, + u_int32_t *hint); + + +static OSErr GetFCBExtentRecord( + const FCB *fcb, + HFSPlusExtentRecord extents); + +static OSErr SearchExtentRecord( + 
+    ExtendedVCB         *vcb,
+    u_int32_t           searchFABN,
+    const HFSPlusExtentRecord extentData,
+    u_int32_t           extentDataStartFABN,
+    u_int32_t           *foundExtentDataOffset,
+    u_int32_t           *endingFABNPlusOne,
+    Boolean             *noMoreExtents);
+
+static OSErr ReleaseExtents(
+    ExtendedVCB         *vcb,
+    const HFSPlusExtentRecord extentRecord,
+    u_int32_t           *numReleasedAllocationBlocks,
+    Boolean             *releasedLastExtent);
+
+static OSErr DeallocateFork(
+    ExtendedVCB         *vcb,
+    HFSCatalogNodeID    fileID,
+    u_int8_t            forkType,
+    HFSPlusExtentRecord catalogExtents,
+    Boolean             *recordDeleted);
+
+static OSErr TruncateExtents(
+    ExtendedVCB         *vcb,
+    u_int8_t            forkType,
+    u_int32_t           fileID,
+    u_int32_t           startBlock,
+    Boolean             *recordDeleted);
+
+static OSErr UpdateExtentRecord(
+    ExtendedVCB         *vcb,
+    FCB                 *fcb,
+    int                 deleted,
+    const HFSPlusExtentKey *extentFileKey,
+    const HFSPlusExtentRecord extentData,
+    u_int32_t           extentBTreeHint);
+
+static Boolean ExtentsAreIntegral(
+    const HFSPlusExtentRecord extentRecord,
+    u_int32_t           mask,
+    u_int32_t           *blocksChecked,
+    Boolean             *checkedLastExtent);
+
+//_________________________________________________________________________________
+//
+// Routine:     FindExtentRecord
+//
+// Purpose:     Search the extents BTree for an extent record matching the given
+//              FileID, fork, and starting file allocation block number.
+//
+// Inputs:
+//    vcb           Volume to search
+//    forkType      0 = data fork, 0xFF = resource fork (kResourceForkType)
+//    fileID        File's FileID (CatalogNodeID)
+//    startBlock    Starting file allocation block number
+//    allowPrevious If the desired record isn't found and this flag is set,
+//                  then see if the previous record belongs to the same fork.
+//                  If so, then return it.
+//
+// Outputs:
+//    foundKey      The key data for the record actually found
+//    foundData     The extent record actually found (NOTE: on an HFS volume, the
+//                  fourth entry will be zeroes.)
+//    foundHint     The BTree hint to find the node again
+//_________________________________________________________________________________
+static OSErr FindExtentRecord(
+    const ExtendedVCB   *vcb,
+    u_int8_t            forkType,
+    u_int32_t           fileID,
+    u_int32_t           startBlock,
+    Boolean             allowPrevious,
+    HFSPlusExtentKey    *foundKey,
+    HFSPlusExtentRecord foundData,
+    u_int32_t           *foundHint)
+{
+    FCB *               fcb;
+    BTreeIterator       *btIterator = NULL;
+    FSBufferDescriptor  btRecord;
+    OSErr               err;
+    u_int16_t           btRecordSize;
+
+    err = noErr;
+    if (foundHint)
+        *foundHint = 0;
+    fcb = GetFileControlBlock(vcb->extentsRefNum);
+
+    btIterator = hfs_mallocz(sizeof(BTreeIterator));
+    /* Fail cleanly on allocation failure (matches DeleteExtentRecord). */
+    if (btIterator == NULL) return ENOMEM;
+
+    /* HFS Plus / HFSX */
+    if (vcb->vcbSigWord != kHFSSigWord) {
+        HFSPlusExtentKey *  extentKeyPtr;
+        HFSPlusExtentRecord extentData;
+
+        extentKeyPtr = (HFSPlusExtentKey*) &btIterator->key;
+        extentKeyPtr->keyLength  = kHFSPlusExtentKeyMaximumLength;
+        extentKeyPtr->forkType   = forkType;
+        extentKeyPtr->pad        = 0;
+        extentKeyPtr->fileID     = fileID;
+        extentKeyPtr->startBlock = startBlock;
+
+        btRecord.bufferAddress = &extentData;
+        btRecord.itemSize = sizeof(HFSPlusExtentRecord);
+        btRecord.itemCount = 1;
+
+        err = BTSearchRecord(fcb, btIterator, &btRecord, &btRecordSize, btIterator);
+
+        if (err == btNotFound && allowPrevious) {
+            err = BTIterateRecord(fcb, kBTreePrevRecord, btIterator, &btRecord, &btRecordSize);
+
+            // A previous record may not exist, so just return btNotFound (like we would if
+            // it was for the wrong file/fork).
+            if (err == (OSErr) fsBTStartOfIterationErr)        //•• fsBTStartOfIterationErr is type unsigned long
+                err = btNotFound;
+
+            if (err == noErr) {
+                // Found a previous record.  Does it belong to the same fork of the same file?
+                if (extentKeyPtr->fileID != fileID || extentKeyPtr->forkType != forkType)
+                    err = btNotFound;
+            }
+        }
+
+        if (err == noErr) {
+            // Copy the found key back for the caller
+            if (foundKey)
+                BlockMoveData(extentKeyPtr, foundKey, sizeof(HFSPlusExtentKey));
+            // Copy the found data back for the caller
+            BlockMoveData(&extentData, foundData, sizeof(HFSPlusExtentRecord));
+        }
+    }
+    /* NOTE(review): on a plain HFS (kHFSSigWord) volume this returns noErr
+     * without filling foundData — presumably unreachable in livefiles; verify. */
+
+    if (foundHint)
+        *foundHint = btIterator->hint.nodeNum;
+
+    hfs_free(btIterator);
+    return err;
+}
+
+
+//_________________________________________________________________________________
+//
+// Routine:     CreateExtentRecord
+//
+// Purpose:     Insert a new extent record into the extents B-tree, returning
+//              the B-tree hint for the inserted node in *hint.
+//_________________________________________________________________________________
+static OSErr CreateExtentRecord(
+    ExtendedVCB         *vcb,
+    HFSPlusExtentKey    *key,
+    HFSPlusExtentRecord extents,
+    u_int32_t           *hint)
+{
+    BTreeIterator       *btIterator = NULL;
+    FSBufferDescriptor  btRecord;
+    u_int16_t           btRecordSize = 0;
+    int                 lockflags;
+    OSErr               err;
+
+    err = noErr;
+    *hint = 0;
+
+    btIterator = hfs_mallocz(sizeof(BTreeIterator));
+    /* Fail cleanly on allocation failure (matches DeleteExtentRecord). */
+    if (btIterator == NULL) return ENOMEM;
+
+    /*
+     * The lock taken by callers of ExtendFileC is speculative and
+     * only occurs when the file already has overflow extents. So
+     * We need to make sure we have the lock here.  The extents
+     * btree lock can be nested (its recursive) so we always take
+     * it here.
+     */
+    lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
+
+    /* HFS+/HFSX */
+    btRecordSize = sizeof(HFSPlusExtentRecord);
+    btRecord.bufferAddress = extents;
+    btRecord.itemSize = btRecordSize;
+    btRecord.itemCount = 1;
+
+    BlockMoveData(key, &btIterator->key, sizeof(HFSPlusExtentKey));
+
+    /* err was unconditionally noErr here; the old "if (err == noErr)" guard was dead. */
+    err = BTInsertRecord(GetFileControlBlock(vcb->extentsRefNum), btIterator, &btRecord, btRecordSize);
+
+    if (err == noErr)
+        *hint = btIterator->hint.nodeNum;
+
+    (void) BTFlushPath(GetFileControlBlock(vcb->extentsRefNum));
+
+    hfs_systemfile_unlock(vcb, lockflags);
+
+    hfs_free(btIterator);
+    return err;
+}
+
+
+//_________________________________________________________________________________
+//
+// Routine:     DeleteExtentRecord
+//
+// Purpose:     Remove the extent record with the given fork/fileID/startBlock
+//              key from the extents B-tree.
+//_________________________________________________________________________________
+static OSErr DeleteExtentRecord(
+    const ExtendedVCB   *vcb,
+    u_int8_t            forkType,
+    u_int32_t           fileID,
+    u_int32_t           startBlock)
+{
+    BTreeIterator       *btIterator = NULL;
+    OSErr               err = noErr;
+
+    btIterator = hfs_mallocz(sizeof(BTreeIterator));
+    if (btIterator == NULL) return ENOMEM;
+
+    /* HFS+ / HFSX */
+    HFSPlusExtentKey *  keyPtr;
+
+    keyPtr = (HFSPlusExtentKey*) &btIterator->key;
+    keyPtr->keyLength  = kHFSPlusExtentKeyMaximumLength;
+    keyPtr->forkType   = forkType;
+    keyPtr->pad        = 0;
+    keyPtr->fileID     = fileID;
+    keyPtr->startBlock = startBlock;
+
+    err = BTDeleteRecord(GetFileControlBlock(vcb->extentsRefNum), btIterator);
+    (void) BTFlushPath(GetFileControlBlock(vcb->extentsRefNum));
+
+    hfs_free(btIterator);
+    return err;
+}
+
+
+
+//_________________________________________________________________________________
+//
+// Routine:     MapFileBlock
+//
+// Function:    Maps a file position into a physical disk address.
//
//_________________________________________________________________________________

OSErr MapFileBlockC (
    ExtendedVCB     *vcb,               // volume that file resides on
    FCB             *fcb,               // FCB of file
    size_t          numberOfBytes,      // number of contiguous bytes desired
    off_t           offset,             // starting offset within file (in bytes)
    daddr64_t       *startSector,       // first sector (NOT an allocation block)
    size_t          *availableBytes)    // number of contiguous bytes (up to numberOfBytes)
{
    OSErr               err;
    u_int32_t           allocBlockSize;     // Size of the volume's allocation block
    u_int32_t           sectorSize;
    HFSPlusExtentKey    foundKey;
    HFSPlusExtentRecord foundData;
    u_int32_t           foundIndex;
    u_int32_t           hint;
    u_int32_t           firstFABN = 0;      // file allocation block of first block in found extent
    u_int32_t           nextFABN;           // file allocation block of block after end of found extent
    off_t               dataEnd;            // (offset) end of range that is contiguous
    u_int32_t           sectorsPerBlock;    // Number of sectors per allocation block
    u_int32_t           startBlock = 0;     // volume allocation block corresponding to firstFABN
    daddr64_t           temp;
    off_t               tmpOff;

    allocBlockSize = vcb->blockSize;
    sectorSize = VCBTOHFS(vcb)->hfs_logical_block_size;

    // Locate the extent that maps `offset`.
    err = SearchExtentFile(vcb, fcb, offset, &foundKey, foundData, &foundIndex, &hint, &nextFABN);
    if (err == noErr) {
        startBlock = foundData[foundIndex].startBlock;
        firstFABN = nextFABN - foundData[foundIndex].blockCount;
    }

    if (err != noErr)
    {
        return err;
    }

    //
    // Determine the end of the available space.  It will either be the end of the extent,
    // or the file's PEOF, whichever is smaller.
    //
    dataEnd = (off_t)((off_t)(nextFABN) * (off_t)(allocBlockSize));   // Assume valid data through end of this extent
    if (((off_t)fcb->ff_blocks * (off_t)allocBlockSize) < dataEnd)    // Is PEOF shorter?
        dataEnd = (off_t)fcb->ff_blocks * (off_t)allocBlockSize;      // Yes, so only map up to PEOF

    // Compute the number of sectors in an allocation block
    sectorsPerBlock = allocBlockSize / sectorSize;    // sectors per allocation block

    //
    // Compute the absolute sector number that contains the offset of the given file
    //    offset in sectors from start of the extent +
    //    offset in sectors from start of allocation block space
    //
    temp  = (daddr64_t)((offset - (off_t)((off_t)(firstFABN) * (off_t)(allocBlockSize)))/sectorSize);
    temp += (daddr64_t)startBlock * (daddr64_t)sectorsPerBlock;

    /* Add in any volume offsets */
    if (vcb->vcbSigWord == kHFSPlusSigWord)
        temp += vcb->hfsPlusIOPosOffset / sectorSize;
    else
        temp += vcb->vcbAlBlSt;

    //  Return the desired sector for file position "offset"
    *startSector = temp;

    //
    // Determine the number of contiguous bytes until the end of the extent
    // (or the amount they asked for, whichever comes first).
    //
    if (availableBytes)
    {
        tmpOff = dataEnd - offset;
        /*
         * Disallow negative runs.
         */
        if (tmpOff <= 0) {
            /* This shouldn't happen unless something is corrupt */
            LFHFS_LOG( LEVEL_ERROR, "MapFileBlockC: tmpOff <= 0 (%lld)\n", tmpOff);
            return EINVAL;
        }

        if (tmpOff > (off_t)(numberOfBytes)) {
            *availableBytes = numberOfBytes;  // more there than they asked for, so pin the output
        }
        else {
            *availableBytes = tmpOff;
        }
    }

    return noErr;
}


//-------------------------------------------------------------------------------
// Routine:     ReleaseExtents
//
// Function:    Release the extents of a single extent data record.
+//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +static OSErr ReleaseExtents( + ExtendedVCB *vcb, + const HFSPlusExtentRecord extentRecord, + u_int32_t *numReleasedAllocationBlocks, + Boolean *releasedLastExtent) +{ + u_int32_t extentIndex; + u_int32_t numberOfExtents; + OSErr err = noErr; + + *numReleasedAllocationBlocks = 0; + *releasedLastExtent = false; + + if (vcb->vcbSigWord == kHFSPlusSigWord) + numberOfExtents = kHFSPlusExtentDensity; + else + numberOfExtents = kHFSExtentDensity; + + for( extentIndex = 0; extentIndex < numberOfExtents; extentIndex++) + { + u_int32_t numAllocationBlocks; + + // Loop over the extent record and release the blocks associated with each extent. + numAllocationBlocks = extentRecord[extentIndex].blockCount; + if ( numAllocationBlocks == 0 ) + { + *releasedLastExtent = true; + break; + } + + err = BlockDeallocate( vcb, extentRecord[extentIndex].startBlock, numAllocationBlocks , 0); + if ( err != noErr ) + break; + + *numReleasedAllocationBlocks += numAllocationBlocks; // bump FABN to beg of next extent + } + + return( err ); +} + + + +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +// Routine: TruncateExtents +// +// Purpose: Delete extent records whose starting file allocation block number +// is greater than or equal to a given starting block number. The +// allocation blocks represented by the extents are deallocated. +// +// Inputs: +// vcb Volume to operate on +// fileID Which file to operate on +// startBlock Starting file allocation block number for first extent +// record to delete. 
+//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +static OSErr TruncateExtents( + ExtendedVCB *vcb, + u_int8_t forkType, + u_int32_t fileID, + u_int32_t startBlock, + Boolean * recordDeleted) +{ + OSErr err; + u_int32_t numberExtentsReleased; + Boolean releasedLastExtent; + u_int32_t hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents = {0}; + int lockflags; + + /* + * The lock taken by callers of TruncateFileC is speculative and + * only occurs when the file already has overflow extents. So + * We need to make sure we have the lock here. The extents + * btree lock can be nested (its recursive) so we always take + * it here. + */ + lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); + + while (true) { + err = FindExtentRecord(vcb, forkType, fileID, startBlock, false, &key, extents, &hint); + if (err != noErr) { + if (err == btNotFound) + err = noErr; + break; + } + + err = ReleaseExtents( vcb, extents, &numberExtentsReleased, &releasedLastExtent ); + if (err != noErr) break; + + err = DeleteExtentRecord(vcb, forkType, fileID, startBlock); + if (err != noErr) break; + + *recordDeleted = true; + startBlock += numberExtentsReleased; + } + hfs_systemfile_unlock(vcb, lockflags); + + return err; +} + + + +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +// Routine: DeallocateFork +// +// Function: De-allocates all disk space allocated to a specified fork. 
+//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +static OSErr DeallocateFork( + ExtendedVCB *vcb, + HFSCatalogNodeID fileID, + u_int8_t forkType, + HFSPlusExtentRecord catalogExtents, + Boolean * recordDeleted) /* true if a record was deleted */ +{ + OSErr err; + u_int32_t numReleasedAllocationBlocks; + Boolean releasedLastExtent; + + // Release the catalog extents + err = ReleaseExtents( vcb, catalogExtents, &numReleasedAllocationBlocks, &releasedLastExtent ); + // Release the extra extents, if present + if (err == noErr && !releasedLastExtent) + err = TruncateExtents(vcb, forkType, fileID, numReleasedAllocationBlocks, recordDeleted); + + return( err ); +} + +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +// Routine: FlushExtentFile +// +// Function: Flushes the extent file for a specified volume +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +OSErr FlushExtentFile( ExtendedVCB *vcb ) +{ + FCB * fcb; + OSErr err; + int lockflags; + + fcb = GetFileControlBlock(vcb->extentsRefNum); + + lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); + err = BTFlushPath(fcb); + hfs_systemfile_unlock(vcb, lockflags); + + if ( err == noErr ) + { + // If the FCB for the extent "file" is dirty, mark the VCB as dirty. + if (FTOC(fcb)->c_flag & C_MODIFIED) + { + MarkVCBDirty( vcb ); + } + } + + return( err ); +} + + +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +// Routine: CompareExtentKeysPlus +// +// Function: Compares two extent file keys (a search key and a trial key) for +// an HFS volume. 
+//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +__attribute__((weak)) int32_t CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusExtentKey *trialKey ) +{ + int32_t result; // ± 1 + +#if DEBUG + if (searchKey->keyLength != kHFSPlusExtentKeyMaximumLength) + LFHFS_LOG( LEVEL_ERROR, "HFS: search Key is wrong length" ); + if (trialKey->keyLength != kHFSPlusExtentKeyMaximumLength) + LFHFS_LOG( LEVEL_ERROR, "HFS: search Key is wrong length" ); +#endif + + result = -1; // assume searchKey < trialKey + + if (searchKey->fileID == trialKey->fileID) { + // + // FileNum's are equal; compare fork types + // + if (searchKey->forkType == trialKey->forkType) { + // + // Fork types are equal; compare allocation block number + // + if (searchKey->startBlock == trialKey->startBlock) { + // + // Everything is equal + // + result = 0; + } + else { + // + // Allocation block numbers differ; determine sign + // + if (searchKey->startBlock > trialKey->startBlock) + result = 1; + } + } + else { + // + // Fork types differ; determine sign + // + if (searchKey->forkType > trialKey->forkType) + result = 1; + } + } + else { + // + // FileNums differ; determine sign + // + if (searchKey->fileID > trialKey->fileID) + result = 1; + } + + return( result ); +} + +/* + * Add a file extent to a file. + * + * Used by hfs_extendfs to extend the volume allocation bitmap file. + * + */ +int +AddFileExtent(ExtendedVCB *vcb, FCB *fcb, u_int32_t startBlock, u_int32_t blockCount) +{ + HFSPlusExtentKey foundKey; + HFSPlusExtentRecord foundData; + u_int32_t foundIndex; + u_int32_t hint; + u_int32_t nextBlock; + int64_t peof; + int i; + int error; + + peof = (int64_t)(fcb->ff_blocks + blockCount) * (int64_t)vcb->blockSize; + + error = SearchExtentFile(vcb, fcb, peof-1, &foundKey, foundData, &foundIndex, &hint, &nextBlock); + if (error != fxRangeErr) + return (EBUSY); + + /* + * Add new extent. See if there is room in the current record. 
+ */ + if (foundData[foundIndex].blockCount != 0) + ++foundIndex; + if (foundIndex == kHFSPlusExtentDensity) { + /* + * Existing record is full so create a new one. + */ + foundKey.keyLength = kHFSPlusExtentKeyMaximumLength; + foundKey.forkType = kDataForkType; + foundKey.pad = 0; + foundKey.fileID = FTOC(fcb)->c_fileid; + foundKey.startBlock = nextBlock; + + foundData[0].startBlock = startBlock; + foundData[0].blockCount = blockCount; + + /* zero out remaining extents. */ + for (i = 1; i < kHFSPlusExtentDensity; ++i) { + foundData[i].startBlock = 0; + foundData[i].blockCount = 0; + } + + foundIndex = 0; + + error = CreateExtentRecord(vcb, &foundKey, foundData, &hint); + if (error == fxOvFlErr) { + error = dskFulErr; + } + + } else { + /* + * Add a new extent into existing record. + */ + foundData[foundIndex].startBlock = startBlock; + foundData[foundIndex].blockCount = blockCount; + error = UpdateExtentRecord(vcb, fcb, 0, &foundKey, foundData, hint); + } + (void) FlushExtentFile(vcb); + + return (error); +} + + +//_________________________________________________________________________________ +// +// Routine: Extendfile +// +// Function: Extends the disk space allocated to a file. 
//
//_________________________________________________________________________________

OSErr ExtendFileC (
    ExtendedVCB     *vcb,               // volume that file resides on
    FCB             *fcb,               // FCB of file to truncate
    int64_t         bytesToAdd,         // number of bytes to allocate
    u_int32_t       blockHint,          // desired starting allocation block
    u_int32_t       flags,              // EFContig and/or EFAll
    int64_t         *actualBytesAdded)  // number of bytes actually allocated
{
    OSErr               err;
    u_int32_t           volumeBlockSize;
    int64_t             blocksToAdd;
    int64_t             bytesThisExtent;
    HFSPlusExtentKey    foundKey;
    HFSPlusExtentRecord foundData;
    u_int32_t           foundIndex;
    u_int32_t           hint;
    u_int32_t           nextBlock;
    u_int32_t           startBlock;
    Boolean             allOrNothing;
    Boolean             forceContig;
    Boolean             wantContig;
    Boolean             useMetaZone;
    Boolean             needsFlush;
    int                 allowFlushTxns;
    u_int32_t           actualStartBlock;
    u_int32_t           actualNumBlocks;
    u_int32_t           numExtentsPerRecord = 0;
    int64_t             maximumBytes;
    int64_t             availbytes;
    int64_t             peof;
    u_int32_t           prevblocks;
    uint32_t            fastdev = 0;

    struct hfsmount *hfsmp = (struct hfsmount*)vcb;
    allowFlushTxns = 0;
    needsFlush = false;
    *actualBytesAdded = 0;
    volumeBlockSize = vcb->blockSize;
    allOrNothing = ((flags & kEFAllMask) != 0);
    forceContig = ((flags & kEFContigMask) != 0);
    prevblocks = fcb->ff_blocks;

    numExtentsPerRecord = kHFSPlusExtentDensity;

    //
    // Determine how many blocks need to be allocated.
    // Round up the number of desired bytes to add.
    //
    blocksToAdd = howmany(bytesToAdd, volumeBlockSize);
    bytesToAdd = (int64_t)((int64_t)blocksToAdd * (int64_t)volumeBlockSize);

    /*
     * For deferred allocations just reserve the blocks.
     */
    if ((flags & kEFDeferMask)
        && (vcb->vcbSigWord == kHFSPlusSigWord)
        && (bytesToAdd < (int64_t)HFS_MAX_DEFERED_ALLOC)
        && (blocksToAdd < hfs_freeblks(VCBTOHFS(vcb), 1))) {
        hfs_lock_mount (hfsmp);
        vcb->loanedBlocks += blocksToAdd;
        hfs_unlock_mount(hfsmp);

        fcb->ff_unallocblocks += blocksToAdd;
        FTOC(fcb)->c_blocks += blocksToAdd;
        fcb->ff_blocks += blocksToAdd;

        /*
         * We haven't touched the disk here; no blocks have been
         * allocated and the volume will not be inconsistent if we
         * don't update the catalog record immediately.
         */
        FTOC(fcb)->c_flag |= C_MINOR_MOD;
        *actualBytesAdded = bytesToAdd;
        return (0);
    }
    /*
     * Give back any unallocated blocks before doing real allocations.
     */
    if (fcb->ff_unallocblocks > 0) {
        u_int32_t loanedBlocks;

        loanedBlocks = fcb->ff_unallocblocks;
        blocksToAdd += loanedBlocks;
        bytesToAdd = (int64_t)blocksToAdd * (int64_t)volumeBlockSize;
        FTOC(fcb)->c_blocks -= loanedBlocks;
        fcb->ff_blocks -= loanedBlocks;
        fcb->ff_unallocblocks = 0;

        hfs_lock_mount(hfsmp);
        vcb->loanedBlocks -= loanedBlocks;
        hfs_unlock_mount(hfsmp);
    }

    //
    // If the file's clump size is larger than the allocation block size,
    // then set the maximum number of bytes to the requested number of bytes
    // rounded up to a multiple of the clump size.
    //
    if ((vcb->vcbClpSiz > (int32_t)volumeBlockSize)
        && (bytesToAdd < (int64_t)HFS_MAX_DEFERED_ALLOC)
        && (flags & kEFNoClumpMask) == 0) {
        maximumBytes = (int64_t)howmany(bytesToAdd, vcb->vcbClpSiz);
        maximumBytes *= vcb->vcbClpSiz;
    } else {
        maximumBytes = bytesToAdd;
    }

    //
    // If allocation is all-or-nothing, make sure there are
    // enough free blocks on the volume (quick test).
    //
    if (allOrNothing &&
        (blocksToAdd > hfs_freeblks(VCBTOHFS(vcb), flags & kEFReserveMask))) {
        err = dskFulErr;
        goto ErrorExit;
    }

    //
    // See if there are already enough blocks allocated to the file.
    //
    peof = ((int64_t)fcb->ff_blocks * (int64_t)volumeBlockSize) + bytesToAdd;    // potential new PEOF
    err = SearchExtentFile(vcb, fcb, peof-1, &foundKey, foundData, &foundIndex, &hint, &nextBlock);
    if (err == noErr) {
        // Enough blocks are already allocated.  Just update the FCB to reflect the new length.
        fcb->ff_blocks = (uint32_t)( peof / (int64_t)volumeBlockSize );
        FTOC(fcb)->c_blocks += (bytesToAdd / volumeBlockSize);
        FTOC(fcb)->c_flag |= C_MODIFIED;
        goto Exit;
    }
    if (err != fxRangeErr)    // Any real error?
        goto ErrorExit;       // Yes, so exit immediately

    //
    // Adjust the PEOF to the end of the last extent.
    //
    bytesThisExtent = (int64_t)(nextBlock - fcb->ff_blocks) * (int64_t)volumeBlockSize;
    if (bytesThisExtent != 0) {
        fcb->ff_blocks = nextBlock;
        FTOC(fcb)->c_blocks += (bytesThisExtent / volumeBlockSize);
        FTOC(fcb)->c_flag |= C_MODIFIED;
        bytesToAdd -= bytesThisExtent;
    }

    //
    // Allocate some more space.
    //
    // First try a contiguous allocation (of the whole amount).
    // If that fails, get whatever we can.
    //        If forceContig, then take whatever we got
    //        else, keep getting bits and pieces (non-contig)

    /*
     * Note that for sparse devices (like sparse bundle dmgs), we
     * should only be aggressive with re-using once-allocated pieces
     * if we're not dealing with system files.  If we're trying to operate
     * on behalf of a system file, we need the maximum contiguous amount
     * possible.  For non-system files we favor locality and fragmentation over
     * contiguity as it can result in fewer blocks being needed from the underlying
     * filesystem that the sparse image resides upon.
     */
    wantContig = true;
    useMetaZone = flags & kEFMetadataMask;
    do {
        if (blockHint != 0)
            startBlock = blockHint;
        else
            startBlock = foundData[foundIndex].startBlock + foundData[foundIndex].blockCount;

        actualNumBlocks = 0;
        actualStartBlock = 0;

        /* Find number of free blocks based on reserved block flag option */
        availbytes = (int64_t)hfs_freeblks(VCBTOHFS(vcb), flags & kEFReserveMask) *
                     (int64_t)volumeBlockSize;
        if (availbytes <= 0) {
            err = dskFulErr;
        } else {
            if (wantContig && (availbytes < bytesToAdd)) {
                err = dskFulErr;
            }
            else {
                uint32_t ba_flags = fastdev;

                if (wantContig) {
                    ba_flags |= HFS_ALLOC_FORCECONTIG;
                }
                if (useMetaZone) {
                    ba_flags |= HFS_ALLOC_METAZONE;
                }
                if (allowFlushTxns) {
                    ba_flags |= HFS_ALLOC_FLUSHTXN;
                }

                err = BlockAllocate(
                    vcb,
                    startBlock,
                    (uint32_t)howmany(MIN(bytesToAdd, availbytes), (int64_t)volumeBlockSize),
                    (uint32_t)howmany(MIN(maximumBytes, availbytes), (int64_t)volumeBlockSize),
                    ba_flags,
                    &actualStartBlock,
                    &actualNumBlocks);
            }
        }
        // Retry ladder: progressively relax constraints when the allocator
        // reports disk-full (contiguity -> metadata zone -> journal flush).
        if (err == dskFulErr) {
            if (forceContig) {
                if (allowFlushTxns == 0) {
                    /* If we're forcing contiguity, re-try but allow plucking from recently freed regions */
                    allowFlushTxns = 1;
                    wantContig = 1;
                    err = noErr;
                    continue;
                }
                else {
                    break;    // AllocContig failed because not enough contiguous space
                }
            }
            if (wantContig) {
                //    Couldn't get one big chunk, so get whatever we can.
                err = noErr;
                wantContig = false;
                continue;
            }
            if (actualNumBlocks != 0)
                err = noErr;

            if (useMetaZone == 0) {
                /* Couldn't get anything so dip into metadat zone */
                err = noErr;
                useMetaZone = 1;
                continue;
            }

            /* If we couldn't find what we needed without flushing the journal, then go ahead and do it now */
            if (allowFlushTxns == 0) {
                allowFlushTxns = 1;
                err = noErr;
                continue;
            }

        }
        if (err == noErr) {
            //    Add the new extent to the existing extent record, or create a new one.
            if ((actualStartBlock == startBlock) && (blockHint == 0)) {
                //    We grew the file's last extent, so just adjust the number of blocks.
                foundData[foundIndex].blockCount += actualNumBlocks;
                err = UpdateExtentRecord(vcb, fcb, 0, &foundKey, foundData, hint);
                if (err != noErr) break;
            }
            else {
                u_int16_t    i;

                //    Need to add a new extent.  See if there is room in the current record.
                if (foundData[foundIndex].blockCount != 0)    //    Is current extent free to use?
                    ++foundIndex;                             //     No, so use the next one.
                if (foundIndex == numExtentsPerRecord) {
                    //    This record is full.  Need to create a new one.
                    if (FTOC(fcb)->c_fileid == kHFSExtentsFileID) {
                        (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks, 0);
                        err = dskFulErr;        // Oops.  Can't extend extents file past first record.
                        break;
                    }

                    foundKey.keyLength = kHFSPlusExtentKeyMaximumLength;
                    if (FORK_IS_RSRC(fcb))
                        foundKey.forkType = kResourceForkType;
                    else
                        foundKey.forkType = kDataForkType;
                    foundKey.pad = 0;
                    foundKey.fileID = FTOC(fcb)->c_fileid;
                    foundKey.startBlock = nextBlock;

                    foundData[0].startBlock = actualStartBlock;
                    foundData[0].blockCount = actualNumBlocks;

                    // zero out remaining extents...
                    for (i = 1; i < kHFSPlusExtentDensity; ++i)
                    {
                        foundData[i].startBlock = 0;
                        foundData[i].blockCount = 0;
                    }

                    foundIndex = 0;

                    err = CreateExtentRecord(vcb, &foundKey, foundData, &hint);
                    if (err == fxOvFlErr) {
                        // We couldn't create an extent record because extents B-tree
                        // couldn't grow.  Dellocate the extent just allocated and
                        // return a disk full error.
                        (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks, 0);
                        err = dskFulErr;
                    }
                    if (err != noErr) break;

                    needsFlush = true;        //    We need to update the B-tree header
                }
                else {
                    //    Add a new extent into this record and update.
                    foundData[foundIndex].startBlock = actualStartBlock;
                    foundData[foundIndex].blockCount = actualNumBlocks;
                    err = UpdateExtentRecord(vcb, fcb, 0, &foundKey, foundData, hint);
                    if (err != noErr) break;
                }
            }

            // Figure out how many bytes were actually allocated.
            // NOTE: BlockAllocate could have allocated more than we asked for.
            // Don't set the PEOF beyond what our client asked for.
            nextBlock += actualNumBlocks;
            bytesThisExtent = (int64_t)((int64_t)actualNumBlocks * (int64_t)volumeBlockSize);
            if (bytesThisExtent > bytesToAdd) {
                bytesToAdd = 0;
            }
            else {
                bytesToAdd -= bytesThisExtent;
                maximumBytes -= bytesThisExtent;
            }
            fcb->ff_blocks += (bytesThisExtent / volumeBlockSize);
            FTOC(fcb)->c_blocks += (bytesThisExtent / volumeBlockSize);
            FTOC(fcb)->c_flag |= C_MODIFIED;

            //    If contiguous allocation was requested, then we've already got one contiguous
            //    chunk.  If we didn't get all we wanted, then adjust the error to disk full.
            if (forceContig) {
                if (bytesToAdd != 0)
                    err = dskFulErr;
                break;            //    We've already got everything that's contiguous
            }
        }
    } while (err == noErr && bytesToAdd);

ErrorExit:
Exit:
    if (VCBTOHFS(vcb)->hfs_flags & HFS_METADATA_ZONE) {
        /* Keep the roving allocator out of the metadata zone. */
        if (vcb->nextAllocation >= VCBTOHFS(vcb)->hfs_metazone_start &&
            vcb->nextAllocation <= VCBTOHFS(vcb)->hfs_metazone_end) {
            hfs_lock_mount (hfsmp);
            HFS_UPDATE_NEXT_ALLOCATION(vcb, VCBTOHFS(vcb)->hfs_metazone_end + 1);
            MarkVCBDirty(vcb);
            hfs_unlock_mount(hfsmp);
        }
    }
    // Report growth even on partial success (err may still be set).
    if (prevblocks < fcb->ff_blocks) {
        *actualBytesAdded = (int64_t)(fcb->ff_blocks - prevblocks) * (int64_t)volumeBlockSize;
    } else {
        *actualBytesAdded = 0;
    }

    if (needsFlush)
        (void) FlushExtentFile(vcb);

    return err;
}



//_________________________________________________________________________________
//
// Routine:     TruncateFileC
//
// Function:    Truncates the disk space allocated to a file.  The file space is
//              truncated to a specified new PEOF rounded up to the next allocation
//              block boundry.  If the 'TFTrunExt' option is specified, the file is
//              truncated to the end of the extent containing the new PEOF.
//
//_________________________________________________________________________________

OSErr TruncateFileC (
    ExtendedVCB     *vcb,               // volume that file resides on
    FCB             *fcb,               // FCB of file to truncate
    int64_t         peof,               // new physical size for file
    int             deleted,            // if nonzero, the file's catalog record has already been deleted.
    int             rsrc,               // does this represent a resource fork or not?
    uint32_t        fileid,             // the fileid of the file we're manipulating.
    Boolean         truncateToExtent)   // if true, truncate to end of extent containing newPEOF

{
    OSErr               err;
    u_int32_t           nextBlock;       //    next file allocation block to consider
    u_int32_t           startBlock;      //    Physical (volume) allocation block number of start of a range
    u_int32_t           physNumBlocks;   //    Number of allocation blocks in file (according to PEOF)
    u_int32_t           numBlocks;
    HFSPlusExtentKey    key;             //    key for current extent record; key->keyLength == 0 if FCB's extent record
    u_int32_t           hint;            //    BTree hint corresponding to key
    HFSPlusExtentRecord extentRecord;
    u_int32_t           extentIndex;
    u_int32_t           extentNextBlock;
    u_int32_t           numExtentsPerRecord;
    int64_t             temp64;
    u_int8_t            forkType;
    Boolean             extentChanged;   //    true if we actually changed an extent
    Boolean             recordDeleted;   //    true if an extent record got deleted

    recordDeleted = false;

    if (vcb->vcbSigWord == kHFSPlusSigWord) {
        numExtentsPerRecord = kHFSPlusExtentDensity;
    }
    else {
        numExtentsPerRecord = kHFSExtentDensity;
    }

    if (rsrc) {
        forkType = kResourceForkType;
    }
    else {
        forkType = kDataForkType;
    }

    temp64 = fcb->ff_blocks;
    physNumBlocks = (u_int32_t)temp64;

    //
    //    Round newPEOF up to a multiple of the allocation block size.  If new size is
    //    two gigabytes or more, then round down by one allocation block (??? really?
    //    shouldn't that be an error?).
    //
    nextBlock = (uint32_t)howmany(peof, (int64_t)vcb->blockSize);    // number of allocation blocks to remain in file
    peof = (int64_t)((int64_t)nextBlock * (int64_t)vcb->blockSize);  // number of bytes in those blocks

    //
    //    Update FCB's length
    //
    /*
     * XXX Any errors could cause ff_blocks and c_blocks to get out of sync...
     */
    numBlocks = (uint32_t)( peof / (int64_t)vcb->blockSize );
    if (!deleted) {
        FTOC(fcb)->c_blocks -= (fcb->ff_blocks - numBlocks);
    }
    fcb->ff_blocks = numBlocks;

    // this catalog entry is modified and *must* get forced
    // to disk when hfs_update() is called
    if (!deleted) {
        /*
         * If the file is already C_NOEXISTS, then the catalog record
         * has been removed from disk already.  We wouldn't need to force
         * another update
         */
        FTOC(fcb)->c_flag |= C_MODIFIED;
    }
    //
    //    If the new PEOF is 0, then truncateToExtent has no meaning (we should always deallocate
    //    all storage).
    //
    if (peof == 0) {
        int i;

        //    Deallocate all the extents for this fork
        err = DeallocateFork(vcb, fileid, forkType, fcb->fcbExtents, &recordDeleted);
        if (err != noErr) goto ErrorExit;    //    got some error, so return it

        //    Update the catalog extent record (making sure it's zeroed out)
        if (err == noErr) {
            for (i=0; i < kHFSPlusExtentDensity; i++) {
                fcb->fcbExtents[i].startBlock = 0;
                fcb->fcbExtents[i].blockCount = 0;
            }
        }
        goto Done;
    }

    //
    //    Find the extent containing byte (peof-1).  This is the last extent we'll keep.
    //    (If truncateToExtent is true, we'll keep the whole extent; otherwise, we'll only
    //    keep up through peof).  The search will tell us how many allocation blocks exist
    //    in the found extent plus all previous extents.
    //
    err = SearchExtentFile(vcb, fcb, peof-1, &key, extentRecord, &extentIndex, &hint, &extentNextBlock);
    if (err != noErr) goto ErrorExit;

    extentChanged = false;        //    haven't changed the extent yet

    if (!truncateToExtent) {
        //
        //    Shorten this extent.  It may be the case that the entire extent gets
        //    freed here.
        //
        numBlocks = extentNextBlock - nextBlock;    //    How many blocks in this extent to free up
        if (numBlocks != 0) {
            //    Compute first volume allocation block to free
            startBlock = extentRecord[extentIndex].startBlock + extentRecord[extentIndex].blockCount - numBlocks;
            //    Free the blocks in bitmap
            err = BlockDeallocate(vcb, startBlock, numBlocks, 0);
            if (err != noErr) goto ErrorExit;
            //    Adjust length of this extent
            extentRecord[extentIndex].blockCount -= numBlocks;
            //    If extent is empty, set start block to 0
            if (extentRecord[extentIndex].blockCount == 0)
                extentRecord[extentIndex].startBlock = 0;
            //    Remember that we changed the extent record
            extentChanged = true;
        }
    }

    //
    //    Now move to the next extent in the record, and set up the file allocation block number
    //
    nextBlock = extentNextBlock;        //    Next file allocation block to free
    ++extentIndex;                      //    Its index within the extent record

    //
    //    Release all following extents in this extent record.  Update the record.
    //
    while (extentIndex < numExtentsPerRecord && extentRecord[extentIndex].blockCount != 0) {
        numBlocks = extentRecord[extentIndex].blockCount;
        //    Deallocate this extent
        err = BlockDeallocate(vcb, extentRecord[extentIndex].startBlock, numBlocks, 0);
        if (err != noErr) goto ErrorExit;
        //    Update next file allocation block number
        nextBlock += numBlocks;
        //    Zero out start and length of this extent to delete it from record
        extentRecord[extentIndex].startBlock = 0;
        extentRecord[extentIndex].blockCount = 0;
        //    Remember that we changed an extent
        extentChanged = true;
        //    Move to next extent in record
        ++extentIndex;
    }

    //
    //    If any of the extents in the current record were changed, then update that
    //    record (in the FCB, or extents file).
    //
    if (extentChanged) {
        err = UpdateExtentRecord(vcb, fcb, deleted, &key, extentRecord, hint);
        if (err != noErr) goto ErrorExit;
    }

    //
    //    If there are any following allocation blocks, then we need
    //    to seach for their extent records and delete those allocation
    //    blocks.
    //
    if (nextBlock < physNumBlocks)
        err = TruncateExtents(vcb, forkType, fileid, nextBlock, &recordDeleted);

Done:
ErrorExit:
    if (recordDeleted)
        (void) FlushExtentFile(vcb);

    return err;
}


/*
 * HeadTruncateFile
 *
 * HFS Plus only.  Removes `headblks` allocation blocks from the FRONT of a
 * fork, shifting the surviving ("tail") extents down into the catalog
 * extent record.  After the first successful BlockDeallocate, subsequent
 * errors are logged and ignored so the file is left in a known state.
 */
OSErr HeadTruncateFile (
    ExtendedVCB  *vcb,
    FCB          *fcb,
    u_int32_t    headblks)
{
    HFSPlusExtentRecord extents;
    HFSPlusExtentRecord tailExtents;       // extents that survive the head truncation
    HFSCatalogNodeID    fileID;
    u_int8_t            forkType;
    u_int32_t           blkcnt = 0;
    u_int32_t           startblk;
    u_int32_t           blksfreed;
    int                 i, j;
    int                 error = 0;
    int                 lockflags;


    if (vcb->vcbSigWord != kHFSPlusSigWord)
        return (-1);

    forkType = FORK_IS_RSRC(fcb) ? kResourceForkType : kDataForkType;
    fileID = FTOC(fcb)->c_fileid;
    bzero(tailExtents, sizeof(tailExtents));

    blksfreed = 0;
    startblk = 0;

    /*
     * Process catalog resident extents
     */
    for (i = 0, j = 0; i < kHFSPlusExtentDensity; ++i) {
        blkcnt = fcb->fcbExtents[i].blockCount;
        if (blkcnt == 0)
            break;  /* end of extents */

        if (blksfreed < headblks) {
            error = BlockDeallocate(vcb, fcb->fcbExtents[i].startBlock, blkcnt, 0);
            /*
             * Any errors after the first BlockDeallocate
             * must be ignored so we can put the file in
             * a known state.
             */
            if (error ) {
                if (i == 0)
                    goto ErrorExit;  /* uh oh */
                else {
                    error = 0;
                    LFHFS_LOG(LEVEL_ERROR , "HeadTruncateFile: problems deallocating %s (%d)\n",FTOC(fcb)->c_desc.cd_nameptr ? (const char *)FTOC(fcb)->c_desc.cd_nameptr : "", error);
                }
            }

            blksfreed += blkcnt;
            fcb->fcbExtents[i].startBlock = 0;
            fcb->fcbExtents[i].blockCount = 0;
        } else {
            // Past the truncation point: keep this extent in the tail.
            tailExtents[j].startBlock = fcb->fcbExtents[i].startBlock;
            tailExtents[j].blockCount = blkcnt;
            ++j;
        }
        startblk += blkcnt;
    }

    if (blkcnt == 0)
        goto CopyExtents;

    lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);

    /*
     * Process overflow extents
     */
    for (;;) {
        u_int32_t  extblks;

        error = FindExtentRecord(vcb, forkType, fileID, startblk, false, NULL, extents, NULL);
        if (error) {
            /*
             * Any errors after the first BlockDeallocate
             * must be ignored so we can put the file in
             * a known state.
             */
            if (error != btNotFound)
                LFHFS_LOG(LEVEL_ERROR , "HeadTruncateFile: problems finding extents %s (%d)\n",
                          FTOC(fcb)->c_desc.cd_nameptr ? (const char *)FTOC(fcb)->c_desc.cd_nameptr : "", error);
            error = 0;
            break;
        }

        for(i = 0, extblks = 0; i < kHFSPlusExtentDensity; ++i) {
            blkcnt = extents[i].blockCount;
            if (blkcnt == 0)
                break;  /* end of extents */

            if (blksfreed < headblks) {
                error = BlockDeallocate(vcb, extents[i].startBlock, blkcnt, 0);
                if (error) {
                    LFHFS_LOG(LEVEL_ERROR , "HeadTruncateFile: problems deallocating %s (%d)\n",
                              FTOC(fcb)->c_desc.cd_nameptr ? (const char *)FTOC(fcb)->c_desc.cd_nameptr : "", error);
                }
                blksfreed += blkcnt;
            } else {
                tailExtents[j].startBlock = extents[i].startBlock;
                tailExtents[j].blockCount = blkcnt;
                ++j;
            }
            extblks += blkcnt;
        }

        error = DeleteExtentRecord(vcb, forkType, fileID, startblk);
        if (error) {
            LFHFS_LOG(LEVEL_ERROR , "HeadTruncateFile: problems deallocating %s (%d)\n",
                      FTOC(fcb)->c_desc.cd_nameptr ? (const char *)FTOC(fcb)->c_desc.cd_nameptr : "", error);
            error = 0;
        }

        if (blkcnt == 0)
            break;  /* all done */

        startblk += extblks;
    }
    hfs_systemfile_unlock(vcb, lockflags);

CopyExtents:
    if (blksfreed) {
        // Shift the surviving extents into the catalog record and shrink the fork.
        bcopy(tailExtents, fcb->fcbExtents, sizeof(tailExtents));
        blkcnt = fcb->ff_blocks - headblks;
        FTOC(fcb)->c_blocks -= headblks;
        fcb->ff_blocks = blkcnt;

        FTOC(fcb)->c_flag |= C_MODIFIED;
        FTOC(fcb)->c_touch_chgtime = TRUE;

        (void) FlushExtentFile(vcb);
    }

ErrorExit:
    return MacToVFSError(error);
}

//-------------------------------------------------------------------------------
// Routine:     SearchExtentRecord (was XRSearch)
//
// Function:    Searches extent record for the extent mapping a given file
//              allocation block number (FABN).
+// +// Input: searchFABN - desired FABN +// extentData - pointer to extent data record (xdr) +// extentDataStartFABN - beginning FABN for extent record +// +// Output: foundExtentDataOffset - offset to extent entry within xdr +// result = noErr, offset to extent mapping desired FABN +// result = FXRangeErr, offset to last extent in record +// endingFABNPlusOne - ending FABN +1 +// noMoreExtents - True if the extent was not found, and the +// extent record was not full (so don't bother +// looking in subsequent records); false otherwise. +// +// Result: noErr = ok +// FXRangeErr = desired FABN > last mapped FABN in record +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã + +static OSErr SearchExtentRecord( + ExtendedVCB *vcb, + u_int32_t searchFABN, + const HFSPlusExtentRecord extentData, + u_int32_t extentDataStartFABN, + u_int32_t *foundExtentIndex, + u_int32_t *endingFABNPlusOne, + Boolean *noMoreExtents) +{ + OSErr err = noErr; + u_int32_t extentIndex; + /* Set it to the HFS std value */ + u_int32_t numberOfExtents = kHFSExtentDensity; + u_int32_t numAllocationBlocks; + Boolean foundExtent; + + *endingFABNPlusOne = extentDataStartFABN; + *noMoreExtents = false; + foundExtent = false; + + /* Override numberOfExtents for HFS+/HFSX */ + numberOfExtents = kHFSPlusExtentDensity; + + for( extentIndex = 0; extentIndex < numberOfExtents; ++extentIndex ) + { + + // Loop over the extent record and find the search FABN. + + numAllocationBlocks = extentData[extentIndex].blockCount; + if ( numAllocationBlocks == 0 ) + { + break; + } + + *endingFABNPlusOne += numAllocationBlocks; + + if( searchFABN < *endingFABNPlusOne ) + { + // Found the extent. + foundExtent = true; + break; + } + } + + if( foundExtent ) + { + // Found the extent. Note the extent offset + *foundExtentIndex = extentIndex; + } + else + { + // Did not find the extent. 
Set foundExtentDataOffset accordingly + if( extentIndex > 0 ) + { + *foundExtentIndex = extentIndex - 1; + } + else + { + *foundExtentIndex = 0; + } + + // If we found an empty extent, then set noMoreExtents. + if (extentIndex < numberOfExtents) + *noMoreExtents = true; + + // Finally, return an error to the caller + err = fxRangeErr; + } + + return( err ); +} + +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +// Routine: SearchExtentFile (was XFSearch) +// +// Function: Searches extent file (including the FCB resident extent record) +// for the extent mapping a given file position. +// +// Input: vcb - VCB pointer +// fcb - FCB pointer +// filePosition - file position (byte address) +// +// Output: foundExtentKey - extent key record (xkr) +// If extent was found in the FCB's resident extent record, +// then foundExtentKey->keyLength will be set to 0. +// foundExtentData - extent data record(xdr) +// foundExtentIndex - index to extent entry in xdr +// result = 0, offset to extent mapping desired FABN +// result = FXRangeErr, offset to last extent in record +// (i.e., kNumExtentsPerRecord-1) +// extentBTreeHint - BTree hint for extent record +// kNoHint = Resident extent record +// endingFABNPlusOne - ending FABN +1 +// +// Result: +// noErr Found an extent that contains the given file position +// FXRangeErr Given position is beyond the last allocated extent +// (other) (some other internal I/O error) +//ããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããããã +OSErr SearchExtentFile( + ExtendedVCB *vcb, + const FCB *fcb, + int64_t filePosition, + HFSPlusExtentKey *foundExtentKey, + HFSPlusExtentRecord foundExtentData, + u_int32_t *foundExtentIndex, + u_int32_t *extentBTreeHint, + u_int32_t *endingFABNPlusOne ) +{ + OSErr err; + u_int32_t filePositionBlock; + int64_t temp64; + Boolean noMoreExtents; + int lockflags; + + temp64 = filePosition / (int64_t)vcb->blockSize; + filePositionBlock = (u_int32_t)temp64; 
+ + bcopy ( fcb->fcbExtents, foundExtentData, sizeof(HFSPlusExtentRecord)); + + // Search the resident FCB first. + err = SearchExtentRecord( vcb, filePositionBlock, foundExtentData, 0, + foundExtentIndex, endingFABNPlusOne, &noMoreExtents ); + + if( err == noErr ) { + // Found the extent. Set results accordingly + *extentBTreeHint = kNoHint; // no hint, because not in the BTree + foundExtentKey->keyLength = 0; // 0 = the FCB itself + + goto Exit; + } + + // Didn't find extent in FCB. If FCB's extent record wasn't full, there's no point + // in searching the extents file. Note that SearchExtentRecord left us pointing at + // the last valid extent (or the first one, if none were valid). This means we need + // to fill in the hint and key outputs, just like the "if" statement above. + if ( noMoreExtents ) { + *extentBTreeHint = kNoHint; // no hint, because not in the BTree + foundExtentKey->keyLength = 0; // 0 = the FCB itself + err = fxRangeErr; // There are no more extents, so must be beyond PEOF + goto Exit; + } + + // + // Find the desired record, or the previous record if it is the same fork + // + lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); + + err = FindExtentRecord(vcb, FORK_IS_RSRC(fcb) ? kResourceForkType : kDataForkType, + FTOC(fcb)->c_fileid, filePositionBlock, true, foundExtentKey, foundExtentData, extentBTreeHint); + hfs_systemfile_unlock(vcb, lockflags); + + if (err == btNotFound) { + // + // If we get here, the desired position is beyond the extents in the FCB, and there are no extents + // in the extents file. Return the FCB's extents and a range error. + // + *extentBTreeHint = kNoHint; + foundExtentKey->keyLength = 0; + (void)GetFCBExtentRecord(fcb, foundExtentData); + // Note: foundExtentIndex and endingFABNPlusOne have already been set as a result of the very + // first SearchExtentRecord call in this function (when searching in the FCB's extents, and + // we got a range error). 
+ + return fxRangeErr; + } + + // + // If we get here, there was either a BTree error, or we found an appropriate record. + // If we found a record, then search it for the correct index into the extents. + // + if (err == noErr) { + // Find appropriate index into extent record + err = SearchExtentRecord(vcb, filePositionBlock, foundExtentData, foundExtentKey->startBlock, + foundExtentIndex, endingFABNPlusOne, &noMoreExtents); + } + +Exit: + return err; +} + + +//============================================================================ +// Routine: UpdateExtentRecord +// +// Function: Write new extent data to an existing extent record with a given key. +// If all of the extents are empty, and the extent record is in the +// extents file, then the record is deleted. +// +// Input: vcb - the volume containing the extents +// fcb - the file that owns the extents +// deleted - whether or not the file is already deleted +// extentFileKey - pointer to extent key record (xkr) +// If the key length is 0, then the extents are actually part +// of the catalog record, stored in the FCB. 
+// extentData - pointer to extent data record (xdr) +// extentBTreeHint - hint for given key, or kNoHint +// +// Result: noErr = ok +// (other) = error from BTree +//============================================================================ + +static OSErr UpdateExtentRecord (ExtendedVCB *vcb, FCB *fcb, int deleted, + const HFSPlusExtentKey *extentFileKey, + const HFSPlusExtentRecord extentData, + u_int32_t extentBTreeHint) +{ + OSErr err = noErr; + + if (extentFileKey->keyLength == 0) { // keyLength == 0 means the FCB's extent record + BlockMoveData(extentData, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); + if (!deleted) { + FTOC(fcb)->c_flag |= C_MODIFIED; + } + } + else { + BTreeIterator *btIterator = NULL; + FSBufferDescriptor btRecord; + u_int16_t btRecordSize; + FCB * btFCB; + int lockflags; + + // + // Need to find and change a record in Extents BTree + // + btFCB = GetFileControlBlock(vcb->extentsRefNum); + + btIterator = hfs_mallocz(sizeof(BTreeIterator)); + + /* + * The lock taken by callers of ExtendFileC/TruncateFileC is + * speculative and only occurs when the file already has + * overflow extents. So we need to make sure we have the lock + * here. The extents btree lock can be nested (its recursive) + * so we always take it here. 
+ */ + lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); + + /* HFS+/HFSX */ + HFSPlusExtentRecord foundData; // The extent data actually found + + BlockMoveData(extentFileKey, &btIterator->key, sizeof(HFSPlusExtentKey)); + + btIterator->hint.index = 0; + btIterator->hint.nodeNum = extentBTreeHint; + + btRecord.bufferAddress = &foundData; + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + btRecord.itemCount = 1; + + err = BTSearchRecord(btFCB, btIterator, &btRecord, &btRecordSize, btIterator); + + if (err == noErr) { + BlockMoveData(extentData, &foundData, sizeof(HFSPlusExtentRecord)); + err = BTReplaceRecord(btFCB, btIterator, &btRecord, btRecordSize); + } + (void) BTFlushPath(btFCB); + + hfs_systemfile_unlock(vcb, lockflags); + + hfs_free(btIterator); + } + + return err; +} + +static OSErr GetFCBExtentRecord( + const FCB *fcb, + HFSPlusExtentRecord extents) +{ + + BlockMoveData(fcb->fcbExtents, extents, sizeof(HFSPlusExtentRecord)); + + return noErr; +} + + +//_________________________________________________________________________________ +// +// Routine: ExtentsAreIntegral +// +// Purpose: Ensure that each extent can hold an integral number of nodes +// Called by the NodesAreContiguous function +//_________________________________________________________________________________ + +static Boolean ExtentsAreIntegral( + const HFSPlusExtentRecord extentRecord, + u_int32_t mask, + u_int32_t *blocksChecked, + Boolean *checkedLastExtent) +{ + u_int32_t blocks; + u_int32_t extentIndex; + + *blocksChecked = 0; + *checkedLastExtent = false; + + for(extentIndex = 0; extentIndex < kHFSPlusExtentDensity; extentIndex++) + { + blocks = extentRecord[extentIndex].blockCount; + + if ( blocks == 0 ) + { + *checkedLastExtent = true; + break; + } + + *blocksChecked += blocks; + + if (blocks & mask) + return false; + } + + return true; +} + + +//_________________________________________________________________________________ +// +// Routine: 
NodesAreContiguous +// +// Purpose: Ensure that all b-tree nodes are contiguous on disk +// Called by BTOpenPath during volume mount +//_________________________________________________________________________________ + +Boolean NodesAreContiguous( + ExtendedVCB *vcb, + FCB *fcb, + u_int32_t nodeSize) +{ + u_int32_t mask; + u_int32_t startBlock; + u_int32_t blocksChecked; + u_int32_t hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents; + OSErr result; + Boolean lastExtentReached; + int lockflags; + + + if (vcb->blockSize >= nodeSize) + return TRUE; + + mask = (nodeSize / vcb->blockSize) - 1; + + // check the local extents + (void) GetFCBExtentRecord(fcb, extents); + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) + return FALSE; + + if ( lastExtentReached || + (int64_t)((int64_t)blocksChecked * (int64_t)vcb->blockSize) >= (int64_t)fcb->ff_size) + return TRUE; + + startBlock = blocksChecked; + + lockflags = hfs_systemfile_lock(vcb, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK); + + // check the overflow extents (if any) + while ( !lastExtentReached ) + { + result = FindExtentRecord(vcb, kDataForkType, fcb->ff_cp->c_fileid, startBlock, FALSE, &key, extents, &hint); + if (result) break; + + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) { + hfs_systemfile_unlock(vcb, lockflags); + return FALSE; + } + startBlock += blocksChecked; + } + hfs_systemfile_unlock(vcb, lockflags); + return TRUE; +} diff --git a/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.h b/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.h new file mode 100644 index 0000000..be37956 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_file_extent_mapping.h @@ -0,0 +1,60 @@ +// +// lf_hfs_file_extent_mapping.h +// hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_file_extent_mapping_h +#define lf_hfs_file_extent_mapping_h + +/* File Extent Mapping routines*/ +OSErr FlushExtentFile( ExtendedVCB *vcb ); + +int32_t CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusExtentKey *trialKey ); + +OSErr SearchExtentFile( ExtendedVCB *vcb, + const FCB *fcb, + int64_t filePosition, + HFSPlusExtentKey *foundExtentKey, + HFSPlusExtentRecord foundExtentData, + u_int32_t *foundExtentDataIndex, + u_int32_t *extentBTreeHint, + u_int32_t *endingFABNPlusOne ); + +OSErr TruncateFileC( ExtendedVCB *vcb, + FCB *fcb, + int64_t peof, + int deleted, + int rsrc, + uint32_t fileid, + Boolean truncateToExtent ); + +OSErr ExtendFileC( ExtendedVCB *vcb, + FCB *fcb, + int64_t bytesToAdd, + u_int32_t blockHint, + u_int32_t flags, + int64_t *actualBytesAdded ); + +OSErr MapFileBlockC( ExtendedVCB *vcb, + FCB *fcb, + size_t numberOfBytes, + off_t offset, + daddr64_t *startBlock, + size_t *availableBytes ); + +OSErr HeadTruncateFile( ExtendedVCB *vcb, + FCB *fcb, + u_int32_t headblks ); + +int AddFileExtent( ExtendedVCB *vcb, + FCB *fcb, + u_int32_t startBlock, + u_int32_t blockCount ); + +Boolean NodesAreContiguous( ExtendedVCB *vcb, + FCB *fcb, + u_int32_t nodeSize ); + +#endif /* lf_hfs_file_extent_mapping_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_file_mgr_internal.h b/livefiles_hfs_plugin/lf_hfs_file_mgr_internal.h new file mode 100644 index 0000000..1d156ba --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_file_mgr_internal.h @@ -0,0 +1,250 @@ +// +// lf_hfs_file_mgr_internal.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_file_mgr_internal_h +#define lf_hfs_file_mgr_internal_h + +#include +#include "lf_hfs.h" +#include "lf_hfs_defs.h" +#include "lf_hfs_format.h" +#include "lf_hfs_cnode.h" + + +/* CatalogNodeID is used to track catalog objects */ +typedef u_int32_t HFSCatalogNodeID; + +/* internal error codes*/ +#define ERR_BASE -32767 + +enum { + + /* FXM errors*/ + fxRangeErr = ERR_BASE + 16, /* file position beyond mapped range*/ + fxOvFlErr = ERR_BASE + 17, /* extents file overflow*/ + + /* Unicode errors*/ + uniTooLongErr = ERR_BASE + 24, /* Unicode string too long to convert to Str31*/ + uniBufferTooSmallErr = ERR_BASE + 25, /* Unicode output buffer too small*/ + uniNotMappableErr = ERR_BASE + 26, /* Unicode string can't be mapped to given script*/ + + /* BTree Manager errors*/ + btNotFound = ERR_BASE + 32, /* record not found*/ + btExists = ERR_BASE + 33, /* record already exists*/ + btNoSpaceAvail = ERR_BASE + 34, /* no available space*/ + btNoFit = ERR_BASE + 35, /* record doesn't fit in node */ + btBadNode = ERR_BASE + 36, /* bad node detected*/ + btBadHdr = ERR_BASE + 37, /* bad BTree header record detected*/ + dsBadRotate = ERR_BASE + 64, /* bad BTree rotate*/ + + /* Catalog Manager errors*/ + cmNotFound = ERR_BASE + 48, /* CNode not found*/ + cmExists = ERR_BASE + 49, /* CNode already exists*/ + cmNotEmpty = ERR_BASE + 50, /* directory CNode not empty (valence = 0)*/ + cmRootCN = ERR_BASE + 51, /* invalid reference to root CNode*/ + cmBadNews = ERR_BASE + 52, /* detected bad catalog structure*/ + cmFThdDirErr = ERR_BASE + 53, /* thread belongs to a directory not a file*/ + cmFThdGone = ERR_BASE + 54, /* file thread doesn't exist*/ + cmParentNotFound = ERR_BASE + 55, /* CNode for parent ID does not exist*/ + + /* TFS internal errors*/ + fsDSIntErr = -127 /* Internal file system error*/ +}; + + +/* internal flags*/ + +enum { + kEFAllMask = 0x01, /* allocate all requested bytes or none */ + kEFContigMask = 0x02, /* force contiguous allocation */ + 
kEFReserveMask = 0x04, /* keep block reserve */ + kEFDeferMask = 0x08, /* defer file block allocations */ + kEFNoClumpMask = 0x10, /* don't round up to clump size */ + kEFMetadataMask = 0x20, /* metadata allocation */ + + kTFTrunExtBit = 0, /* truncate to the extent containing new PEOF*/ + kTFTrunExtMask = 1 +}; + +enum { + kUndefinedStrLen = 0, /* Unknown string length */ + kNoHint = 0, + + /* FileIDs variables*/ + kNumExtentsToCache = 4 /* just guessing for ExchangeFiles*/ +}; + + +/* Universal Extent Key */ + +typedef union ExtentKey { + HFSExtentKey hfs; + HFSPlusExtentKey hfsPlus; +} ExtentKey; + +/* Universal extent descriptor */ +typedef union ExtentDescriptor { + HFSExtentDescriptor hfs; + HFSPlusExtentDescriptor hfsPlus; +} ExtentDescriptor; + +/* Universal extent record */ +typedef union ExtentRecord { + HFSExtentRecord hfs; + HFSPlusExtentRecord hfsPlus; +} ExtentRecord; + + +enum { + CMMaxCName = kHFSMaxFileNameChars +}; + + +/* Universal catalog name*/ +typedef union CatalogName { + Str31 pstr; + HFSUniStr255 ustr; +} CatalogName; + + +#define GetFileControlBlock(fref) VTOF((fref)) +#define GetFileRefNumFromFCB(fcb) FTOV((fcb)) + +#define ReturnIfError(result) do { if ( (result) != noErr ) return (result); } while(0) +#define ExitOnError(result) do { if ( (result) != noErr ) goto ErrorExit; } while(0) + + + +/* Catalog Manager Routines (IPI)*/ +OSErr ExchangeFileIDs( ExtendedVCB *volume, + ConstUTF8Param srcName, + ConstUTF8Param destName, + HFSCatalogNodeID srcID, + HFSCatalogNodeID destID, + u_int32_t srcHint, + u_int32_t destHint ); + +OSErr MoveData( ExtendedVCB *vcb, HFSCatalogNodeID srcID, HFSCatalogNodeID destID, int rsrc); + +/* BTree Manager Routines*/ +typedef int32_t (*KeyCompareProcPtr)(void *a, void *b); + +OSErr ReplaceBTreeRecord( FileReference refNum, + const void *key, + u_int32_t hint, + void *newData, + u_int16_t dataSize, + u_int32_t *newHint ); + + +/* Prototypes for exported routines in VolumeAllocation.c*/ + +/* + * Flags for 
BlockAllocate(), BlockDeallocate() and hfs_block_alloc. + * Some of these are for internal use only. See the comment at the + * top of hfs_alloc_int for more details on the semantics of these + * flags. + */ +#define HFS_ALLOC_FORCECONTIG 0x001 //force contiguous block allocation; minblocks must be allocated +#define HFS_ALLOC_METAZONE 0x002 //can use metazone blocks +#define HFS_ALLOC_SKIPFREEBLKS 0x004 //skip checking/updating freeblocks during alloc/dealloc +#define HFS_ALLOC_FLUSHTXN 0x008 //pick best fit for allocation, even if a jnl flush is req'd +#define HFS_ALLOC_TENTATIVE 0x010 //reserved allocation that can be claimed back +#define HFS_ALLOC_LOCKED 0x020 //reserved allocation that can't be claimed back +#define HFS_ALLOC_IGNORE_TENTATIVE 0x040 //Steal tentative blocks if necessary +#define HFS_ALLOC_IGNORE_RESERVED 0x080 //Ignore tentative/committed blocks +#define HFS_ALLOC_USE_TENTATIVE 0x100 //Use the supplied tentative range (if possible) +#define HFS_ALLOC_COMMIT 0x200 //Commit the supplied extent to disk +#define HFS_ALLOC_TRY_HARD 0x400 //Search hard to try and get maxBlocks; implies HFS_ALLOC_FLUSHTXN +#define HFS_ALLOC_ROLL_BACK 0x800 //Reallocate blocks that were just deallocated +//#define HFS_ALLOC_FAST_DEV 0x1000 //Prefer fast device for allocation + +typedef uint32_t hfs_block_alloc_flags_t; + + +OSErr BlockAllocate( ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t minBlocks, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + u_int32_t *startBlock, + u_int32_t *actualBlocks ); + +struct rl_entry; +typedef struct hfs_alloc_extra_args { + // Used with HFS_ALLOC_TRY_HARD and HFS_ALLOC_FORCECONTIG + uint32_t max_blocks; + + // Used with with HFS_ALLOC_USE_TENTATIVE & HFS_ALLOC_COMMIT + struct rl_entry **reservation_in; + + // Used with HFS_ALLOC_TENTATIVE & HFS_ALLOC_LOCKED + struct rl_entry **reservation_out; + + /* + * If the maximum cannot be returned, the allocation will be + * trimmed to the specified alignment after taking 
+ * @alignment_offset into account. @alignment and + * @alignment_offset are both in terms of blocks, *not* bytes. + * The result will be such that: + * + * (block_count + @alignment_offset) % @alignment == 0 + * + * Alignment is *not* guaranteed. + * + * One example where alignment might be useful is in the case + * where the page size is greater than the allocation block size + * and I/O is being performed in multiples of the page size. + */ + int alignment; + int alignment_offset; +} hfs_alloc_extra_args_t; + +/* + * Same as BlockAllocate but slightly different API. + * @extent.startBlock is a hint for where to start searching and + * @extent.blockCount is the minimum number of blocks acceptable. + * Additional arguments can be passed in @extra_args and use will + * depend on @flags. See comment at top of hfs_block_alloc_int for + * more information. + */ +errno_t hfs_block_alloc( hfsmount_t *hfsmp, + HFSPlusExtentDescriptor *extent, + hfs_block_alloc_flags_t flags, + hfs_alloc_extra_args_t *extra_args ); + +OSErr BlockDeallocate( ExtendedVCB *vcb, + u_int32_t firstBlock, + u_int32_t numBlocks, + hfs_block_alloc_flags_t flags ); + +OSErr BlockMarkAllocated( ExtendedVCB *vcb, u_int32_t startingBlock, u_int32_t numBlocks ); + +OSErr BlockMarkFree( ExtendedVCB *vcb, u_int32_t startingBlock, u_int32_t numBlocks ); + +OSErr BlockMarkFreeUnused( ExtendedVCB *vcb, u_int32_t startingBlock, u_int32_t numBlocks ); + +u_int32_t MetaZoneFreeBlocks( ExtendedVCB *vcb ); + +u_int32_t ScanUnmapBlocks( struct hfsmount *hfsmp ); + +int hfs_init_summary( struct hfsmount *hfsmp ); + +errno_t hfs_find_free_extents( struct hfsmount *hfsmp, void (*callback)(void *data, off_t), void *callback_arg ); + +void hfs_free_tentative( hfsmount_t *hfsmp, struct rl_entry **reservation ); + +void hfs_free_locked( hfsmount_t *hfsmp, struct rl_entry **reservation ); + +/* Get the current time in UTC (GMT)*/ +u_int32_t GetTimeUTC( void ); + +u_int32_t LocalToUTC( u_int32_t localTime ); + +u_int32_t 
UTCToLocal( u_int32_t utcTime ); + +#endif /* lf_hfs_file_mgr_internal_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_fileops_handler.c b/livefiles_hfs_plugin/lf_hfs_fileops_handler.c new file mode 100644 index 0000000..44aee23 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_fileops_handler.c @@ -0,0 +1,656 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_fileops_handler.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#include "lf_hfs_fileops_handler.h" +#include "lf_hfs_dirops_handler.h" +#include "lf_hfs.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_vnode.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_xattr.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_file_extent_mapping.h" +#include "lf_hfs_readwrite_ops.h" +#include "lf_hfs_file_mgr_internal.h" + + +int LFHFS_Read ( UVFSFileNode psNode, uint64_t uOffset, size_t iLength, void *pvBuf, size_t *iActuallyRead ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Read (psNode %p, uOffset %llu, iLength %lu)\n", psNode, uOffset, iLength); + VERIFY_NODE_IS_VALID(psNode); + + struct vnode *vp = (vnode_t)psNode; + struct cnode *cp; + struct filefork *fp; + uint64_t filesize; + int retval = 0; + int took_truncate_lock = 0; + *iActuallyRead = 0; + + /* Preflight checks */ + if (!vnode_isreg(vp)) { + /* can only read regular files */ + return ( vnode_isdir(vp) ? EISDIR : EPERM ); + } + + cp = VTOC(vp); + fp = VTOF(vp); + + /* Protect against a size change. */ + hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + took_truncate_lock = 1; + + filesize = fp->ff_size; + /* + * Check the file size. Note that per POSIX spec, we return 0 at + * file EOF, so attempting a read at an offset that is too big + * should just return 0 on HFS+. Since the return value was initialized + * to 0 above, we just jump to exit. HFS Standard has its own behavior. 
+ */ + if (uOffset > filesize) + { + LFHFS_LOG( LEVEL_ERROR, "LFHFS_Read: wanted offset is greater then file size\n" ); + goto exit; + } + + // If we asked to read above the file size, adjust the read size; + if ( uOffset + iLength > filesize ) + { + iLength = filesize - uOffset; + } + + uint64_t uReadStartCluster; + retval = raw_readwrite_read( vp, uOffset, pvBuf, iLength, iActuallyRead, &uReadStartCluster ); + + cp->c_touch_acctime = TRUE; + +exit: + if (took_truncate_lock) + { + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + } + return retval; +} + + +int LFHFS_Write ( UVFSFileNode psNode, uint64_t uOffset, size_t iLength, const void *pvBuf, size_t *iActuallyWrite ) +{ +#pragma unused (psNode, uOffset, iLength, pvBuf, iActuallyWrite) + + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Write (psNode %p, uOffset %llu, iLength %lu)\n", psNode, uOffset, iLength); + VERIFY_NODE_IS_VALID(psNode); + + *iActuallyWrite = 0; + struct vnode *vp = (vnode_t)psNode; + struct cnode *cp; + struct filefork *fp; + struct hfsmount *hfsmp; + off_t origFileSize; + off_t writelimit; + off_t bytesToAdd = 0; + off_t actualBytesAdded; + off_t filebytes; + int eflags = kEFReserveMask; + int retval = 0; + int lockflags; + int cnode_locked = 0; + + int took_truncate_lock = 0; + size_t iActualLengthToWrite = iLength; + + if (!vnode_isreg(vp)) + { + return ( vnode_isdir(vp) ? EISDIR : EPERM ); /* Can only write regular files */ + } + + cp = VTOC(vp); + fp = VTOF(vp); + hfsmp = VTOHFS(vp); + + /* + * Protect against a size change. + */ + hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + took_truncate_lock = 1; + + origFileSize = fp->ff_size; + writelimit = uOffset + iLength; + + /* + * We may need an exclusive truncate lock for several reasons, all + * of which are because we may be writing to a (portion of a) block + * for the first time, and we need to make sure no readers see the + * prior, uninitialized contents of the block. The cases are: + * + * 1. 
We have unallocated (delayed allocation) blocks. We may be + * allocating new blocks to the file and writing to them. + * (A more precise check would be whether the range we're writing + * to contains delayed allocation blocks.) + * 2. We need to extend the file. The bytes between the old EOF + * and the new EOF are not yet initialized. This is important + * even if we're not allocating new blocks to the file. If the + * old EOF and new EOF are in the same block, we still need to + * protect that range of bytes until they are written for the + * first time. + * + * If we had a shared lock with the above cases, we need to try to upgrade + * to an exclusive lock. If the upgrade fails, we will lose the shared + * lock, and will need to take the truncate lock again; the took_truncate_lock + * flag will still be set, causing us to try for an exclusive lock next time. + */ + if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) && + ((fp->ff_unallocblocks != 0) || + (writelimit > origFileSize))) + { + lf_lck_rw_lock_shared_to_exclusive(&cp->c_truncatelock); + /* Store the owner in the c_truncatelockowner field if we successfully upgrade */ + cp->c_truncatelockowner = pthread_self(); + } + + if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { + goto exit; + } + cnode_locked = 1; + + filebytes = blk_to_bytes(fp->ff_blocks, hfsmp->blockSize); + + if ((off_t)uOffset > filebytes + && (blk_to_bytes(hfs_freeblks(hfsmp, ISSET(eflags, kEFReserveMask)) , hfsmp->blockSize) < (off_t)uOffset - filebytes)) + { + retval = ENOSPC; + goto exit; + } + + /* Check if we do not need to extend the file */ + if (writelimit <= filebytes) { + goto sizeok; + } + + bytesToAdd = writelimit - filebytes; + if (hfs_start_transaction(hfsmp) != 0) { + retval = EINVAL; + goto exit; + } + + while (writelimit > filebytes) + { + bytesToAdd = writelimit - filebytes; + + /* Protect extents b-tree and allocation bitmap */ + lockflags = SFL_BITMAP; + if (overflow_extents(fp)) + lockflags |= 
SFL_EXTENTS; + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + + retval = MacToVFSError(ExtendFileC (hfsmp, (FCB*)fp, bytesToAdd, + 0, eflags, &actualBytesAdded)); + + hfs_systemfile_unlock(hfsmp, lockflags); + + if ((actualBytesAdded == 0) && (retval == E_NONE)) + retval = ENOSPC; + if (retval != E_NONE) + break; + filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize; + } + + (void) hfs_update(vp, 0); + (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0); + (void) hfs_end_transaction(hfsmp); + + /* + * If we didn't grow the file enough try a partial write. + * POSIX expects this behavior. + */ + if ((retval == ENOSPC) && (filebytes > (off_t)uOffset)) { + retval = 0; + iActualLengthToWrite -= bytesToAdd; + writelimit = filebytes; + } +sizeok: + if (retval == E_NONE) { + off_t filesize; + + if (writelimit > fp->ff_size) { + filesize = writelimit; + struct timeval tv; + rl_add(fp->ff_size, writelimit - 1 , &fp->ff_invalidranges); + microuptime(&tv); + cp->c_zftimeout = (uint32_t)(tv.tv_sec + ZFTIMELIMIT); + } else + filesize = fp->ff_size; + + + // Fill last cluster with zeros. 
+ if ( origFileSize < (off_t)uOffset ) + { + raw_readwrite_zero_fill_last_block_suffix(vp); + } + + if (filesize > fp->ff_size) { + fp->ff_new_size = filesize; + } + + uint64_t uActuallyWritten; + retval = raw_readwrite_write(vp, uOffset, (void*)pvBuf, iActualLengthToWrite, &uActuallyWritten); + *iActuallyWrite = uActuallyWritten; + if (retval) { + fp->ff_new_size = 0; /* no longer extending; use ff_size */ + goto ioerr_exit; + } + + if (filesize > origFileSize) { + fp->ff_size = filesize; + } + fp->ff_new_size = 0; /* ff_size now has the correct size */ + } + + hfs_flush(hfsmp, HFS_FLUSH_CACHE); + +ioerr_exit: + if (!cnode_locked) + { + hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS); + cnode_locked = 1; + } + + if (*iActuallyWrite > 0) + { + cp->c_flag |= C_MODIFIED; + cp->c_touch_chgtime = TRUE; + cp->c_touch_modtime = TRUE; + hfs_incr_gencount(cp); + } + if (retval) + { + (void)hfs_truncate(vp, origFileSize, IO_SYNC, 0); + } + else if (*iActuallyWrite > 0) + { + retval = hfs_update(vp, 0); + } + + /* Updating vcbWrCnt doesn't need to be atomic. */ + hfsmp->vcbWrCnt++; + +exit: + if (retval && took_truncate_lock + && cp->c_truncatelockowner == pthread_self()) { + fp->ff_new_size = 0; + rl_remove(fp->ff_size, RL_INFINITY, &fp->ff_invalidranges); + } + + if (cnode_locked) { + hfs_unlock(cp); + } + + if (took_truncate_lock) { + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + } + + return (retval); +} + +int LFHFS_Create ( UVFSFileNode psNode, const char *pcName, const UVFSFileAttributes *psAttr, UVFSFileNode *ppsOutNode ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Create\n"); + VERIFY_NODE_IS_VALID(psNode); + + int iError = 0; + vnode_t psParentVnode = (vnode_t)psNode; + + if (!vnode_isdir(psParentVnode)) + { + iError = ENOTDIR; + goto exit; + } + + //@param cnp Name information for new directory. 
+ struct componentname sNewFileComponentName = {0}; + sNewFileComponentName.cn_nameptr = (char*) pcName; + sNewFileComponentName.cn_namelen = (int) strlen(pcName); + + iError = hfs_vnop_create(psParentVnode, (vnode_t*)ppsOutNode, &sNewFileComponentName, (UVFSFileAttributes *) psAttr); + if (iError) + goto exit; + + //Since hfs_vnop_create doesn’t allocate clusters for new files. + //In case of non-zero given size, we need to call setAttr, after successfully creating the file. + if ((psAttr->fa_validmask & UVFS_FA_VALID_SIZE) != 0 && psAttr->fa_size != 0) + { + iError = hfs_vnop_setattr( (vnode_t) *ppsOutNode, psAttr ); + //In case of a failure in setAttr, need to remove the created file + if (iError) + { + DIROPS_RemoveInternal(psParentVnode, pcName); + LFHFS_Reclaim((vnode_t) *ppsOutNode); + } + } + +exit: + return iError; +} + +int LFHFS_GetAttr ( UVFSFileNode psNode, UVFSFileAttributes *psOutAttr ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_GetAttr\n"); + VERIFY_NODE_IS_VALID(psNode); + + int iErr = 0; + vnode_t vp = (vnode_t)psNode; + + hfs_lock(VTOC(vp),0,0); + vnode_GetAttrInternal(vp, psOutAttr); + hfs_unlock(VTOC(vp)); + + return iErr; +} + +int LFHFS_SetAttr ( UVFSFileNode psNode, const UVFSFileAttributes *psSetAttr, UVFSFileAttributes *psOutAttr ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_SetAttr\n"); + VERIFY_NODE_IS_VALID(psNode); + + vnode_t psVnode = (vnode_t)psNode; + + int iErr = hfs_vnop_setattr( psVnode, psSetAttr ); + if ( iErr != 0 ) + { + goto exit; + } + + iErr = LFHFS_GetAttr( psNode, psOutAttr ); + +exit: + return iErr; +} + +int LFHFS_Reclaim ( UVFSFileNode psNode ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Reclaim\n"); + + int iErr = 0; + vnode_t vp = (vnode_t)psNode; + + if ( psNode != NULL ) + { + VERIFY_NODE_IS_VALID_FOR_RECLAIM(psNode); + + iErr = hfs_vnop_reclaim(vp); + psNode = NULL; + } + + return iErr; +} + +int LFHFS_ReadLink ( UVFSFileNode psNode, void *pvOutBuf, size_t iBufSize, size_t *iActuallyRead, UVFSFileAttributes *psOutAttr ) +{ + 
LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ReadLink\n"); + VERIFY_NODE_IS_VALID(psNode); + + int iErr = 0; + *iActuallyRead = 0; + vnode_t vp = (vnode_t)psNode; + + iErr = hfs_vnop_readlink(vp, pvOutBuf, iBufSize, iActuallyRead); + if ( iErr != 0 ) + { + goto exit; + } + + iErr = LFHFS_GetAttr( psNode, psOutAttr ); + if ( iErr != 0 ) + { + goto exit; + } + +exit: + return iErr; +} + +int LFHFS_SymLink ( UVFSFileNode psNode, const char *pcName, const char *psContent, const UVFSFileAttributes *psAttr, UVFSFileNode *ppsOutNode ) +{ + VERIFY_NODE_IS_VALID(psNode); + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_SymLink\n"); + + int iErr = 0; + vnode_t psParentVnode = (vnode_t)psNode; + + if (!vnode_isdir(psParentVnode)) + { + iErr = ENOTDIR; + goto exit; + } + + vnode_t psSymLinkVnode = {0}; + struct componentname sCompName = {0}; + sCompName.cn_nameiop = CREATE; + sCompName.cn_flags = ISLASTCN; + sCompName.cn_pnbuf = (char *)pcName; + sCompName.cn_pnlen = (int)strlen(pcName); + sCompName.cn_nameptr = (char *)pcName; + sCompName.cn_namelen = (int)strlen(pcName); + sCompName.cn_hash = 0; + sCompName.cn_consume = (int)strlen(pcName); + + iErr = hfs_vnop_symlink( psParentVnode, &psSymLinkVnode, &sCompName, (char*)psContent, (UVFSFileAttributes *)psAttr ); + + *ppsOutNode = (UVFSFileNode)psSymLinkVnode; + +exit: + return iErr; +} + +int LFHFS_Rename (UVFSFileNode psFromDirNode, UVFSFileNode psFromNode, const char *pcFromName, UVFSFileNode psToDirNode, UVFSFileNode psToNode, const char *pcToName, uint32_t flags __unused) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Rename\n"); + + VERIFY_NODE_IS_VALID(psFromDirNode); + VERIFY_NODE_IS_VALID(psToDirNode); + if ( psFromNode != NULL ) + { + VERIFY_NODE_IS_VALID(psFromNode); + } + if ( psToNode != NULL ) + { + VERIFY_NODE_IS_VALID(psToNode); + } + + int iErr = 0; + vnode_t psFromParentVnode = (vnode_t)psFromDirNode; + vnode_t psToParentVnode = (vnode_t)psToDirNode; + + if (!vnode_isdir(psFromParentVnode) || !vnode_isdir(psToParentVnode)) + { + iErr = ENOTDIR; + goto 
exit; + } + + UVFSFileNode psFromFileNode = {0}; + UVFSFileNode psToFileNode = {0}; + bool bGotFromNode = (psFromNode != NULL); + bool bGotToNode = (psToNode != NULL); + + vnode_t psFromVnode = (vnode_t) psFromNode; + + if (!bGotFromNode) + { + iErr = DIROPS_LookupInternal( psFromDirNode, pcFromName, &psFromFileNode ); + if ( iErr != 0 ) + { + goto exit; + } + psFromVnode = (vnode_t)psFromFileNode; + } + + vnode_t psToVnode = psToNode; + if (!bGotToNode) + { + iErr = DIROPS_LookupInternal( psToDirNode, pcToName, &psToFileNode ); + if ( !iErr ) + { + psToVnode = (vnode_t)psToFileNode; + } + else if (iErr != ENOENT) + { + goto exit; + } + } + + // If only one of the vnodes is of type directory, + // we can't allow the rename + if (psToVnode) + { + if (vnode_isdir(psFromVnode) && !vnode_isdir(psToVnode)) + { + iErr = ENOTDIR; + goto exit; + } + + if (!vnode_isdir(psFromVnode) && vnode_isdir(psToVnode)) + { + iErr = EISDIR; + goto exit; + } + } + struct componentname sFromCompName = {0}; + sFromCompName.cn_nameiop = RENAME; + sFromCompName.cn_flags = ISLASTCN; + sFromCompName.cn_pnbuf = (char *)pcFromName; + sFromCompName.cn_pnlen = (int)strlen(pcFromName); + sFromCompName.cn_nameptr = (char *)pcFromName; + sFromCompName.cn_namelen = (int)strlen(pcFromName); + sFromCompName.cn_hash = 0; + sFromCompName.cn_consume = (int)strlen(pcFromName); + + struct componentname sToCompName = {0}; + sToCompName.cn_nameiop = RENAME; + sToCompName.cn_flags = ISLASTCN; + sToCompName.cn_pnbuf = (char *)pcToName; + sToCompName.cn_pnlen = (int)strlen(pcToName); + sToCompName.cn_nameptr = (char *)pcToName; + sToCompName.cn_namelen = (int)strlen(pcToName); + sToCompName.cn_hash = 0; + sToCompName.cn_consume = (int)strlen(pcToName); + + iErr = hfs_vnop_renamex(psFromParentVnode, psFromVnode, &sFromCompName, psToParentVnode, psToVnode, &sToCompName); + + if (!bGotFromNode) + LFHFS_Reclaim(psFromVnode); + if (!bGotToNode && psToVnode) + LFHFS_Reclaim(psToVnode); + +exit: + return iErr; +} + 
+int LFHFS_Link ( UVFSFileNode psFromNode, UVFSFileNode psToDirNode, const char *pcToName, UVFSFileAttributes* psOutFileAttrs, UVFSFileAttributes* psOutDirAttrs ) +{ + VERIFY_NODE_IS_VALID(psFromNode); + VERIFY_NODE_IS_VALID(psToDirNode); + + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Link\n"); + int iErr = 0; + + vnode_t psFromVnode = (vnode_t)psFromNode; + vnode_t psToDirVnode = (vnode_t)psToDirNode; + + if (!vnode_isdir(psToDirVnode)) + { + return ENOTDIR; + } + + /* Preflight checks */ + if (!vnode_isreg(psFromVnode)) { + /* can only create hardlinks for regular files */ + return ( vnode_isdir(psFromVnode) ? EISDIR : EPERM ); + } + + struct componentname sToCompName = {0}; + sToCompName.cn_nameiop = CREATE; + sToCompName.cn_flags = ISLASTCN; + sToCompName.cn_pnbuf = (char *)pcToName; + sToCompName.cn_pnlen = (int)strlen(pcToName); + sToCompName.cn_nameptr = (char *)pcToName; + sToCompName.cn_namelen = (int)strlen(pcToName); + sToCompName.cn_hash = 0; + sToCompName.cn_consume = (int)strlen(pcToName); + + iErr = hfs_vnop_link(psFromVnode, psToDirVnode, &sToCompName); + if ( iErr != 0 ) + { + goto exit; + } + + iErr = LFHFS_GetAttr( psFromNode, psOutFileAttrs ); + if ( iErr != 0 ) + { + LFHFS_LOG(LEVEL_ERROR, "LFHFS_Link: Failed in getting FromNode Attr\n"); + goto exit; + } + + iErr = LFHFS_GetAttr( psToDirNode, psOutDirAttrs ); + if ( iErr != 0 ) + { + LFHFS_LOG(LEVEL_ERROR, "LFHFS_Link: Failed in getting ToDir Attr\n"); + goto exit; + } + +exit: + return iErr; +} + +int LFHFS_GetXAttr ( UVFSFileNode psNode, const char *pcAttr, void *pvOutBuf, size_t iBufSize, size_t *iActualSize ) +{ + int iErr = 0; + + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_GetXAttr\n"); + + VERIFY_NODE_IS_VALID(psNode); + + iErr = hfs_vnop_getxattr((vnode_t)psNode, pcAttr, pvOutBuf, iBufSize, iActualSize); + + return iErr; +} + +int LFHFS_SetXAttr ( UVFSFileNode psNode, const char *pcAttr, const void *pvInBuf, size_t iBufSize, UVFSXattrHow How ) +{ + int iErr = 0; + + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_SetXAttr\n"); + 
+ VERIFY_NODE_IS_VALID(psNode); + + if (How == UVFSXattrHowRemove) + { + iErr = hfs_vnop_removexattr((vnode_t)psNode, pcAttr); + } + else + { + iErr = hfs_vnop_setxattr((vnode_t)psNode, pcAttr, pvInBuf, iBufSize, How); + } + + return iErr; +} + +int LFHFS_ListXAttr ( UVFSFileNode psNode, void *pvOutBuf, size_t iBufSize, size_t *iActualSize ) +{ + int iErr = 0; + + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ListXAttr\n"); + + VERIFY_NODE_IS_VALID(psNode); + + iErr = hfs_vnop_listxattr((vnode_t)psNode, pvOutBuf, iBufSize, iActualSize); + + return iErr; +} diff --git a/livefiles_hfs_plugin/lf_hfs_fileops_handler.h b/livefiles_hfs_plugin/lf_hfs_fileops_handler.h new file mode 100644 index 0000000..bf709a7 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_fileops_handler.h @@ -0,0 +1,66 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_fileops_handler.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. + */ + +#ifndef lf_hfs_fileops_handler_h +#define lf_hfs_fileops_handler_h + +#include "lf_hfs_common.h" + +#define VALID_IN_ATTR_MASK ( UVFS_FA_VALID_TYPE | \ + UVFS_FA_VALID_MODE | \ + UVFS_FA_VALID_NLINK | \ + UVFS_FA_VALID_UID | \ + UVFS_FA_VALID_GID | \ + UVFS_FA_VALID_BSD_FLAGS | \ + UVFS_FA_VALID_SIZE | \ + UVFS_FA_VALID_ALLOCSIZE | \ + UVFS_FA_VALID_FILEID | \ + UVFS_FA_VALID_PARENTID | \ + UVFS_FA_VALID_ATIME | \ + UVFS_FA_VALID_MTIME | \ + UVFS_FA_VALID_CTIME | \ + UVFS_FA_VALID_BIRTHTIME) + +#define VALID_OUT_ATTR_MASK ( UVFS_FA_VALID_TYPE | \ + UVFS_FA_VALID_MODE | \ + UVFS_FA_VALID_NLINK | \ + UVFS_FA_VALID_UID | \ + UVFS_FA_VALID_GID | \ + UVFS_FA_VALID_BSD_FLAGS | \ + UVFS_FA_VALID_SIZE | \ + UVFS_FA_VALID_ALLOCSIZE | \ + UVFS_FA_VALID_FILEID | \ + UVFS_FA_VALID_PARENTID | \ + UVFS_FA_VALID_ATIME | \ + UVFS_FA_VALID_MTIME | \ + UVFS_FA_VALID_CTIME | \ + UVFS_FA_VALID_BIRTHTIME) + +#define READ_ONLY_FA_FIELDS ( UVFS_FA_VALID_TYPE | \ + UVFS_FA_VALID_NLINK | \ + UVFS_FA_VALID_ALLOCSIZE | \ + UVFS_FA_VALID_FILEID | \ + 
UVFS_FA_VALID_PARENTID | \ + UVFS_FA_VALID_CTIME ) + +int LFHFS_Read ( UVFSFileNode psNode, uint64_t uOffset, size_t iLength, void *pvBuf, size_t *iActuallyRead ); +int LFHFS_Write ( UVFSFileNode psNode, uint64_t uOffset, size_t iLength, const void *pvBuf, size_t *iActuallyWrite ); +int LFHFS_Create ( UVFSFileNode psNode, const char *pcName, const UVFSFileAttributes *psAttr, UVFSFileNode *ppsOutNode ); +int LFHFS_GetAttr ( UVFSFileNode psNode, UVFSFileAttributes *psOutAttr ); +int LFHFS_SetAttr ( UVFSFileNode psNode, const UVFSFileAttributes *psSetAttr, UVFSFileAttributes *psOutAttr ); +int LFHFS_Reclaim ( UVFSFileNode psNode ); +int LFHFS_ReadLink ( UVFSFileNode psNode, void *pvOutBuf, size_t iBufSize, size_t *iActuallyRead, UVFSFileAttributes *psOutAttr ); +int LFHFS_SymLink ( UVFSFileNode psNode, const char *pcName, const char *psContent, const UVFSFileAttributes *psAttr, UVFSFileNode *ppsOutNode ); +int LFHFS_Rename ( UVFSFileNode psFromDirNode, UVFSFileNode psFromNode, const char *pcFromName, UVFSFileNode psToDirNode, UVFSFileNode psToNode, const char *pcToName, uint32_t flags); +int LFHFS_Link ( UVFSFileNode psFromNode, UVFSFileNode psToDirNode, const char *pcToName, UVFSFileAttributes* psOutFileAttrs, UVFSFileAttributes* psOutDirAttrs ); + +int LFHFS_GetXAttr ( UVFSFileNode psNode, const char *pcAttr, void *pvOutBuf, size_t iBufSize, size_t *iActualSize ); +int LFHFS_SetXAttr ( UVFSFileNode psNode, const char *pcAttr, const void *pvInBuf, size_t iBufSize, UVFSXattrHow How ); +int LFHFS_ListXAttr ( UVFSFileNode psNode, void *pvOutBuf, size_t iBufSize, size_t *iActualSize ); + +#endif /* lf_hfs_fileops_handler_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_format.h b/livefiles_hfs_plugin/lf_hfs_format.h new file mode 100644 index 0000000..143449b --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_format.h @@ -0,0 +1,623 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_format.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. 
+ */ + +#ifndef lf_hfs_format_h +#define lf_hfs_format_h + +#include +#include "lf_hfs_common.h" + +/* + * Files in the "HFS+ Private Data" folder have one of the following prefixes + * followed by a decimal number (no leading zeros) for the file ID. + * + * Note: Earlier version of Mac OS X used a 32 bit random number for the link + * ref number instead of the file id. + * + * e.g. iNode7182000 and temp3296 + */ +#define HFS_INODE_PREFIX "iNode" +#define HFS_DELETE_PREFIX "temp" + +/* + * Files in the ".HFS+ Private Directory Data" folder have the following + * prefix followed by a decimal number (no leading zeros) for the file ID. + * + * e.g. dir_555 + */ +#define HFS_DIRINODE_PREFIX "dir_" + +/* + * Atrributes B-tree Data Record + * + * For small attributes, whose entire value is stored + * within a single B-tree record. + */ +struct HFSPlusAttrData { + u_int32_t recordType; /* == kHFSPlusAttrInlineData */ + u_int32_t reserved[2]; + u_int32_t attrSize; /* size of attribute data in bytes */ + u_int8_t attrData[2]; /* variable length */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusAttrData HFSPlusAttrData; + +/* + * Hardlink inodes save the head of the link chain in + * an extended attribute named FIRST_LINK_XATTR_NAME. + * The attribute data is the decimal value in ASCII + * of the cnid for the first link in the chain. + * + * This extended attribute is private (i.e. its not + * exported in the getxattr/listxattr POSIX APIs). + */ +#define FIRST_LINK_XATTR_NAME "com.apple.system.hfs.firstlink" +#define FIRST_LINK_XATTR_REC_SIZE (sizeof(HFSPlusAttrData) - 2 + 12) + +/* + * Mac OS X has two special directories on HFS+ volumes for hardlinked files + * and hardlinked directories as well as for open-unlinked files. + * + * These directories and their contents are not exported from the filesystem + * under Mac OS X. 
+ */ +#define HFSPLUSMETADATAFOLDER "\xE2\x90\x80\xE2\x90\x80\xE2\x90\x80\xE2\x90\x80HFS+ Private Data" +#define HFSPLUS_DIR_METADATA_FOLDER ".HFS+ Private Directory Data\xd" + + +/* Signatures used to differentiate between HFS and HFS Plus volumes */ +enum { + kHFSSigWord = 0x4244, /* 'BD' in ASCII */ + kHFSPlusSigWord = 0x482B, /* 'H+' in ASCII */ + kHFSXSigWord = 0x4858, /* 'HX' in ASCII */ + kHFSPlusVersion = 0x0004, /* 'H+' volumes are version 4 only */ + kHFSXVersion = 0x0005, /* 'HX' volumes start with version 5 */ + kHFSPlusMountVersion = 0x31302E30, /* '10.0' for Mac OS X */ + kHFSJMountVersion = 0x4846534a, /* 'HFSJ' for journaled HFS+ on OS X */ + kFSKMountVersion = 0x46534b21 /* 'FSK!' for failed journal replay */ +}; + +/* + * The name space ID for generating an HFS volume UUID + * + * B3E20F39-F292-11D6-97A4-00306543ECAC + */ +#define HFS_UUID_NAMESPACE_ID "\xB3\xE2\x0F\x39\xF2\x92\x11\xD6\x97\xA4\x00\x30\x65\x43\xEC\xAC" + +enum { + kHFSMaxVolumeNameChars = 27, + kHFSMaxFileNameChars = 31, + kHFSPlusMaxFileNameChars = 255 +}; + +/* + * Indirect link files (hard links) have the following type/creator. 
+ */ +enum { + kHardLinkFileType = 0x686C6E6B, /* 'hlnk' */ + kHFSPlusCreator = 0x6866732B /* 'hfs+' */ +}; + +/* + * File type and creator for symbolic links + */ +enum { + kSymLinkFileType = 0x736C6E6B, /* 'slnk' */ + kSymLinkCreator = 0x72686170 /* 'rhap' */ +}; + + +/* Extent overflow file data structures */ + +/* HFS Extent key */ +struct HFSExtentKey { + u_int8_t keyLength; /* length of key, excluding this field */ + u_int8_t forkType; /* 0 = data fork, FF = resource fork */ + u_int32_t fileID; /* file ID */ + u_int16_t startBlock; /* first file allocation block number in this extent */ +} __attribute__((aligned(2), packed)); +typedef struct HFSExtentKey HFSExtentKey; + +/* HFS Plus Extent key */ +struct HFSPlusExtentKey { + u_int16_t keyLength; /* length of key, excluding this field */ + u_int8_t forkType; /* 0 = data fork, FF = resource fork */ + u_int8_t pad; /* make the other fields align on 32-bit boundary */ + u_int32_t fileID; /* file ID */ + u_int32_t startBlock; /* first file allocation block number in this extent */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusExtentKey HFSPlusExtentKey; + + +/* HFS extent descriptor */ +struct HFSExtentDescriptor { + u_int16_t startBlock; /* first allocation block */ + u_int16_t blockCount; /* number of allocation blocks */ +} __attribute__((aligned(2), packed)); +typedef struct HFSExtentDescriptor HFSExtentDescriptor; + +/* HFS Plus extent descriptor */ +struct HFSPlusExtentDescriptor { + u_int32_t startBlock; /* first allocation block */ + u_int32_t blockCount; /* number of allocation blocks */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusExtentDescriptor HFSPlusExtentDescriptor; + +enum { + kHFSExtentDensity = 3, + kHFSPlusExtentDensity = 8 +}; + +/* HFS extent record */ +typedef HFSExtentDescriptor HFSExtentRecord[3]; + +/* HFS Plus extent record */ +typedef HFSPlusExtentDescriptor HFSPlusExtentRecord[8]; + +/* Catalog Key Name Comparison Type */ +enum { + kHFSCaseFolding = 
0xCF, /* case folding (case-insensitive) */ + kHFSBinaryCompare = 0xBC /* binary compare (case-sensitive) */ +}; + +/* HFS Plus Fork data info - 80 bytes */ +struct HFSPlusForkData { + u_int64_t logicalSize; /* fork's logical size in bytes */ + u_int32_t clumpSize; /* fork's clump size in bytes */ + u_int32_t totalBlocks; /* total blocks used by this fork */ + HFSPlusExtentRecord extents; /* initial set of extents */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusForkData HFSPlusForkData; + +/* HFS Plus catalog thread record -- 264 bytes */ +struct HFSPlusCatalogThread { + int16_t recordType; /* == kHFSPlusFolderThreadRecord or kHFSPlusFileThreadRecord */ + int16_t reserved; /* reserved - initialized as zero */ + u_int32_t parentID; /* parent ID for this catalog node */ + HFSUniStr255 nodeName; /* name of this catalog node (variable length) */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusCatalogThread HFSPlusCatalogThread; + +/* Catalog file data structures */ + +enum { + kHFSRootParentID = 1, /* Parent ID of the root folder */ + kHFSRootFolderID = 2, /* Folder ID of the root folder */ + kHFSExtentsFileID = 3, /* File ID of the extents file */ + kHFSCatalogFileID = 4, /* File ID of the catalog file */ + kHFSBadBlockFileID = 5, /* File ID of the bad allocation block file */ + kHFSAllocationFileID = 6, /* File ID of the allocation file (HFS Plus only) */ + kHFSStartupFileID = 7, /* File ID of the startup file (HFS Plus only) */ + kHFSAttributesFileID = 8, /* File ID of the attribute file (HFS Plus only) */ + kHFSAttributeDataFileID = 13, /* Used in Mac OS X runtime for extent based attributes */ + /* kHFSAttributeDataFileID is never stored on disk. 
*/ + kHFSRepairCatalogFileID = 14, /* Used when rebuilding Catalog B-tree */ + kHFSBogusExtentFileID = 15, /* Used for exchanging extents in extents file */ + kHFSFirstUserCatalogNodeID = 16 +}; + +/* HFS Plus catalog key */ +struct HFSPlusCatalogKey { + u_int16_t keyLength; /* key length (in bytes) */ + u_int32_t parentID; /* parent folder ID */ + HFSUniStr255 nodeName; /* catalog node name */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusCatalogKey HFSPlusCatalogKey; + +/* Catalog record types */ +enum { + kHFSPlusFolderRecord = 1, /* Folder record */ + kHFSPlusFileRecord = 2, /* File record */ + kHFSPlusFolderThreadRecord = 3, /* Folder thread record */ + kHFSPlusFileThreadRecord = 4 /* File thread record */ +}; + +/* Catalog file record flags */ +enum { + kHFSFileLockedBit = 0x0000, /* file is locked and cannot be written to */ + kHFSFileLockedMask = 0x0001, + + kHFSThreadExistsBit = 0x0001, /* a file thread record exists for this file */ + kHFSThreadExistsMask = 0x0002, + + kHFSHasAttributesBit = 0x0002, /* object has extended attributes */ + kHFSHasAttributesMask = 0x0004, + + kHFSHasSecurityBit = 0x0003, /* object has security data (ACLs) */ + kHFSHasSecurityMask = 0x0008, + + kHFSHasFolderCountBit = 0x0004, /* only for HFSX, folder maintains a separate sub-folder count */ + kHFSHasFolderCountMask = 0x0010, /* (sum of folder records and directory hard links) */ + + kHFSHasLinkChainBit = 0x0005, /* has hardlink chain (inode or link) */ + kHFSHasLinkChainMask = 0x0020, + + kHFSHasChildLinkBit = 0x0006, /* folder has a child that's a dir link */ + kHFSHasChildLinkMask = 0x0040, + + kHFSHasDateAddedBit = 0x0007, /* File/Folder has the date-added stored in the finder info. 
*/ + kHFSHasDateAddedMask = 0x0080, + + kHFSFastDevPinnedBit = 0x0008, /* this file has been pinned to the fast-device by the hot-file code on cooperative fusion */ + kHFSFastDevPinnedMask = 0x0100, + + kHFSDoNotFastDevPinBit = 0x0009, /* this file can not be pinned to the fast-device */ + kHFSDoNotFastDevPinMask = 0x0200, + + kHFSFastDevCandidateBit = 0x000a, /* this item is a potential candidate for fast-dev pinning (as are any of its descendents */ + kHFSFastDevCandidateMask = 0x0400, + + kHFSAutoCandidateBit = 0x000b, /* this item was automatically marked as a fast-dev candidate by the kernel */ + kHFSAutoCandidateMask = 0x0800 + + // There are only 4 flag bits remaining: 0x1000, 0x2000, 0x4000, 0x8000 + +}; + +/* + * Atrributes B-tree Data Record + * + * For small attributes, whose entire value is stored + * within a single B-tree record. + */ + +/* Attribute key */ +enum { kHFSMaxAttrNameLen = 127 }; +struct HFSPlusAttrKey { + u_int16_t keyLength; /* key length (in bytes) */ + u_int16_t pad; /* set to zero */ + u_int32_t fileID; /* file associated with attribute */ + u_int32_t startBlock; /* first allocation block number for extents */ + u_int16_t attrNameLen; /* number of unicode characters */ + u_int16_t attrName[kHFSMaxAttrNameLen]; /* attribute name (Unicode) */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusAttrKey HFSPlusAttrKey; + +#define kHFSPlusAttrKeyMaximumLength (sizeof(HFSPlusAttrKey) - sizeof(u_int16_t)) +#define kHFSPlusAttrKeyMinimumLength (kHFSPlusAttrKeyMaximumLength - kHFSMaxAttrNameLen*sizeof(u_int16_t)) + + +/* Key and node lengths */ +enum { + kHFSPlusExtentKeyMaximumLength = sizeof(HFSPlusExtentKey) - sizeof(u_int16_t), + kHFSExtentKeyMaximumLength = sizeof(HFSExtentKey) - sizeof(u_int8_t), + kHFSPlusCatalogKeyMaximumLength = sizeof(HFSPlusCatalogKey) - sizeof(u_int16_t), + kHFSPlusCatalogKeyMinimumLength = kHFSPlusCatalogKeyMaximumLength - sizeof(HFSUniStr255) + sizeof(u_int16_t), + kHFSPlusCatalogMinNodeSize = 4096, + 
kHFSPlusExtentMinNodeSize = 512, + kHFSPlusAttrMinNodeSize = 4096 +}; + +/* HFS and HFS Plus volume attribute bits */ +enum { + /* Bits 0-6 are reserved (always cleared by MountVol call) */ + kHFSVolumeHardwareLockBit = 7, /* volume is locked by hardware */ + kHFSVolumeUnmountedBit = 8, /* volume was successfully unmounted */ + kHFSVolumeSparedBlocksBit = 9, /* volume has bad blocks spared */ + kHFSVolumeNoCacheRequiredBit = 10, /* don't cache volume blocks (i.e. RAM or ROM disk) */ + kHFSBootVolumeInconsistentBit = 11, /* boot volume is inconsistent (System 7.6 and later) */ + kHFSCatalogNodeIDsReusedBit = 12, + kHFSVolumeJournaledBit = 13, /* this volume has a journal on it */ + kHFSVolumeInconsistentBit = 14, /* serious inconsistencies detected at runtime */ + kHFSVolumeSoftwareLockBit = 15, /* volume is locked by software */ + /* + * HFS only has 16 bits of attributes in the MDB, but HFS Plus has 32 bits. + * Therefore, bits 16-31 can only be used on HFS Plus. + */ + kHFSUnusedNodeFixBit = 31, /* Unused nodes in the Catalog B-tree have been zero-filled. See Radar #6947811. */ + kHFSContentProtectionBit = 30, /* Volume has per-file content protection */ + + /*** Keep these in sync with the bits above ! ****/ + kHFSVolumeHardwareLockMask = 0x00000080, + kHFSVolumeUnmountedMask = 0x00000100, + kHFSVolumeSparedBlocksMask = 0x00000200, + kHFSVolumeNoCacheRequiredMask = 0x00000400, + kHFSBootVolumeInconsistentMask = 0x00000800, + kHFSCatalogNodeIDsReusedMask = 0x00001000, + kHFSVolumeJournaledMask = 0x00002000, + kHFSVolumeInconsistentMask = 0x00004000, + kHFSVolumeSoftwareLockMask = 0x00008000, + + /* Bits 16-31 are allocated from high to low */ + + kHFSContentProtectionMask = 0x40000000, + kHFSUnusedNodeFixMask = 0x80000000, + + kHFSMDBAttributesMask = 0x8380 +}; + +enum { + kHFSUnusedNodesFixDate = 0xc5ef2480 /* March 25, 2009 */ +}; + +/* Mac OS X has 16 bytes worth of "BSD" info. 
+ * + * Note: Mac OS 9 implementations and applications + * should preserve, but not change, this information. + */ +struct HFSPlusBSDInfo { + u_int32_t ownerID; /* user-id of owner or hard link chain previous link */ + u_int32_t groupID; /* group-id of owner or hard link chain next link */ + u_int8_t adminFlags; /* super-user changeable flags */ + u_int8_t ownerFlags; /* owner changeable flags */ + u_int16_t fileMode; /* file type and permission bits */ + union { + u_int32_t iNodeNum; /* indirect node number (hard links only) */ + u_int32_t linkCount; /* links that refer to this indirect node */ + u_int32_t rawDevice; /* special file device (FBLK and FCHR only) */ + } special; +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusBSDInfo HFSPlusBSDInfo; + +#define hl_firstLinkID reserved1 /* Valid only if HasLinkChain flag is set (indirect nodes only) */ + +#define hl_prevLinkID bsdInfo.ownerID /* Valid only if HasLinkChain flag is set */ +#define hl_nextLinkID bsdInfo.groupID /* Valid only if HasLinkChain flag is set */ + +#define hl_linkReference bsdInfo.special.iNodeNum +#define hl_linkCount bsdInfo.special.linkCount + +/* Finder information */ +struct FndrFileInfo { + u_int32_t fdType; /* file type */ + u_int32_t fdCreator; /* file creator */ + u_int16_t fdFlags; /* Finder flags */ + struct { + int16_t v; /* file's location */ + int16_t h; + } fdLocation; + int16_t opaque; +} __attribute__((aligned(2), packed)); +typedef struct FndrFileInfo FndrFileInfo; + +struct FndrDirInfo { + struct { /* folder's window rectangle */ + int16_t top; + int16_t left; + int16_t bottom; + int16_t right; + } frRect; + unsigned short frFlags; /* Finder flags */ + struct { + u_int16_t v; /* folder's location */ + u_int16_t h; + } frLocation; + int16_t opaque; +} __attribute__((aligned(2), packed)); +typedef struct FndrDirInfo FndrDirInfo; + +struct FndrOpaqueInfo { + int8_t opaque[16]; +} __attribute__((aligned(2), packed)); +typedef struct FndrOpaqueInfo FndrOpaqueInfo; + 
+struct FndrExtendedDirInfo { + u_int32_t document_id; + u_int32_t date_added; + u_int16_t extended_flags; + u_int16_t reserved3; + u_int32_t write_gen_counter; +} __attribute__((aligned(2), packed)); + +struct FndrExtendedFileInfo { + u_int32_t document_id; + u_int32_t date_added; + u_int16_t extended_flags; + u_int16_t reserved2; + u_int32_t write_gen_counter; +} __attribute__((aligned(2), packed)); + +/* HFS Plus catalog folder record - 88 bytes */ +struct HFSPlusCatalogFolder { + int16_t recordType; /* == kHFSPlusFolderRecord */ + u_int16_t flags; /* file flags */ + u_int32_t valence; /* folder's item count */ + u_int32_t folderID; /* folder ID */ + u_int32_t createDate; /* date and time of creation */ + u_int32_t contentModDate; /* date and time of last content modification */ + u_int32_t attributeModDate; /* date and time of last attribute modification */ + u_int32_t accessDate; /* date and time of last access (MacOS X only) */ + u_int32_t backupDate; /* date and time of last backup */ + HFSPlusBSDInfo bsdInfo; /* permissions (for MacOS X) */ + FndrDirInfo userInfo; /* Finder information */ + FndrOpaqueInfo finderInfo; /* additional Finder information */ + u_int32_t textEncoding; /* hint for name conversions */ + u_int32_t folderCount; /* number of enclosed folders, active when HasFolderCount is set */ +} __attribute__((aligned(2), packed)); +typedef struct HFSPlusCatalogFolder HFSPlusCatalogFolder; + + +/* + * These are the types of records in the attribute B-tree. The values were + * chosen so that they wouldn't conflict with the catalog record types. + */ +enum { + kHFSPlusAttrInlineData = 0x10, /* attributes whose data fits in a b-tree node */ + kHFSPlusAttrForkData = 0x20, /* extent based attributes (data lives in extents) */ + kHFSPlusAttrExtents = 0x30 /* overflow extents for large attributes */ +}; + + +/* + * HFSPlusAttrForkData + * For larger attributes, whose value is stored in allocation blocks. 
+ * If the attribute has more than 8 extents, there will be additional
+ * records (of type HFSPlusAttrExtents) for this attribute.
+ */
+/* On-disk record: field order, widths and the aligned(2)/packed attribute
+   define the media layout — do not reorder or re-type fields. */
+struct HFSPlusAttrForkData {
+    u_int32_t       recordType;     /* == kHFSPlusAttrForkData*/
+    u_int32_t       reserved;
+    HFSPlusForkData theFork;        /* size and first extents of value*/
+} __attribute__((aligned(2), packed));
+typedef struct HFSPlusAttrForkData HFSPlusAttrForkData;
+
+/*
+ * HFSPlusAttrExtents
+ * This record contains information about overflow extents for large,
+ * fragmented attributes.
+ */
+struct HFSPlusAttrExtents {
+    u_int32_t           recordType;     /* == kHFSPlusAttrExtents*/
+    u_int32_t           reserved;
+    HFSPlusExtentRecord extents;        /* additional extents*/
+} __attribute__((aligned(2), packed));
+typedef struct HFSPlusAttrExtents HFSPlusAttrExtents;
+
+/* HFSPlusAttrInlineData is obsolete use HFSPlusAttrData instead */
+struct HFSPlusAttrInlineData {
+    u_int32_t   recordType;
+    u_int32_t   reserved;
+    u_int32_t   logicalSize;
+    u_int8_t    userData[2];
+} __attribute__((aligned(2), packed));
+typedef struct HFSPlusAttrInlineData HFSPlusAttrInlineData;
+
+
+/* A generic Attribute Record */
+union HFSPlusAttrRecord {
+    u_int32_t               recordType;     /* common leading field used to discriminate the union */
+    HFSPlusAttrInlineData   inlineData;     /* NOT USED */
+    HFSPlusAttrData         attrData;
+    HFSPlusAttrForkData     forkData;
+    HFSPlusAttrExtents      overflowExtents;
+};
+typedef union HFSPlusAttrRecord HFSPlusAttrRecord;
+
+
+/* HFS Plus catalog file record - 248 bytes */
+struct HFSPlusCatalogFile {
+    int16_t         recordType;         /* == kHFSPlusFileRecord */
+    u_int16_t       flags;              /* file flags */
+    u_int32_t       reserved1;          /* reserved - initialized as zero */
+    u_int32_t       fileID;             /* file ID */
+    u_int32_t       createDate;         /* date and time of creation */
+    u_int32_t       contentModDate;     /* date and time of last content modification */
+    u_int32_t       attributeModDate;   /* date and time of last attribute modification */
+    u_int32_t       accessDate;         /* date and time of last access (MacOS X only) */
+    u_int32_t       backupDate;         /* date and time of last backup */
+
+    HFSPlusBSDInfo  bsdInfo;            /* permissions (for MacOS X) */
+    FndrFileInfo    userInfo;           /* Finder information */
+    FndrOpaqueInfo  finderInfo;         /* additional Finder information */
+    u_int32_t       textEncoding;       /* hint for name conversions */
+    u_int32_t       reserved2;          /* reserved - initialized as zero */
+
+    /* Note: these start on double long (64 bit) boundary */
+    HFSPlusForkData dataFork;           /* size and block data for data fork */
+    HFSPlusForkData resourceFork;       /* size and block data for resource fork */
+} __attribute__((aligned(2), packed));
+typedef struct HFSPlusCatalogFile HFSPlusCatalogFile;
+
+/* HFS Master Directory Block - 162 bytes */
+/* Stored at sector #2 (3rd sector) and second-to-last sector. */
+struct HFSMasterDirectoryBlock {
+    u_int16_t           drSigWord;      /* == kHFSSigWord */
+    u_int32_t           drCrDate;       /* date and time of volume creation */
+    u_int32_t           drLsMod;        /* date and time of last modification */
+    u_int16_t           drAtrb;         /* volume attributes */
+    u_int16_t           drNmFls;        /* number of files in root folder */
+    u_int16_t           drVBMSt;        /* first block of volume bitmap */
+    u_int16_t           drAllocPtr;     /* start of next allocation search */
+    u_int16_t           drNmAlBlks;     /* number of allocation blocks in volume */
+    u_int32_t           drAlBlkSiz;     /* size (in bytes) of allocation blocks */
+    u_int32_t           drClpSiz;       /* default clump size */
+    u_int16_t           drAlBlSt;       /* first allocation block in volume */
+    u_int32_t           drNxtCNID;      /* next unused catalog node ID */
+    u_int16_t           drFreeBks;      /* number of unused allocation blocks */
+    u_int8_t            drVN[kHFSMaxVolumeNameChars + 1];  /* volume name */
+    u_int32_t           drVolBkUp;      /* date and time of last backup */
+    u_int16_t           drVSeqNum;      /* volume backup sequence number */
+    u_int32_t           drWrCnt;        /* volume write count */
+    u_int32_t           drXTClpSiz;     /* clump size for extents overflow file */
+    u_int32_t           drCTClpSiz;     /* clump size for catalog file */
+    u_int16_t           drNmRtDirs;     /* number of directories in root folder */
+    u_int32_t           drFilCnt;       /* number of files in volume */
+    u_int32_t           drDirCnt;       /* number of directories in volume */
+    u_int32_t           drFndrInfo[8];  /* information used by the Finder */
+    u_int16_t           drEmbedSigWord; /* embedded volume signature (formerly drVCSize) */
+    HFSExtentDescriptor drEmbedExtent;  /* embedded volume location and size (formerly drVBMCSize and drCtlCSize) */
+    u_int32_t           drXTFlSize;     /* size of extents overflow file */
+    HFSExtentRecord     drXTExtRec;     /* extent record for extents overflow file */
+    u_int32_t           drCTFlSize;     /* size of catalog file */
+    HFSExtentRecord     drCTExtRec;     /* extent record for catalog file */
+} __attribute__((aligned(2), packed));
+typedef struct HFSMasterDirectoryBlock HFSMasterDirectoryBlock;
+
+/* HFS Plus Volume Header - 512 bytes */
+/* Stored at sector #2 (3rd sector) and second-to-last sector. */
+struct HFSPlusVolumeHeader {
+    u_int16_t       signature;          /* == kHFSPlusSigWord */
+    u_int16_t       version;            /* == kHFSPlusVersion */
+    u_int32_t       attributes;         /* volume attributes */
+    u_int32_t       lastMountedVersion; /* implementation version which last mounted volume */
+    u_int32_t       journalInfoBlock;   /* block addr of journal info (if volume is journaled, zero otherwise) */
+
+    u_int32_t       createDate;         /* date and time of volume creation */
+    u_int32_t       modifyDate;         /* date and time of last modification */
+    u_int32_t       backupDate;         /* date and time of last backup */
+    u_int32_t       checkedDate;        /* date and time of last disk check */
+
+    u_int32_t       fileCount;          /* number of files in volume */
+    u_int32_t       folderCount;        /* number of directories in volume */
+
+    u_int32_t       blockSize;          /* size (in bytes) of allocation blocks */
+    u_int32_t       totalBlocks;        /* number of allocation blocks in volume (includes this header and VBM*/
+    u_int32_t       freeBlocks;         /* number of unused allocation blocks */
+
+    u_int32_t       nextAllocation;     /* start of next allocation search */
+    u_int32_t       rsrcClumpSize;      /* default resource fork clump size */
+    u_int32_t       dataClumpSize;      /* default data fork clump size */
+    u_int32_t       nextCatalogID;      /* next unused catalog node ID */
+
+    u_int32_t       writeCount;         /* volume write count */
+    u_int64_t       encodingsBitmap;    /* which encodings have been used on this volume */
+
+    u_int8_t        finderInfo[32];     /* information used by the Finder */
+
+    HFSPlusForkData allocationFile;     /* allocation bitmap file */
+    HFSPlusForkData extentsFile;        /* extents B-tree file */
+    HFSPlusForkData catalogFile;        /* catalog B-tree file */
+    HFSPlusForkData attributesFile;     /* extended attributes B-tree file */
+    HFSPlusForkData startupFile;        /* boot file (secondary loader) */
+} __attribute__((aligned(2), packed));
+typedef struct HFSPlusVolumeHeader HFSPlusVolumeHeader;
+
+/* JournalInfoBlock - Structure that describes where our journal lives */
+
+// the original size of the reserved field in the JournalInfoBlock was
+// 32*sizeof(u_int32_t).  To keep the total size of the structure the
+// same we subtract the size of new fields (currently: ext_jnl_uuid and
+// machine_uuid).  If you add additional fields, place them before the
+// reserved field and subtract their size in this macro.
+//
+#define JIB_RESERVED_SIZE  ((32*sizeof(u_int32_t)) - sizeof(uuid_string_t) - 48)
+
+struct JournalInfoBlock {
+    u_int32_t       flags;
+    u_int32_t       device_signature[8];  // signature used to locate our device.
+    u_int64_t       offset;               // byte offset to the journal on the device
+    u_int64_t       size;                 // size in bytes of the journal
+    uuid_string_t   ext_jnl_uuid;
+    char            machine_serial_num[48];
+    char            reserved[JIB_RESERVED_SIZE];
+} __attribute__((aligned(2), packed));
+typedef struct JournalInfoBlock JournalInfoBlock;
+
+enum {
+    kJIJournalInFSMask          = 0x00000001,
+    kJIJournalOnOtherDeviceMask = 0x00000002,
+    kJIJournalNeedInitMask      = 0x00000004
+};
+
+//
+// This is the content type uuid for "external journal" GPT
+// partitions.  Each instance of a partition also has a
+// uuid that uniquely identifies that instance.
+// +#define EXTJNL_CONTENT_TYPE_UUID "4A6F7572-6E61-11AA-AA11-00306543ECAC" + + +#endif /* lf_hfs_format_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_fsops_handler.c b/livefiles_hfs_plugin/lf_hfs_fsops_handler.c new file mode 100644 index 0000000..40acd59 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_fsops_handler.c @@ -0,0 +1,709 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_fsops_handler.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#include +#include "lf_hfs.h" +#include "lf_hfs_fsops_handler.h" +#include "lf_hfs_dirops_handler.h" +#include "lf_hfs_fileops_handler.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_vnode.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_journal.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_mount.h" + +static int +FSOPS_GetRootVnode(struct vnode* psDevVnode, struct vnode** ppsRootVnode) +{ + return (hfs_vfs_root(psDevVnode->sFSParams.vnfs_mp, ppsRootVnode)); +} + +//---------------------------------- API Implementation ------------------------------------------ + +uint64_t FSOPS_GetOffsetFromClusterNum(vnode_t vp, uint64_t uClusterNum) +{ + return (HFSTOVCB(vp->sFSParams.vnfs_mp->psHfsmount)->hfsPlusIOPosOffset + uClusterNum * HFSTOVCB(vp->sFSParams.vnfs_mp->psHfsmount)->blockSize); +} + +int +LFHFS_Taste ( int iFd ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Taste %d\n", iFd); + + int iError = 0; + u_int32_t log_blksize; + void* pvBuffer = NULL; + + HFSMasterDirectoryBlock *psMasterBlock = hfs_malloc(kMDBSize); + if ( psMasterBlock == NULL ) + { + iError = ENOMEM; + LFHFS_LOG(LEVEL_ERROR, "HFS_Taste: failed to malloc psMasterBlock\n"); + goto exit; + } + + /* Get the logical block size (treated as physical block size everywhere) */ + if (ioctl(iFd, DKIOCGETBLOCKSIZE, &log_blksize)) + { + LFHFS_LOG(LEVEL_DEBUG, 
"hfs_mountfs: DKIOCGETBLOCKSIZE failed - setting to default -512\n"); + log_blksize = kMDBSize; + } + + if (log_blksize == 0 || log_blksize > 1024*1024*1024) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_mountfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize); + iError = ENXIO; + goto exit; + } + + if (log_blksize > kMDBSize) + { + pvBuffer = hfs_malloc(log_blksize); + if ( pvBuffer == NULL ) + { + iError = ENOMEM; + LFHFS_LOG(LEVEL_ERROR, "HFS_Taste: failed to malloc pvBuffer\n"); + goto exit; + } + } + else + { + pvBuffer = (void*) psMasterBlock; + } + + // Read VolumeHeader from offset 1024 + off_t uVolHdrOffset = 1024; + off_t uBlockNum = uVolHdrOffset / log_blksize; + off_t uOffsetInBlock = uVolHdrOffset % log_blksize; + + ssize_t iReadBytes = pread(iFd, pvBuffer, log_blksize, uBlockNum * log_blksize); + if ( iReadBytes < uOffsetInBlock + kMDBSize ) { + iError = (iReadBytes < 0) ? errno : EIO; + LFHFS_LOG(LEVEL_ERROR, "HFS_Taste: failed to read Master Directory Block with err %d (%ld)\n", iError, iReadBytes); + + if (log_blksize > kMDBSize) { + hfs_free(pvBuffer); + } + goto exit; + } + + if (log_blksize > kMDBSize) { + memcpy(psMasterBlock, pvBuffer + uOffsetInBlock, kMDBSize); + hfs_free(pvBuffer); + } + + //Validate Signiture + uint32_t drSigWord = SWAP_BE16(psMasterBlock->drSigWord); + if ((drSigWord != kHFSPlusSigWord) && + (drSigWord != kHFSXSigWord)) + { + iError = EINVAL; + LFHFS_LOG(LEVEL_DEBUG, "HFS_Taste: invalid volume signature %d\n", SWAP_BE16(psMasterBlock->drSigWord)); + goto exit; + } + +exit: + if (psMasterBlock) + hfs_free(psMasterBlock); + return iError; +} + +int +LFHFS_ScanVols (int iFd, UVFSScanVolsRequest *psRequest, UVFSScanVolsReply *psReply ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ScanVols\n"); + + if ( psRequest == NULL || psReply == NULL ) + { + return EINVAL; + } + else if (psRequest->sr_volid > 0) + { + return UVFS_SCANVOLS_EOF_REACHED; + } + + // Tell UVFS that we have a single, non-access controlled volume. 
+ psReply->sr_volid = 0; + psReply->sr_volac = UAC_UNLOCKED; + + return hfs_ScanVolGetVolName(iFd, psReply->sr_volname); +} + +int +LFHFS_Init ( void ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Init\n"); + + int iErr = 0; + + iErr = LFHFS_LoggerInit(); + if ( iErr != 0 ) + { + goto exit; + } + + iErr = raw_readwrite_zero_fill_init(); + if ( iErr != 0 ) + { + goto exit; + } + + hfs_chashinit(); + + // Initializing Buffer cache + lf_hfs_generic_buf_cache_init(); + + BTReserveSetup(); + + journal_init(); + +exit: + return iErr; +} + +void +LFHFS_Fini ( void ) +{ + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Fini\n"); + + raw_readwrite_zero_fill_de_init(); + + // De-Initializing Buffer cache + lf_hfs_generic_buf_cache_deinit(); +} + +int +LFHFS_Mount ( int iFd, UVFSVolumeId puVolId, UVFSMountFlags puMountFlags, + __unused UVFSVolumeCredential *psVolumeCreds, UVFSFileNode *ppsRootNode ) +{ + LFHFS_LOG(LEVEL_DEBUG, "HFS_Mount %d\n", iFd); + int iError = 0; + + struct mount* psMount = hfs_mallocz(sizeof(struct mount)); + struct vnode* psDevVnode = hfs_mallocz(sizeof(struct vnode)); + struct cnode* psDevCnode = hfs_mallocz(sizeof(struct cnode)); + struct filefork* psDevFileFork = hfs_mallocz(sizeof(struct filefork)); + FileSystemRecord_s *psFSRecord = hfs_mallocz(sizeof(FileSystemRecord_s)); + + if ( psMount == NULL || psDevVnode == NULL || psDevCnode == NULL || psDevFileFork == NULL || psFSRecord == NULL ) + { + iError = ENOMEM; + LFHFS_LOG(LEVEL_ERROR, "HFS_Mount: failed to malloc initial system files\n"); + goto fail; + } + + if (puVolId != 0) + { + iError = EINVAL; + LFHFS_LOG(LEVEL_ERROR, "HFS_Mount: unknown volume ID\n"); + goto fail; + } + + psFSRecord->iFD = iFd; + psDevVnode->psFSRecord = psFSRecord; + psDevVnode->sFSParams.vnfs_marksystem = 1; + psDevVnode->bIsMountVnode = true; + + // Initializing inputs for hfs_mount + psDevFileFork->ff_data.cf_blocks = 3; + psDevFileFork->ff_data.cf_extents[0].blockCount = 1; + psDevFileFork->ff_data.cf_extents[0].startBlock = 0; + + 
psDevVnode->sFSParams.vnfs_fsnode = psDevCnode; + psDevCnode->c_vp = psDevVnode; + psDevVnode->is_rsrc = false; + psDevCnode->c_datafork = psDevFileFork; + psDevVnode->sFSParams.vnfs_mp = psMount; + + psMount->mnt_flag = (puMountFlags == UVFS_MOUNT_RDONLY)? MNT_RDONLY : 0; + // Calling to kext hfs_mount + iError = hfs_mount(psMount, psDevVnode, 0); + if (iError) + goto fail; + + struct vnode* psRootVnode; + // Creating root vnode + iError = FSOPS_GetRootVnode(psDevVnode,&psRootVnode); + if (iError) + goto fail; + *ppsRootNode = (UVFSFileNode) psRootVnode; + + goto end; + +fail: + if (psFSRecord) + hfs_free(psFSRecord); + if (psMount) + hfs_free(psMount); + if (psDevVnode) + hfs_free(psDevVnode); + if (psDevCnode) + hfs_free(psDevCnode); + if (psDevFileFork) + hfs_free(psDevFileFork); +end: + return iError; +} + +int +LFHFS_Unmount ( UVFSFileNode psRootNode, UVFSUnmountHint hint ) +{ + VERIFY_NODE_IS_VALID(psRootNode); + LFHFS_LOG(LEVEL_DEBUG, "HFS_Unmount (psRootNode %p) (hint %u)\n", psRootNode, hint); + + int iError = 0; + struct vnode *psRootVnode = (struct vnode*) psRootNode; + FileSystemRecord_s *psFSRecord = VPTOFSRECORD(psRootVnode); + struct mount *psMount = psRootVnode->sFSParams.vnfs_mp; + struct cnode *psDevCnode = VTOHFS(psRootVnode)->hfs_devvp->sFSParams.vnfs_fsnode; + struct hfsmount *psHfsMp = psMount->psHfsmount; + + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_ON_UNMOUNT, psHfsMp, NULL); + #endif + + hfs_vnop_reclaim(psRootVnode); + + if (!psHfsMp->jnl) { + hfs_flushvolumeheader(psHfsMp, HFS_FVH_SKIP_TRANSACTION | HFS_FVH_MARK_UNMOUNT); + } + + hfs_unmount(psMount); + + hfs_free(psFSRecord); + hfs_free(psMount); + hfs_free(psDevCnode->c_datafork); + hfs_free(psDevCnode); + + return iError; +} + +int +LFHFS_SetFSAttr ( UVFSFileNode psNode, const char *pcAttr, const UVFSFSAttributeValue *psAttrVal, size_t uLen ) +{ +#pragma unused (psNode, pcAttr, psAttrVal, uLen) + VERIFY_NODE_IS_VALID(psNode); + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_SetFSAttr 
(ENOTSUP)\n");
+
+    return ENOTSUP;
+}
+
+/*
+ * LFHFS_GetFSAttr
+ * Look up a single named file-system attribute and copy its value into
+ * psAttrVal. *puRetLen is always set to the required size; when uLen is
+ * smaller, E2BIG is returned so the caller can retry with a bigger buffer.
+ * Unknown attribute names return ENOTSUP.
+ */
+int
+LFHFS_GetFSAttr ( UVFSFileNode psNode, const char *pcAttr, UVFSFSAttributeValue *psAttrVal, size_t uLen, size_t *puRetLen )
+{
+    VERIFY_NODE_IS_VALID(psNode);
+    LFHFS_LOG(LEVEL_DEBUG, "LFHFS_GetFSAttr (psNode %p)\n", psNode);
+
+    int iError = 0;
+    vnode_t psVnode = (vnode_t)psNode;
+    struct hfsmount *psMount = psVnode->sFSParams.vnfs_mp->psHfsmount;
+
+    if (strcmp(pcAttr, UVFS_FSATTR_PC_LINK_MAX)==0)
+    {
+        // Maximum hard-link count; only regular files support hard links here.
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+
+        if ( vnode_isreg(psVnode) )
+        {
+            psAttrVal->fsa_number = HFS_LINK_MAX;
+        }
+        else
+        {
+            psAttrVal->fsa_number = 1;
+        }
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_PC_NAME_MAX)==0)
+    {
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = MAXPATHLEN;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_PC_NO_TRUNC)==0)
+    {
+        *puRetLen = sizeof(bool);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_bool = true;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_PC_FILESIZEBITS)==0)
+    {
+        // The number of bits used to represent the size (in bytes) of a file
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = 64;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_PC_XATTR_SIZE_BITS)==0)
+    {
+        // The number of bits used to represent the size (in bytes) of an extended attribute.
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = HFS_XATTR_SIZE_BITS;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_BLOCKSIZE)==0)
+    {
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = psMount->blockSize;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_IOSIZE)==0)
+    {
+        // Size (in bytes) of the optimal transfer block size
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = 1024*1024*128;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_TOTALBLOCKS)==0)
+    {
+        // Total number of file system blocks
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = psMount->totalBlocks;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_BLOCKSFREE)==0)
+    {
+        // Total number of free file system blocks
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = hfs_freeblks( psMount, 0 );
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_BLOCKSAVAIL)==0)
+    {
+        // Total number of free file system blocks available for allocation to files (in our case - the same as UVFS_FSATTR_BLOCKSFREE)
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = hfs_freeblks( psMount, 1 );
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_BLOCKSUSED)==0)
+    {
+        // Number of file system blocks currently allocated for some use (TOTAL_BLOCKS - BLOCKSAVAIL)
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = psMount->totalBlocks - hfs_freeblks( psMount, 1 );
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_CNAME)==0)
+    {
+        char* pcName;
+        //The file name
+        if (IS_ROOT(psVnode))
+            pcName = "";
+        else
+            pcName = (char*) psVnode->sFSParams.vnfs_cnp->cn_nameptr;
+
+        if (pcName == NULL)
+            return EINVAL;
+
+        *puRetLen = strlen(pcName) + 1;
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        strlcpy(psAttrVal->fsa_string, pcName, *puRetLen);
+        return 0;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_FSTYPENAME)==0)
+    {
+        *puRetLen = 4;
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        // A string representing the type of file system
+        strcpy(psAttrVal->fsa_string, "HFS");
+        *(psAttrVal->fsa_string+3) = 0; // Must be null terminated
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_FSSUBTYPE)==0)
+    {
+#define HFS_PLUS_STR                     "HFS Plus"
+#define HFS_PLUS_JOURNALED_STR           "HFS Plus (Journaled)"
+#define HFS_PLUS_CASE_SENS_STR           "HFS Plus (Case Sensitive)"
+#define HFS_PLUS_CASE_SENS_JOURNALED_STR "HFS Plus (Case Sensitive, Journaled)"
+
+        char* pcFSSubType = HFS_PLUS_STR;
+        if ( (psMount->hfs_flags & HFS_CASE_SENSITIVE) && psMount->jnl )
+        {
+            pcFSSubType = HFS_PLUS_CASE_SENS_JOURNALED_STR;
+        }
+        else if ( psMount->hfs_flags & HFS_CASE_SENSITIVE )
+        {
+            pcFSSubType = HFS_PLUS_CASE_SENS_STR;
+        }
+        else if ( psMount->jnl )
+        {
+            pcFSSubType = HFS_PLUS_JOURNALED_STR;
+        }
+
+        *puRetLen = strlen( pcFSSubType ) + 1;
+        if ( uLen < *puRetLen )
+        {
+            return E2BIG;
+        }
+
+        strcpy( psAttrVal->fsa_string, pcFSSubType );
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_VOLNAME)==0)
+    {
+        *puRetLen = strlen((char *)psMount->vcbVN)+1; // Add 1 for the NULL terminator
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        strcpy(psAttrVal->fsa_string, (char *)psMount->vcbVN);
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_VOLUUID)==0)
+    {
+        *puRetLen = sizeof(uuid_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        hfs_getvoluuid( psMount, psAttrVal->fsa_opaque );
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_CAPS_FORMAT)==0)
+    {
+        // A bitmask indicating the capabilities of the volume format
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+
+        psAttrVal->fsa_number =
+            VOL_CAP_FMT_PERSISTENTOBJECTIDS |
+            VOL_CAP_FMT_SYMBOLICLINKS |
+            VOL_CAP_FMT_HARDLINKS |
+            VOL_CAP_FMT_JOURNAL |
+            (psMount->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
+            (psMount->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
+            VOL_CAP_FMT_CASE_PRESERVING |
+            VOL_CAP_FMT_2TB_FILESIZE |
+            VOL_CAP_FMT_HIDDEN_FILES |
+            /* XXX rdar://problem/48128963 VOL_CAP_FMT_PATH_FROM_ID */ 0;
+
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_CAPS_INTERFACES)==0)
+    {
+        // A bitmask indicating the interface capabilities of the file system
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+
+        psAttrVal->fsa_number =
+#if LF_HFS_NATIVE_SEARCHFS_SUPPORT
+            VOL_CAP_INT_SEARCHFS |
+#endif
+            VOL_CAP_INT_EXTENDED_ATTR;
+
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_LAST_MTIME)==0)
+    {
+        // system last mounted time
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = psMount->hfs_last_mounted_mtime;
+        goto end;
+    }
+
+    if (strcmp(pcAttr, UVFS_FSATTR_MOUNT_TIME)==0)
+    {
+        // system mount time
+        *puRetLen = sizeof(uint64_t);
+        if (uLen < *puRetLen)
+        {
+            return E2BIG;
+        }
+        psAttrVal->fsa_number = psMount->hfs_mount_time;
+        goto end;
+    }
+
+    iError = ENOTSUP;
+end:
+    return iError;
+}
+
+// kHFSVolumeUnmountedMask: this bit is used to indicate whether the volume is dirty (for which fsck needs to run prior to mount) or clean.
+// For non-journaled volumes:
+// - Each operation that causes metadata modification clears this bit.
+// - A Sync operation that takes place after all 'dirtying' operations are completed sets this bit.
+// Syncronization between the 'dirtying' operations and the Sync is performed by the hfs_global_lock().
+// For journaled volumes, the volume is considered clean after a journal has been committed to the media.
+int LFHFS_Sync(UVFSFileNode psNode) { + VERIFY_NODE_IS_VALID(psNode); + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Sync (psNode %p)\n", psNode); + + int iErr = 0; + vnode_t psVnode = (vnode_t)psNode; + struct hfsmount *psMount = psVnode->sFSParams.vnfs_mp->psHfsmount; + bool bNeedUnlock = false; + + lf_lck_mtx_lock(&psMount->sync_mutex); + psMount->hfs_syncer_thread = pthread_self(); + + if (psMount->jnl) { + + hfs_flush(psMount, HFS_FLUSH_JOURNAL_META); + + } else { + + if (psMount->hfs_global_lockowner != pthread_self()) { + hfs_lock_global(psMount, HFS_EXCLUSIVE_LOCK); + bNeedUnlock = true; + } + + hfs_flushvolumeheader(psMount, HFS_FVH_SKIP_TRANSACTION | HFS_FVH_MARK_UNMOUNT); + + if (bNeedUnlock) { + hfs_unlock_global(psMount); + } + } + + psMount->hfs_syncer_thread = NULL; + lf_lck_mtx_unlock(&psMount->sync_mutex); + + return(iErr); +} + +int +LFHFS_Check( int fdToCheck , __unused UVFSVolumeId volId, + __unused UVFSVolumeCredential *volumeCreds, check_flags_t how ) +{ + return fsck_hfs(fdToCheck, how); +} + +UVFSFSOps HFS_fsOps = { + .fsops_version = UVFS_FSOPS_VERSION_CURRENT, + + .fsops_init = LFHFS_Init, + .fsops_fini = LFHFS_Fini, + + .fsops_taste = LFHFS_Taste, + .fsops_scanvols = LFHFS_ScanVols, + .fsops_mount = LFHFS_Mount, + .fsops_sync = LFHFS_Sync, + .fsops_unmount = LFHFS_Unmount, + + .fsops_getfsattr = LFHFS_GetFSAttr, + .fsops_setfsattr = LFHFS_SetFSAttr, + + .fsops_getattr = LFHFS_GetAttr, + .fsops_setattr = LFHFS_SetAttr, + .fsops_lookup = LFHFS_Lookup, + .fsops_reclaim = LFHFS_Reclaim, + .fsops_readlink = LFHFS_ReadLink, + .fsops_read = LFHFS_Read, + .fsops_write = LFHFS_Write, + .fsops_create = LFHFS_Create, + .fsops_mkdir = LFHFS_MkDir, + .fsops_symlink = LFHFS_SymLink, + .fsops_remove = LFHFS_Remove, + .fsops_rmdir = LFHFS_RmDir, + .fsops_rename = LFHFS_Rename, + .fsops_readdir = LFHFS_ReadDir, + .fsops_readdirattr = LFHFS_ReadDirAttr, + .fsops_link = LFHFS_Link, + .fsops_check = LFHFS_Check, + + .fsops_getxattr = LFHFS_GetXAttr, + .fsops_setxattr = 
LFHFS_SetXAttr, + .fsops_listxattr = LFHFS_ListXAttr, + + .fsops_scandir = LFHFS_ScanDir, + .fsops_scanids = LFHFS_ScanIDs +}; + +#if HFS_CRASH_TEST +CrashAbortFunction_FP gpsCrashAbortFunctionArray[CRASH_ABORT_LAST] = {0}; +#endif + +__attribute__((visibility("default"))) +void +livefiles_plugin_init(UVFSFSOps **ops) +{ + if (ops) { + *ops = &HFS_fsOps; + } + + return; +} diff --git a/livefiles_hfs_plugin/lf_hfs_fsops_handler.h b/livefiles_hfs_plugin/lf_hfs_fsops_handler.h new file mode 100644 index 0000000..44f15ca --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_fsops_handler.h @@ -0,0 +1,24 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_fsops_handler.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#ifndef lf_hfs_fsops_handler_h +#define lf_hfs_fsops_handler_h + +#include "lf_hfs_common.h" +#include "lf_hfs_vnode.h" + +uint64_t FSOPS_GetOffsetFromClusterNum(vnode_t vp, uint64_t uClusterNum); +int LFHFS_Mount (int iFd, UVFSVolumeId puVolId, __unused UVFSMountFlags puMountFlags, + __unused UVFSVolumeCredential *psVolumeCreds, UVFSFileNode *ppsRootNode); +int LFHFS_Unmount (UVFSFileNode psRootNode, UVFSUnmountHint hint); +int LFHFS_ScanVols (int iFd, UVFSScanVolsRequest *psRequest, UVFSScanVolsReply *psReply ); +int LFHFS_Taste ( int iFd ); + +extern UVFSFSOps HFS_fsOps; + +#endif /* lf_hfs_fsops_handler_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_generic_buf.c b/livefiles_hfs_plugin/lf_hfs_generic_buf.c new file mode 100644 index 0000000..383d567 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_generic_buf.c @@ -0,0 +1,800 @@ +// +// lf_hfs_generic_buf.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_rangelist.h" +#include "lf_hfs_locks.h" +#include "lf_hfs_logger.h" +#include +#include + +#define GEN_BUF_ALLOC_DEBUG 0 + +TAILQ_HEAD(buf_cache_head, buf_cache_entry); + +struct buf_cache_entry { + TAILQ_ENTRY(buf_cache_entry) buf_cache_link; + GenericLFBuf sBuf; +}; + +boolean_t buf_cache_state = false; +struct buf_cache_head buf_cache_list; +pthread_mutex_t buf_cache_mutex; /* protects access to buffer cache data */ + + +#define BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT (140) +#define BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT (128) +#define BUF_CACHE_MAX_DATA_UPPER_LIMIT (1536*1024) +#define BUF_CACHE_MAX_DATA_LOWER_LIMIT (1024*1024) + +CacheStats_S gCacheStat = {0}; + +#define IGNORE_MOUNT_FD (INT_MAX) + +void lf_hfs_generic_buf_cache_init( void ); +void lf_hfs_generic_buf_cache_deinit( void ); +struct buf_cache_entry *lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf ); +struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster(int iFD, uint64_t uPhyCluster, uint64_t uBlockSize); +struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf(GenericLFBufPtr psBuf); +GenericLFBuf *lf_hfs_generic_buf_cache_add( GenericLFBuf *psBuf ); +void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry ); +void lf_hfs_generic_buf_cache_remove_all( int iFD ); +void lf_hfs_generic_buf_ref(GenericLFBuf *psBuf); +void lf_hfs_generic_buf_rele(GenericLFBuf *psBuf); + +// lf_hfs_generic_buf_take_ownership +// Take ownership on this buff. +// When the function returns zero, we own the buffer it is locked by our thread. +// When EAGAIN is returned, another thread raced us to own this buffer. Try again. 
+// ETIMEDOUT indicates that we timeout waiting for the buffer owner to release it +int lf_hfs_generic_buf_take_ownership(GenericLFBuf *psBuf, pthread_mutex_t *pSem) { + lf_lck_mtx_lock(&psBuf->sLock); + + if ((psBuf->uUseCnt) && (psBuf->sOwnerThread != pthread_self())) { + + // Someone else owns the buffer + if (pSem) { + lf_lck_mtx_unlock(pSem); + } + + // Wait for the buffer to get released + struct timespec sWaitTime = {.tv_sec = 3, .tv_nsec = 0}; + + int iWaitErr = lf_cond_wait_relative(&psBuf->sOwnerCond, &psBuf->sLock, &sWaitTime); + if (iWaitErr == ETIMEDOUT) { + LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: ETIMEDOUT on %p", psBuf); + return(ETIMEDOUT); + } else if (iWaitErr) { + LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: lf_cond_wait_relative returned %d on %p", iWaitErr, psBuf); + return(EINVAL); + } + + // Buffer owner change, Retry. + lf_lck_mtx_unlock(&psBuf->sLock); + return(EAGAIN); + } + + // We own the buffer + assert(psBuf->uLockCnt == 0); + assert(psBuf->uUseCnt == 0); + psBuf->pLockingThread = pthread_self(); + psBuf->sOwnerThread = pthread_self(); + psBuf->uUseCnt++; + psBuf->uLockCnt++; + return(0); +} + +// Function: lf_hfs_generic_buf_allocate +// Allocate GenericBuff structure and if exists, attach to a previously allocated buffer of the same physical block. 
+GenericLFBufPtr lf_hfs_generic_buf_allocate( vnode_t psVnode, daddr64_t uBlockN, uint32_t uBlockSize, uint64_t uFlags ) {
+
+    uint64_t uPhyCluster        = 0;
+    uint64_t uInClusterOffset   = 0;
+    GenericLFBufPtr psBuf       = NULL;
+    GenericLFBuf sBuf           = {0};
+    struct buf_cache_entry *psCacheEntry = NULL;
+
+    assert(psVnode);
+
+    if (uFlags & GEN_BUF_PHY_BLOCK) {
+        // Caller already speaks physical blocks.
+        uPhyCluster = uBlockN;
+    } else {
+        // Determine PHY block number
+        uint64_t uStartCluster = 0;
+        int iError = raw_readwrite_get_cluster_from_offset(psVnode,
+                                                           uBlockSize*uBlockN,
+                                                           &uStartCluster,
+                                                           &uInClusterOffset,
+                                                           NULL );
+        if (iError != 0) {
+            panic("Error calculating uPhyCluster");
+        }
+
+        uint64_t uReadOffset = (HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfsPlusIOPosOffset +
+                                uStartCluster * HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->blockSize) + uInClusterOffset;
+
+        uPhyCluster = uReadOffset / HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size;
+    }
+
+    #if GEN_BUF_ALLOC_DEBUG
+    printf("lf_hfs_generic_buf_allocate: psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
+           psVnode, uBlockN, uBlockSize, uFlags, uPhyCluster);
+    #endif
+
+    // Check buffer cache, if a memory buffer already allocated for this physical block
+    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {
+retry:
+        lf_lck_mtx_lock(&buf_cache_mutex);
+
+        psCacheEntry = lf_hfs_generic_buf_cache_find_by_phy_cluster(VNODE_TO_IFD(psVnode), uPhyCluster, uBlockSize);
+        if (psCacheEntry) {
+            // buffer exists, share. Move to MRU position.
+            TAILQ_REMOVE(&buf_cache_list, psCacheEntry, buf_cache_link);
+            TAILQ_INSERT_HEAD(&buf_cache_list, psCacheEntry, buf_cache_link);
+
+            psBuf = &psCacheEntry->sBuf;
+            #if GEN_BUF_ALLOC_DEBUG
+            printf("Already in cache: %p (UseCnt %u uCacheFlags 0x%llx)\n", psBuf, psBuf->uUseCnt, psBuf->uCacheFlags);
+            #endif
+            // take_ownership drops buf_cache_mutex if it has to wait.
+            int iRet = lf_hfs_generic_buf_take_ownership(psBuf, &buf_cache_mutex);
+            if (iRet == EAGAIN) {
+                goto retry;
+            } else if (iRet) {
+                LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_allocate: lf_hfs_generic_buf_take_ownership returned %d.\n", iRet);
+                return(NULL);
+            }
+
+            lf_hfs_generic_buf_unlock(psBuf);
+            lf_lck_mtx_unlock(&buf_cache_mutex);
+            return(psBuf);
+        }
+
+        lf_lck_mtx_unlock(&buf_cache_mutex);
+    }
+
+    // Not found in cache, need to create a GenBuf
+    sBuf.uBlockN      = uBlockN;
+    sBuf.uDataSize    = uBlockSize;
+    sBuf.psVnode      = psVnode;
+    sBuf.uPhyCluster  = uPhyCluster;
+    sBuf.uCacheFlags  = uFlags;
+    sBuf.uUseCnt      = 1;
+    sBuf.sOwnerThread = pthread_self();
+
+    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {
+
+        // Add to cache
+        lf_lck_mtx_lock(&buf_cache_mutex);
+
+        GenericLFBufPtr psCachedBuf = lf_hfs_generic_buf_cache_add(&sBuf);
+
+        // Fix: check for allocation failure BEFORE touching the new entry —
+        // the cond/mutex init calls used to run unconditionally and would
+        // dereference NULL when lf_hfs_generic_buf_cache_add failed.
+        if (psCachedBuf) {
+            lf_cond_init(&psCachedBuf->sOwnerCond);
+            lf_lck_mtx_init(&psCachedBuf->sLock);
+
+            if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
+                lf_hfs_generic_buf_lock(psCachedBuf);
+                lf_hfs_generic_buf_set_cache_flag(psCachedBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
+                lf_hfs_generic_buf_unlock(psCachedBuf);
+            }
+        }
+
+        lf_lck_mtx_unlock(&buf_cache_mutex);
+        #if GEN_BUF_ALLOC_DEBUG
+        printf("Added to cache %p\n", psCachedBuf);
+        #endif
+        return psCachedBuf;
+
+    } else {
+        // Alloc memory for a non-cached buffer
+        psBuf = hfs_mallocz(sizeof(GenericLFBuf));
+        if (!psBuf) {
+            goto error;
+        }
+        memcpy(psBuf, &sBuf, sizeof(*psBuf));
+        psBuf->pvData = hfs_mallocz(psBuf->uDataSize);
+        if (!psBuf->pvData) {
+            goto error;
+        }
+
+        lf_cond_init(&psBuf->sOwnerCond);
+        lf_lck_mtx_init(&psBuf->sLock);
+
+        gCacheStat.gen_buf_uncached++;
+        if (gCacheStat.gen_buf_uncached > gCacheStat.max_gen_buf_uncached) {
+            gCacheStat.max_gen_buf_uncached = gCacheStat.gen_buf_uncached;
+        }
+        if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
+            lf_hfs_generic_buf_lock(psBuf);
+            lf_hfs_generic_buf_set_cache_flag(psBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
+            lf_hfs_generic_buf_unlock(psBuf);
+        }
+
+        #if GEN_BUF_ALLOC_DEBUG
+        printf("Provided uncached %p\n", psBuf);
+        #endif
+
+        return psBuf;
+    }
+error:
+    if (psBuf && psBuf->pvData) {
+        hfs_free(psBuf->pvData);
+    }
+    if (psBuf) {
+        hfs_free(psBuf);
+    }
+    return(NULL);
+}
+
+/* Read the buffer's block from media unless it is already marked up-to-date.
+   Caller must own the buffer. */
+errno_t lf_hfs_generic_buf_read( GenericLFBufPtr psBuf )
+{
+    errno_t iErr              = 0;
+    uint64_t uActuallyRead    = 0;
+    uint64_t uReadStartCluster = 0;
+
+    #if GEN_BUF_ALLOC_DEBUG
+    printf("lf_hfs_generic_buf_read: psBuf %p, psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
+           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster);
+    #endif
+
+    if (!psBuf) {
+        return(EINVAL);
+    }
+
+    if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED))
+    {
+        lf_lck_mtx_lock(&buf_cache_mutex);
+        lf_hfs_generic_buf_cache_update(psBuf);
+        lf_lck_mtx_unlock(&buf_cache_mutex);
+    }
+
+    lf_hfs_generic_buf_lock(psBuf);
+
+    assert(psBuf->uUseCnt != 0);
+    assert(psBuf->sOwnerThread == pthread_self());
+
+    if (psBuf->uCacheFlags & GEN_BUF_IS_UPTODATE) {
+
+        // The buffer already contains data equals or newer than media.
+        #if GEN_BUF_ALLOC_DEBUG
+        printf("already up-to-date.\n");
+        #endif
+        goto exit;
+    }
+
+    // Cache is disabled or buffer wasn't found, read data from media
+    iErr = raw_readwrite_read_mount(psBuf->psVnode,
+                                    psBuf->uPhyCluster,
+                                    HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
+                                    psBuf->pvData,
+                                    psBuf->uDataSize,
+                                    &uActuallyRead,
+                                    &uReadStartCluster);
+
+    if ( iErr == 0 ) {
+        psBuf->uValidBytes = (uint32_t)uActuallyRead;
+        lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_IS_UPTODATE);
+
+        #if GEN_BUF_ALLOC_DEBUG
+        uint32_t *puData = psBuf->pvData;
+        /* fix: print the first four words — index 1 was printed twice as [2] */
+        printf("Success. uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n", psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
+        #endif
+
+    } else {
+
+        #if GEN_BUF_ALLOC_DEBUG
+        printf("Error. uPhyCluster %llu, iErr %d.\n", psBuf->uPhyCluster, iErr);
+        #endif
+    }
+exit:
+    lf_hfs_generic_buf_unlock(psBuf);
+    return iErr;
+}
+
+/* Write the buffer's contents back to media. Caller must own the buffer and
+   it must not be write-locked. */
+errno_t lf_hfs_generic_buf_write( GenericLFBufPtr psBuf ) {
+    errno_t iErr = 0;
+
+    lf_hfs_generic_buf_lock(psBuf);
+
+    #if GEN_BUF_ALLOC_DEBUG
+    /* fix: uFlags -> uCacheFlags (field name used everywhere else); puData[1] was printed as [2] */
+    printf("lf_hfs_generic_buf_write: psBuf %p psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
+    uint32_t *puData = psBuf->pvData;
+    printf("psBuf uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n", psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
+    #endif
+
+    assert(psBuf->uUseCnt != 0);
+    assert(!(psBuf->uCacheFlags & GEN_BUF_WRITE_LOCK));
+    assert(psBuf->sOwnerThread == pthread_self());
+
+    iErr = raw_readwrite_write_mount(psBuf->psVnode,
+                                     psBuf->uPhyCluster,
+                                     HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
+                                     psBuf->pvData,
+                                     psBuf->uDataSize,
+                                     NULL, NULL);
+
+    lf_hfs_generic_buf_unlock(psBuf);
+    return iErr;
+}
+
+void lf_hfs_generic_buf_clear( GenericLFBufPtr psBuf ) {
memset(psBuf->pvData,0,sizeof(psBuf->uDataSize)); +} + +void lf_hfs_generic_buf_invalidate( GenericLFBuf *psBuf ) { + struct buf_cache_entry *psCacheEntry; + + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_invalidate: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", + psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt); + #endif + + lf_hfs_generic_buf_lock(psBuf); + lf_hfs_generic_buf_rele(psBuf); + + assert(psBuf->uUseCnt == 0); + assert(psBuf->sOwnerThread == NULL); + + // Check buffer cache, if a memory buffer already allocated for this physical block + if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED)) { + + lf_lck_mtx_lock(&buf_cache_mutex); + psCacheEntry = lf_hfs_generic_buf_cache_find_gen_buf(psBuf); + + if (psCacheEntry) { + lf_hfs_generic_buf_cache_remove(psCacheEntry); + } else { + panic("A buffer is marked Cached, but was not found in Cache"); + } + + lf_lck_mtx_unlock(&buf_cache_mutex); + + } else { + // This is a non-cached buffer + gCacheStat.gen_buf_uncached--; + lf_hfs_generic_buf_unlock(psBuf); + lf_cond_destroy(&psBuf->sOwnerCond); + lf_lck_mtx_destroy(&psBuf->sLock); + hfs_free(psBuf->pvData); + hfs_free(psBuf); + } +} + +void lf_hfs_generic_buf_ref(GenericLFBuf *psBuf) { + lf_hfs_generic_buf_lock(psBuf); + assert(psBuf->sOwnerThread == pthread_self()); + psBuf->uUseCnt++; + lf_hfs_generic_buf_unlock(psBuf); +} + +int lf_hfs_generic_buf_validate_owner(GenericLFBuf *psBuf) { + + return(psBuf->sOwnerThread == pthread_self()); +} + +void lf_hfs_generic_buf_rele(GenericLFBuf *psBuf) { + lf_hfs_generic_buf_lock(psBuf); + assert(psBuf->uUseCnt != 0); + assert(psBuf->sOwnerThread == pthread_self()); + psBuf->uUseCnt--; + if (psBuf->uUseCnt == 0) { + psBuf->sOwnerThread = NULL; + lf_cond_wakeup(&psBuf->sOwnerCond); + } + lf_hfs_generic_buf_unlock(psBuf); +} + +void lf_hfs_generic_buf_lock(GenericLFBufPtr psBuf) { + #if 
GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_lock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", + psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt); + #endif + + if (psBuf->pLockingThread == pthread_self()) { + psBuf->uLockCnt++; + } else { + lf_lck_mtx_lock(&psBuf->sLock); + assert(psBuf->uLockCnt == 0); + psBuf->uLockCnt = 1; + psBuf->pLockingThread = pthread_self(); + } +} + +void lf_hfs_generic_buf_unlock(GenericLFBufPtr psBuf) { + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_unlock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", + psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt); + #endif + + assert(psBuf->pLockingThread == pthread_self()); + assert(psBuf->uLockCnt); + + psBuf->uLockCnt--; + if (!psBuf->uLockCnt) { + psBuf->pLockingThread = NULL; + lf_lck_mtx_unlock(&psBuf->sLock); + } +} + +void lf_hfs_generic_buf_set_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) { + lf_hfs_generic_buf_lock(psBuf); + psBuf->uCacheFlags |= uCacheFlags; + lf_hfs_generic_buf_unlock(psBuf); +} + +void lf_hfs_generic_buf_clear_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) { + lf_hfs_generic_buf_lock(psBuf); + psBuf->uCacheFlags &= ~uCacheFlags; + lf_hfs_generic_buf_unlock(psBuf); +} + +static void lf_hfs_buf_free_unused() +{ + //We want to free more then we actually need, so that we won't have to come here every new buf that we allocate + while ( gCacheStat.buf_cache_size > BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT || + gCacheStat.buf_total_allocated_size > BUF_CACHE_MAX_DATA_LOWER_LIMIT) + { + struct buf_cache_entry *last; + + last = TAILQ_LAST(&buf_cache_list, buf_cache_head); + + if (!last) { + break; + } + + lf_hfs_generic_buf_lock(&last->sBuf); + + if ((last->sBuf.uUseCnt) || (last->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) { + 
// Last buffer in buffer cache is in use. + // Nothing more to free + lf_hfs_generic_buf_unlock(&last->sBuf); + break; + } + + ++gCacheStat.buf_cache_cleanup; + lf_hfs_generic_buf_cache_remove(last); + } +} + +void lf_hfs_generic_buf_release( GenericLFBufPtr psBuf ) +{ + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_release: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", + psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt); + #endif + + if (!psBuf) { + return; + } + + lf_hfs_generic_buf_rele(psBuf); + + // If Unused and UnCached, free. + if ((psBuf->uCacheFlags & GEN_BUF_NON_CACHED) && (psBuf->uUseCnt == 0)) { + // Buffer not in cache - free it + gCacheStat.gen_buf_uncached--; + lf_cond_destroy(&psBuf->sOwnerCond); + lf_lck_mtx_destroy(&psBuf->sLock); + hfs_free(psBuf->pvData); + hfs_free(psBuf); + return; + } + + // Cleanup unused entries in the cache + int iTry = lf_lck_mtx_try_lock(&buf_cache_mutex); + if (iTry) { + return; + } + + //We want to free more then we actually need, so that we won't have to come here every new buf that we allocate + lf_hfs_buf_free_unused(); + lf_lck_mtx_unlock(&buf_cache_mutex); +} + +// Buffer Cache functions + +void lf_hfs_generic_buf_cache_init( void ) { + gCacheStat.buf_cache_size = 0; + gCacheStat.max_gen_buf_uncached = 0; + gCacheStat.gen_buf_uncached = 0; + lf_lck_mtx_init(&buf_cache_mutex); + TAILQ_INIT(&buf_cache_list); + buf_cache_state = true; +} + +void lf_hfs_generic_buf_cache_deinit( void ) +{ + lf_hfs_generic_buf_cache_remove_all(IGNORE_MOUNT_FD); + + assert(gCacheStat.buf_cache_size == 0); + assert(gCacheStat.gen_buf_uncached == 0); + + buf_cache_state = false; + lf_lck_mtx_destroy(&buf_cache_mutex); +} + +void lf_hfs_generic_buf_cache_clear_by_iFD( int iFD ) +{ + lf_hfs_generic_buf_cache_remove_all(iFD); +} + +boolean_t lf_hfs_generic_buf_match_range( struct buf_cache_entry *entry, GenericLFBufPtr 
psBuf ) +{ + if ( VTOF(entry->sBuf.psVnode) != VTOF(psBuf->psVnode) ) + { + return false; + } + + uint64_t size_1 = entry->sBuf.uDataSize; + uint64_t start_1 = entry->sBuf.uBlockN * size_1; + uint64_t end_1 = start_1 + size_1 - 1; + uint64_t size_2 = psBuf->uDataSize; + uint64_t start_2 = psBuf->uBlockN * size_2; + uint64_t end_2 = start_2 + size_2 - 1; + + enum rl_overlaptype overlap; + struct rl_entry entry_range = {.rl_start = start_1, .rl_end = end_1}; + + overlap = rl_overlap(&entry_range, start_2, end_2); + + switch (overlap) + { + case RL_MATCHINGOVERLAP: + return true; + case RL_OVERLAPCONTAINSRANGE: + // Make sure we have same start though + assert(start_1 == start_2); + return true; + case RL_NOOVERLAP: + case RL_OVERLAPISCONTAINED: + return false; + case RL_OVERLAPSTARTSBEFORE: + case RL_OVERLAPENDSAFTER: + LFHFS_LOG(LEVEL_ERROR, " lf_hfs_generic_buf_match_range : cache overlap [%d]", overlap); + assert(0); + } +} + +struct buf_cache_entry * lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf ) +{ + struct buf_cache_entry *entry, *entry_next; + + TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next) + { + if ( lf_hfs_generic_buf_match_range(entry, psBuf) ) + { + break; + } + } + + return entry; +} + +// Run the function pfCallback on all buffers that belongs to node psVnode. 
+int lf_hfs_generic_buf_write_iterate(vnode_t psVnode, IterateCallback pfCallback, uint32_t uFlags, void *pvArgs) { + + struct buf_cache_entry *psCacheEntry, *psNextCacheEntry; + int iFD = VNODE_TO_IFD(psVnode); + + TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) { + int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode); + + if ( (iFD == iEntryFD) && (psCacheEntry->sBuf.psVnode == psVnode)) { + if ((uFlags & BUF_SKIP_LOCKED) && (psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) { + continue; + } + if ((uFlags & BUF_SKIP_NONLOCKED) && !(psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) { + continue; + } + pfCallback(&psCacheEntry->sBuf, pvArgs); + } + } + return(0); +} + + +struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster(int iFD, uint64_t uPhyCluster, uint64_t uBlockSize) { + + struct buf_cache_entry *psCacheEntry, *psNextCacheEntry; + + TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) { + if (psCacheEntry->sBuf.psVnode) + { + int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode); + if ( (psCacheEntry->sBuf.uPhyCluster == uPhyCluster) && + (iEntryFD == iFD ) && + (psCacheEntry->sBuf.uDataSize >= uBlockSize ) ) { + break; + } + } + else + { + LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_find_by_phy_cluster: got buf with vnode == NULL, cache_flags: 0x%llx, uUseCnt %d", psCacheEntry->sBuf.uCacheFlags, psCacheEntry->sBuf.uUseCnt); + assert(0); + } + + } + return psCacheEntry; +} + +struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf(GenericLFBufPtr psBuf) { + + struct buf_cache_entry *psCacheEntry, *psNextCacheEntry; + + TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) { + if ( &psCacheEntry->sBuf == psBuf ) { + break; + } + } + return psCacheEntry; +} + +GenericLFBufPtr lf_hfs_generic_buf_cache_add( GenericLFBufPtr psBuf ) +{ + struct buf_cache_entry *entry; + + //Check if we have enough space to alloc this buffer, 
unless need to evict something + if (gCacheStat.buf_total_allocated_size + psBuf->uDataSize > BUF_CACHE_MAX_DATA_UPPER_LIMIT || + gCacheStat.buf_cache_size + 1 == BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT) + { + lf_hfs_buf_free_unused(); + } + + entry = hfs_mallocz(sizeof(*entry)); + if (!entry) { + goto error; + } + + memcpy(&entry->sBuf, (void*)psBuf, sizeof(*psBuf)); + entry->sBuf.uCacheFlags &= ~GEN_BUF_NON_CACHED; + + entry->sBuf.pvData = hfs_mallocz(psBuf->uDataSize); + if (!entry->sBuf.pvData) { + goto error; + } + + TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link); + + gCacheStat.buf_cache_size++; + gCacheStat.buf_total_allocated_size+=psBuf->uDataSize; + + if (gCacheStat.buf_cache_size > gCacheStat.max_buf_cache_size) { + gCacheStat.max_buf_cache_size = gCacheStat.buf_cache_size; + } + + return(&entry->sBuf); + +error: + if (entry) { + if (entry->sBuf.pvData) { + hfs_free(entry->sBuf.pvData); + } + hfs_free(entry); + } + return(NULL); +} + +void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf ) +{ + struct buf_cache_entry *entry; + + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_cache_update: psBuf %p\n", psBuf); + #endif + + // Check that cache entry still exists and hasn't thrown away + entry = lf_hfs_generic_buf_cache_find(psBuf); + if (!entry) { + return; + } + + TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link); + TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link); +} + +void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, __unused GenericLFBufPtr psBuf ) +{ + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_cache_copy: psBuf %p\n", psBuf); + #endif + + TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link); + TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link); +} + +void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry ) { + + if (entry->sBuf.uUseCnt != 0) { + LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_remove: remove buffer %p with uUseCnt %u", &entry->sBuf, entry->sBuf.uUseCnt); + } 
+ + #if GEN_BUF_ALLOC_DEBUG + GenericLFBuf *psBuf = &entry->sBuf; + printf("lf_hfs_generic_buf_cache_remove: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n", + psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt); + #endif + + TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link); + --gCacheStat.buf_cache_size; + ++gCacheStat.buf_cache_remove; + gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize; + + assert(entry->sBuf.uLockCnt == 1); + + lf_lck_mtx_unlock(&entry->sBuf.sLock); + lf_cond_destroy(&entry->sBuf.sOwnerCond); + lf_lck_mtx_destroy(&entry->sBuf.sLock); + + hfs_free(entry->sBuf.pvData); + hfs_free(entry); +} + +void lf_hfs_generic_buf_cache_remove_all( int iFD ) { + struct buf_cache_entry *entry, *entry_next; + + lf_lck_mtx_lock(&buf_cache_mutex); + + TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next) + { + if ( (iFD == IGNORE_MOUNT_FD) || ( VNODE_TO_IFD(entry->sBuf.psVnode) == iFD ) ) + { + if (iFD == IGNORE_MOUNT_FD) { + // Media no longer available, force remove all + TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link); + --gCacheStat.buf_cache_size; + ++gCacheStat.buf_cache_remove; + gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize; + } else { + lf_hfs_generic_buf_lock(&entry->sBuf); + lf_hfs_generic_buf_cache_remove(entry); + } + } + } + + lf_lck_mtx_unlock(&buf_cache_mutex); +} + +/* buf_cache_mutex Should get locked from the caller using lf_hfs_generic_buf_cache_LockBufCache*/ +void lf_hfs_generic_buf_cache_remove_vnode(vnode_t vp) { + + struct buf_cache_entry *entry, *entry_next; + + #if GEN_BUF_ALLOC_DEBUG + printf("lf_hfs_generic_buf_cache_remove_vnode: vp %p: ", vp); + #endif + + TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next) { + + if ( entry->sBuf.psVnode == vp ) { + + #if GEN_BUF_ALLOC_DEBUG + printf("&sBuf %p, ", &entry->sBuf); + #endif + + 
lf_hfs_generic_buf_lock(&entry->sBuf); + lf_hfs_generic_buf_cache_remove(entry); + } + } + + #if GEN_BUF_ALLOC_DEBUG + printf("Done.\n"); + #endif +} + +void lf_hfs_generic_buf_cache_LockBufCache(void) +{ + lf_lck_mtx_lock(&buf_cache_mutex); +} + +void lf_hfs_generic_buf_cache_UnLockBufCache(void) +{ + lf_lck_mtx_unlock(&buf_cache_mutex); +} diff --git a/livefiles_hfs_plugin/lf_hfs_generic_buf.h b/livefiles_hfs_plugin/lf_hfs_generic_buf.h new file mode 100644 index 0000000..111b609 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_generic_buf.h @@ -0,0 +1,87 @@ +// +// lf_hfs_generic_buf.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#ifndef lf_hfs_generic_buf_h +#define lf_hfs_generic_buf_h + +#include "lf_hfs.h" + +#define BUF_SKIP_NONLOCKED 0x01 +#define BUF_SKIP_LOCKED 0x02 +#define BUF_SCAN_CLEAN 0x04 /* scan the clean buffers */ +#define BUF_SCAN_DIRTY 0x08 /* scan the dirty buffers */ +#define BUF_NOTIFY_BUSY 0x10 /* notify the caller about the busy pages during the scan */ + +// uCacheFlags: +#define GEN_BUF_WRITE_LOCK 0x00001000 // When set, the buffer does not get written to media. It will be written as part of a journal transaction. +#define GEN_BUF_NON_CACHED 0x00002000 // If set, the buffer is not cached. +#define GEN_BUF_IS_UPTODATE 0x00004000 // Set if memory content is equal or newer than media content +#define GEN_BUF_PHY_BLOCK 0x00008000 // Indicates that the uBlockN field contains a physical block number +#define GEN_BUF_LITTLE_ENDIAN 0x00010000 // When set, the data in the buffer contains small-endian data and should not be written to media + +typedef struct GenericBuffer { + + uint64_t uCacheFlags; + pthread_mutex_t sLock; // Sync access to buffer arguments + data + pthread_t pLockingThread; // Allows recursive lock by the same thread + uint32_t uLockCnt; // Allows recursive lock by the same thread + pthread_t sOwnerThread; // Current owner of buffer. 
+ pthread_cond_t sOwnerCond; // Clicked everytime a buffer owner is released. + uint32_t uUseCnt; // Counts the number of buffer allocations + void* pvData; + uint32_t uDataSize; + uint32_t uValidBytes; + daddr64_t uBlockN; + vnode_t psVnode; + uint64_t uFlags; + uint64_t uPhyCluster; + void (*pfFunc)(struct GenericBuffer *psBuf, void *pvArg); // A function to be called at the last minute before disk-write + void *pvCallbackArgs; // pfFunc args +} GenericLFBuf, *GenericLFBufPtr; + +typedef struct { + uint32_t buf_cache_size; + uint32_t max_buf_cache_size; + uint32_t max_gen_buf_uncached; + uint32_t gen_buf_uncached; + uint32_t buf_cache_remove; + uint32_t buf_cache_cleanup; + + uint64_t buf_total_allocated_size; +} CacheStats_S; + +extern CacheStats_S gCacheStat; + + +GenericLFBufPtr lf_hfs_generic_buf_allocate( vnode_t psVnode, daddr64_t uBlockN, uint32_t uBlockSize, uint64_t uFlags ); +int lf_hfs_generic_buf_take_ownership(GenericLFBuf *psBuf, pthread_mutex_t *pSem); +int lf_hfs_generic_buf_take_ownership_retry(GenericLFBuf *psBuf); +int lf_hfs_generic_buf_validate_owner(GenericLFBuf *psBuf); +GenericLFBufPtr lf_hfs_generic_buf_duplicate(GenericLFBufPtr pBuff, uint32_t uExtraCacheFlags); +errno_t lf_hfs_generic_buf_read( GenericLFBufPtr psBuf ); +errno_t lf_hfs_generic_buf_write( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_invalidate( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_release( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_clear( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_set_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags); +void lf_hfs_generic_buf_clear_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags); +void lf_hfs_generic_buf_override_owner(GenericLFBufPtr psBuf); +void lf_hfs_generic_buf_lock(GenericLFBufPtr psBuf); +void lf_hfs_generic_buf_unlock(GenericLFBufPtr psBuf); +void lf_hfs_generic_buf_cache_init( void ); +void lf_hfs_generic_buf_cache_deinit( void ); +void lf_hfs_generic_buf_cache_clear_by_iFD( int iFD 
); +void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf ); +void lf_hfs_generic_buf_cache_remove_vnode(vnode_t vp); +void lf_hfs_generic_buf_cache_UnLockBufCache(void); +void lf_hfs_generic_buf_cache_LockBufCache(void); + +typedef int (*IterateCallback)(GenericLFBuf *psBuff, void *pvArgs); +int lf_hfs_generic_buf_write_iterate(vnode_t psVnode, IterateCallback pfCallback, uint32_t uFlags, void *pvArgs); + + +#endif /* lf_hfs_generic_buf_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_journal.c b/livefiles_hfs_plugin/lf_hfs_journal.c new file mode 100644 index 0000000..471868d --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_journal.c @@ -0,0 +1,3474 @@ +/* + * Copyright (c) 2002-2015 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +// +// This file implements a simple write-ahead journaling layer. +// In theory any file system can make use of it by calling these +// functions when the fs wants to modify meta-data blocks. See +// hfs_journal.h for a more detailed description of the api and +// data structures. +// +// Dominic Giampaolo (dbg@apple.com) +// Port to Live-Files: Oded Shoshani (oshoshani@apple.com) +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lf_hfs_locks.h" +#include "lf_hfs_journal.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_vfsops.h" + +// ************************** Function Definitions *********************** +// number of bytes to checksum in a block_list_header +// NOTE: this should be enough to clear out the header +// fields as well as the first entry of binfo[] + +#define CHECK_JOURNAL(jnl) \ + do { \ + if (jnl == NULL) { \ + printf("%s:%d: null journal ptr?\n", __FILE__, __LINE__); \ + panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__); \ + } \ + if (jnl->jdev == NULL) { \ + printf("%s:%d: jdev is null!\n", __FILE__, __LINE__); \ + panic("%s:%d: jdev is null!\n", __FILE__, __LINE__); \ + } \ + if (jnl->fsdev == NULL) { \ + printf("%s:%d: fsdev is null!\n", __FILE__, __LINE__); \ + panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__); \ + } \ + if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) { \ + printf("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n", \ + __FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); \ + panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n", \ + __FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); \ + } \ + if (jnl->jhdr->start <= 0 || jnl->jhdr->start > jnl->jhdr->size) { \ + printf("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \ + __FILE__, __LINE__, 
jnl->jhdr->start, jnl->jhdr->size); \ + panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \ + __FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size); \ + } \ + if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) { \ + printf("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \ + __FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size); \ + panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \ + __FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size); \ + } \ + } while(0) + +#define CHECK_TRANSACTION(tr) \ + do { \ + if (tr == NULL) { \ + printf("%s:%d: null transaction ptr?\n", __FILE__, __LINE__); \ + panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__); \ + } \ + if (tr->jnl == NULL) { \ + printf("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__); \ + panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__); \ + } \ + if (tr->blhdr != (block_list_header *)tr->tbuffer) { \ + printf("%s:%d: blhdr (%p) != tbuffer (%p)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer); \ + panic("%s:%d: blhdr (%p) != tbuffer (%p)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer); \ + } \ + if (tr->total_bytes < 0) { \ + printf("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes); \ + panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes); \ + } \ + if (tr->journal_start < 0) { \ + printf("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start); \ + panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start); \ + } \ + if (tr->journal_end < 0) { \ + printf("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end); \ + panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end); \ + } \ + if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) { \ + printf("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, 
tr->blhdr->max_blocks); \ + panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks); \ + } \ + } while(0) + +#define SWAP16(x) OSSwapInt16(x) +#define SWAP32(x) OSSwapInt32(x) +#define SWAP64(x) OSSwapInt64(x) + +#define JNL_WRITE 0x0001 +#define JNL_READ 0x0002 +#define JNL_HEADER 0x8000 + +#define BLHDR_CHECKSUM_SIZE 32 +#define MAX_JOURNAL_SIZE 0x80000000U + +#define STARTING_BUCKETS 256 +typedef struct bucket { + off_t block_num; + uint32_t jnl_offset; + uint32_t block_size; + int32_t cksum; +} bucket; + +static int replay_journal(journal *jnl); +static void free_old_stuff(journal *jnl); +static errno_t journal_allocate_transaction(journal *jnl); +static void get_io_info(struct vnode *devvp, size_t phys_blksz, journal *jnl); +static size_t read_journal_header(journal *jnl, void *data, size_t len); +static size_t do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction); +static unsigned int calc_checksum(const char *ptr, int len); +static void swap_journal_header(journal *jnl); +static int end_transaction(transaction *tr, + int force_it, + errno_t (*callback)(void*), + void *callback_arg, + boolean_t drop_lock); +static void abort_transaction(journal *jnl, transaction *tr); +static void size_up_tbuffer(journal *jnl, uint32_t tbuffer_size, uint32_t phys_blksz); +static void lock_condition(journal *jnl, ConditionalFlag_S *psCondFlag, __unused const char *condition_name); +static void wait_condition(journal *jnl, ConditionalFlag_S *psCondFlag, __unused const char *condition_name); +static void unlock_condition(journal *jnl, ConditionalFlag_S *psCondFlag); +static int write_journal_header(journal *jnl, int updating_start, uint32_t sequence_num); +static size_t read_journal_data(journal *jnl, off_t *offset, void *data, size_t len); +static size_t write_journal_data(journal *jnl, off_t *offset, void *data, size_t len); + + +static __inline__ void lock_oldstart(journal *jnl) { + 
lf_lck_mtx_lock(&jnl->old_start_lock); +} + +static __inline__ void unlock_oldstart(journal *jnl) { + lf_lck_mtx_unlock(&jnl->old_start_lock); +} + +__inline__ void journal_lock(journal *jnl) { + lf_lck_mtx_lock(&jnl->jlock); + if (jnl->owner) { + panic ("jnl: owner is %p, expected NULL\n", jnl->owner); + } + jnl->owner = pthread_self(); +} + +__inline__ void journal_unlock(journal *jnl) { + jnl->owner = NULL; + lf_lck_mtx_unlock(&jnl->jlock); +} + +static __inline__ void lock_flush(journal *jnl) { + lf_lck_mtx_lock(&jnl->flock); +} + +static __inline__ void unlock_flush(journal *jnl) { + lf_lck_mtx_unlock(&jnl->flock); +} + +// ************************** Global Variables *********************** +// Journal Locking +lck_grp_attr_t *jnl_group_attr = NULL; +lck_attr_t *jnl_lock_attr = NULL; +lck_grp_t *jnl_mutex_group = NULL; + +// By default, we grow the list of extents to trim by 4K at a time. +// We'll opt to flush a transaction if it contains at least +// JOURNAL_FLUSH_TRIM_EXTENTS extents to be trimmed (even if the number +// of modified blocks is small). 
+enum { + JOURNAL_DEFAULT_TRIM_BYTES = 4096, + JOURNAL_DEFAULT_TRIM_EXTENTS = JOURNAL_DEFAULT_TRIM_BYTES / sizeof(dk_extent_t), + JOURNAL_FLUSH_TRIM_EXTENTS = JOURNAL_DEFAULT_TRIM_EXTENTS * 15 / 16 +}; + +unsigned int jnl_trim_flush_limit = JOURNAL_FLUSH_TRIM_EXTENTS; + +// tbuffer +#define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024) +#define MAX_TRANSACTION_BUFFER_SIZE (3072*1024) +uint32_t def_tbuffer_size = 0; // XXXdbg - so I can change it in the debugger + +// ************************** Global Functions *********************** +void journal_init(void) { + + jnl_lock_attr = lf_lck_attr_alloc_init(); + jnl_group_attr = lf_lck_grp_attr_alloc_init(); + jnl_mutex_group = lf_lck_grp_alloc_init(); +} + +journal *journal_open(struct vnode *jvp, + off_t offset, + off_t journal_size, + struct vnode *fsvp, + size_t min_fs_blksz, + int32_t flags, + int32_t tbuffer_size, + void (*flush)(void *arg), + void *arg, + struct mount *fsmount) { + journal *jnl; + uint32_t orig_blksz=0; + uint32_t phys_blksz; + u_int32_t min_size = 0; + int orig_checksum, checksum; + + /* Get the real physical block size. 
*/ + if (ioctl(jvp->psFSRecord->iFD, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz)) { + goto cleanup_jdev_name; + } + + if (phys_blksz > min_fs_blksz) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: error: phys blksize %u bigger than min fs blksize %zd\n", + phys_blksz, min_fs_blksz); + goto cleanup_jdev_name; + } + + if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal size %lld looks bogus.\n", journal_size); + goto cleanup_jdev_name; + } + + min_size = phys_blksz * (phys_blksz / sizeof(block_info)); + /* Reject journals that are too small given the sector size of the device */ + if (journal_size < min_size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal size (%lld) too small given sector size of (%u)\n", + journal_size, phys_blksz); + goto cleanup_jdev_name; + } + + if ((journal_size % phys_blksz) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n", + journal_size, phys_blksz); + goto cleanup_jdev_name; + } + + jnl = hfs_mallocz(sizeof(struct journal)); + + jnl->jdev = jvp; + jnl->jdev_offset = offset; + jnl->jdev_blknum = (uint32_t)(offset / min_fs_blksz); + jnl->fsdev = fsvp; + jnl->flush = flush; + jnl->flush_arg = arg; + jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK); + lf_lck_mtx_init(&jnl->old_start_lock); + lf_cond_init(&jnl->flushing.sCond); + lf_cond_init(&jnl->asyncIO.sCond); + lf_cond_init(&jnl->writing_header.sCond); + + /* We hold the mount to later pass to the throttling code for IO + * accounting. 
+ */ + jnl->fsmount = fsmount; + + get_io_info(jvp, phys_blksz, jnl); + + jnl->header_buf = hfs_malloc(phys_blksz); + jnl->header_buf_size = phys_blksz; + + jnl->jhdr = (journal_header *)jnl->header_buf; + memset(jnl->jhdr, 0, sizeof(journal_header)); + + // we have to set this up here so that do_journal_io() will work + jnl->jhdr->jhdr_size = phys_blksz; + + if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: could not read %u bytes for the journal header.\n", + phys_blksz); + goto bad_journal; + } + + /* + * Check for a bad jhdr size after reading in the journal header. + * The journal header length cannot be zero + */ + if (jnl->jhdr->jhdr_size == 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: bad jhdr size (%d) \n", jnl->jhdr->jhdr_size); + goto bad_journal; + } + + orig_checksum = jnl->jhdr->checksum; + jnl->jhdr->checksum = 0; + + if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) { + + // do this before the swap since it's done byte-at-a-time + orig_checksum = SWAP32(orig_checksum); + checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE); + swap_journal_header(jnl); + jnl->flags |= JOURNAL_NEED_SWAP; + + } else { + + checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE); + } + + if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal magic is bad (0x%x != 0x%x)\n", + jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); + goto bad_journal; + } + + // only check if we're the current journal header magic value + if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) { + + if (orig_checksum != checksum) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal checksum is bad (0x%x != 0x%x)\n", + orig_checksum, checksum); + + //goto bad_journal; + } + } + + // XXXdbg - convert old style magic numbers to the new one + if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) { + jnl->jhdr->magic = JOURNAL_HEADER_MAGIC; + } + + if 
(phys_blksz != (size_t)jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) { + /* + * The volume has probably been resized (such that we had to adjust the + * logical sector size), or copied to media with a different logical + * sector size. + * + * For us, though, no big deal because we are giving byte offsets to + * pread() and pwrite() to do our I/O, and as long as we use self- + * consistent units, we are all good. + */ + LFHFS_LOG(LEVEL_ERROR, + "jnl: block size mismatch: phys_blksz=%llu, jhdr->jhdr_size=%llu -- COMPENSATING\n", + (unsigned long long)phys_blksz, (unsigned long long)jnl->jhdr->jhdr_size); + orig_blksz = phys_blksz; + } + + if ( jnl->jhdr->start <= 0 + || jnl->jhdr->start > jnl->jhdr->size + || jnl->jhdr->start > 1024*1024*1024) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n", + jnl->jhdr->start, jnl->jhdr->size); + goto bad_journal; + } + + if ( jnl->jhdr->end <= 0 + || jnl->jhdr->end > jnl->jhdr->size + || jnl->jhdr->end > 1024*1024*1024) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n", + jnl->jhdr->end, jnl->jhdr->size); + goto bad_journal; + } + + if (jnl->jhdr->size < (256*1024) || jnl->jhdr->size > 1024*1024*1024) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size); + goto bad_journal; + } + + // XXXdbg - can't do these checks because hfs writes all kinds of + // non-uniform sized blocks even on devices that have a block size + // that is larger than 512 bytes (i.e. optical media w/2k blocks). + // therefore these checks will fail and so we just have to punt and + // do more relaxed checking... 
+ // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) { + if ((jnl->jhdr->start % 512) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal start (0x%llx) not a multiple of 512?\n", + jnl->jhdr->start); + goto bad_journal; + } + + //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) { + if ((jnl->jhdr->end % 512) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n", + jnl->jhdr->end, jnl->jhdr->jhdr_size); + goto bad_journal; + } + + if (jnl->jhdr->blhdr_size < 0) { + //throw out invalid sizes + LFHFS_LOG(LEVEL_ERROR, "jnl: open: blhdr size looks bogus! (%d) \n", + jnl->jhdr->blhdr_size); + goto bad_journal; + } + + // take care of replaying the journal if necessary + if (flags & JOURNAL_RESET) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal start/end pointers reset! (s 0x%llx e 0x%llx)\n", + jnl->jhdr->start, jnl->jhdr->end); + jnl->jhdr->start = jnl->jhdr->end; + } else if (replay_journal(jnl) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_open: Error replaying the journal!\n"); + goto bad_journal; + } + + /* + * When we get here, we know that the journal is empty (jnl->jhdr->start == + * jnl->jhdr->end). If the device's logical block size was different from + * the journal's header size, then we can now restore the device's logical + * block size and update the journal's header size to match. + * + * Note that we also adjust the journal's start and end so that they will + * be aligned on the new block size. We pick a new sequence number to + * avoid any problems if a replay found previous transactions using the old + * journal header size. (See the comments in journal_create(), above.) 
+ */ + + if (orig_blksz != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: updating journal header with block size %llu\n", + (unsigned long long)phys_blksz); + + jnl->jhdr->jhdr_size = phys_blksz; + jnl->jhdr->start = phys_blksz; + jnl->jhdr->end = phys_blksz; + jnl->jhdr->sequence_num = (jnl->jhdr->sequence_num + + (journal_size / phys_blksz) + + (random() % 16384)) & 0x00ffffff; + + if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num)) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: failed to update journal header size\n"); + goto bad_journal; + } + } + + // make sure this is in sync! + jnl->active_start = jnl->jhdr->start; + jnl->sequence_num = jnl->jhdr->sequence_num; + + // set this now, after we've replayed the journal + size_up_tbuffer(jnl, tbuffer_size, phys_blksz); + + // TODO: Does this need to change if the device's logical block size changed? + if ((off_t)(jnl->jhdr->blhdr_size/sizeof(block_info)-1) > (jnl->jhdr->size/jnl->jhdr->jhdr_size)) { + LFHFS_LOG(LEVEL_ERROR, "jnl: open: jhdr size and blhdr size are not compatible (0x%llx, %d, %d)\n", jnl->jhdr->size, + jnl->jhdr->blhdr_size, jnl->jhdr->jhdr_size); + goto bad_journal; + } + + lf_lck_mtx_init(&jnl->jlock); + lf_lck_mtx_init(&jnl->flock); + lf_lck_rw_init(&jnl->trim_lock); + + goto journal_open_complete; + +bad_journal: + hfs_free(jnl->header_buf); + hfs_free(jnl); +cleanup_jdev_name: + jnl = NULL; +journal_open_complete: + return jnl; +} + +journal *journal_create(struct vnode *jvp, + off_t offset, + off_t journal_size, + struct vnode *fsvp, + size_t min_fs_blksz, + int32_t flags, + int32_t tbuffer_size, + void (*flush)(void *arg), + void *arg, + struct mount *fsmount) { + + journal *jnl; + uint32_t phys_blksz, new_txn_base; + u_int32_t min_size; + + /* + * Cap the journal max size to 2GB. On HFS, it will attempt to occupy + * a full allocation block if the current size is smaller than the allocation + * block on which it resides. 
Once we hit the exabyte filesystem range, then
+ * it will use 2GB allocation blocks. As a result, make the cap 2GB.
+ */
+
+ /* Get the real physical block size. */
+ if (ioctl(jvp->psFSRecord->iFD, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz)) {
+ goto cleanup_jdev_name;
+ }
+
+ if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) {
+ LFHFS_LOG(LEVEL_ERROR, "jnl: create: journal size %lld looks bogus.\n", journal_size);
+ goto cleanup_jdev_name;
+ }
+
+ min_size = phys_blksz * (phys_blksz / sizeof(block_info));
+ /* Reject journals that are too small given the sector size of the device */
+ if (journal_size < min_size) {
+ LFHFS_LOG(LEVEL_ERROR, "jnl: create: journal size (%lld) too small given sector size of (%u)\n",
+ journal_size, phys_blksz);
+ goto cleanup_jdev_name;
+ }
+
+ if (phys_blksz > min_fs_blksz) {
+ LFHFS_LOG(LEVEL_ERROR, "jnl: create: error: phys blksize %u bigger than min fs blksize %zd\n",
+ phys_blksz, min_fs_blksz);
+ goto cleanup_jdev_name;
+ }
+
+ if ((journal_size % phys_blksz) != 0) {
+ // NOTE(review): format fixed — was "0x%ux", which printed the value in
+ // decimal via %u followed by a literal 'x'; "0x%x" prints it in hex.
+ LFHFS_LOG(LEVEL_ERROR, "jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
+ journal_size, phys_blksz);
+ goto cleanup_jdev_name;
+ }
+
+
+ jnl = hfs_mallocz(sizeof(struct journal));
+
+ jnl->jdev = jvp;
+ jnl->jdev_offset = offset;
+ jnl->jdev_blknum = (uint32_t)(offset / min_fs_blksz);
+ jnl->fsdev = fsvp;
+ jnl->flush = flush;
+ jnl->flush_arg = arg;
+ jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
+ lf_lck_mtx_init(&jnl->old_start_lock);
+
+ // Keep a point to the mount around for use in IO throttling.
+ jnl->fsmount = fsmount; + + get_io_info(jvp, phys_blksz, jnl); + + jnl->header_buf = hfs_malloc(phys_blksz); + jnl->header_buf_size = phys_blksz; + + jnl->jhdr = (journal_header *)jnl->header_buf; + memset(jnl->jhdr, 0, sizeof(journal_header)); + + // we have to set this up here so that do_journal_io() will work + jnl->jhdr->jhdr_size = phys_blksz; + + // + // We try and read the journal header to see if there is already one + // out there. If there is, it's possible that it has transactions + // in it that we might replay if we happen to pick a sequence number + // that is a little less than the old one, there is a crash and the + // last txn written ends right at the start of a txn from the previous + // incarnation of this file system. If all that happens we would + // replay the transactions from the old file system and that would + // destroy your disk. Although it is extremely unlikely for all those + // conditions to happen, the probability is non-zero and the result is + // severe - you lose your file system. Therefore if we find a valid + // journal header and the sequence number is non-zero we write junk + // over the entire journal so that there is no way we will encounter + // any old transactions. This is slow but should be a rare event + // since most tools erase the journal. 
+ // + if ( read_journal_header(jnl, jnl->jhdr, phys_blksz) == phys_blksz + && jnl->jhdr->magic == JOURNAL_HEADER_MAGIC + && jnl->jhdr->sequence_num != 0) { + + new_txn_base = (jnl->jhdr->sequence_num + (journal_size / phys_blksz) + (random() % 16384)) & 0x00ffffff; + LFHFS_LOG(LEVEL_ERROR, "jnl: create: avoiding old sequence number 0x%x (0x%x)\n", jnl->jhdr->sequence_num, new_txn_base); + + } else { + new_txn_base = random() & 0x00ffffff; + } + + memset(jnl->header_buf, 0, phys_blksz); + + jnl->jhdr->magic = JOURNAL_HEADER_MAGIC; + jnl->jhdr->endian = ENDIAN_MAGIC; + jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself + jnl->jhdr->end = phys_blksz; + jnl->jhdr->size = journal_size; + jnl->jhdr->jhdr_size = phys_blksz; + size_up_tbuffer(jnl, tbuffer_size, phys_blksz); + + jnl->active_start = jnl->jhdr->start; + + jnl->jhdr->sequence_num = new_txn_base; + + lf_lck_mtx_init(&jnl->jlock); + lf_lck_mtx_init(&jnl->flock); + lf_lck_rw_init(&jnl->trim_lock); + + lf_cond_init(&jnl->flushing.sCond); + lf_cond_init(&jnl->asyncIO.sCond); + lf_cond_init(&jnl->writing_header.sCond); + jnl->flush_aborted = FALSE; + jnl->async_trim = NULL; + jnl->sequence_num = jnl->jhdr->sequence_num; + + if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_create: failed to write journal header.\n"); + goto bad_write; + } + + goto journal_create_complete; + + +bad_write: + hfs_free(jnl->header_buf); + jnl->jhdr = NULL; + hfs_free(jnl); +cleanup_jdev_name: + jnl = NULL; +journal_create_complete: + return jnl; +} + + + +void *journal_owner(journal *jnl) { + return jnl->owner; +} + +/* Is the given cnode either the .journal or .journal_info_block file on + * a volume with an active journal? Many VNOPs use this to deny access + * to those files. 
+ *
+ * Note: the .journal file on a volume with an external journal still
+ * returns true here, even though it does not actually hold the contents
+ * of the volume's journal.
+ */
+_Bool hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp) {
+ if (hfsmp->jnl != NULL &&
+ (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
+ cp->c_fileid == hfsmp->hfs_jnlfileid)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Returns true iff the mounted volume behind psRootNode has an active journal.
+bool is_journaled(UVFSFileNode *psRootNode) {
+
+ // Validate psRootNode BEFORE dereferencing it. The original code read
+ // *psRootNode first and only then checked psRootNode for NULL, which is
+ // a NULL-pointer dereference (undefined behavior) when psRootNode == NULL.
+ if (!psRootNode) {
+ LFHFS_LOG(LEVEL_DEBUG, "is_journaled: psRootNode is NULL");
+ return false;
+ }
+
+ struct vnode *psRootVnode = *psRootNode;
+
+ if (!psRootVnode->sFSParams.vnfs_mp) {
+ LFHFS_LOG(LEVEL_DEBUG, "is_journaled: psRootVnode->sFSParams.vnfs_mp is NULL");
+ return false;
+ }
+
+ if (psRootVnode->sFSParams.vnfs_mp->psHfsmount->jnl)
+ return true;
+
+ return false;
+}
+
+
+// Media no longer available, clear all memory occupied by the journal
+void journal_release(journal *jnl) {
+ if (jnl->owner != pthread_self()) {
+ journal_lock(jnl);
+ }
+
+ if (jnl->active_tr) {
+ abort_transaction(jnl, jnl->active_tr);
+ }
+
+ if (jnl->cur_tr) {
+ abort_transaction(jnl, jnl->cur_tr);
+ }
+
+ free_old_stuff(jnl);
+
+ hfs_free(jnl->header_buf);
+ jnl->jhdr = (void *)0xbeefbabe;
+
+ journal_unlock(jnl);
+ lf_lck_mtx_destroy(&jnl->old_start_lock);
+ lf_lck_mtx_destroy(&jnl->jlock);
+ lf_lck_mtx_destroy(&jnl->flock);
+ hfs_free(jnl);
+}
+
+
+void journal_close(journal *jnl) {
+ volatile off_t *start, *end;
+ int counter=0;
+
+ CHECK_JOURNAL(jnl);
+
+ // set this before doing anything that would block so that
+ // we start tearing things down properly.
+ // + jnl->flags |= JOURNAL_CLOSE_PENDING; + + if (jnl->owner != pthread_self()) { + journal_lock(jnl); + } + + wait_condition(jnl, &jnl->flushing, "journal_close"); + + // + // only write stuff to disk if the journal is still valid + // + if ((jnl->flags & JOURNAL_INVALID) == 0) { + + if (jnl->active_tr) { + /* + * "journal_end_transaction" will fire the flush asynchronously + */ + journal_end_transaction(jnl); + } + + // flush any buffered transactions + if (jnl->cur_tr) { + transaction *tr = jnl->cur_tr; + + jnl->cur_tr = NULL; + /* + * "end_transaction" will wait for any in-progress flush to complete + * before flushing "cur_tr" synchronously("must_wait" == TRUE) + */ + end_transaction(tr, 1, NULL, NULL, FALSE); + } + /* + * if there was an "active_tr", make sure we wait for + * it to flush if there was no "cur_tr" to process + */ + wait_condition(jnl, &jnl->flushing, "journal_close"); + + //start = &jnl->jhdr->start; + start = &jnl->active_start; + end = &jnl->jhdr->end; + + while (*start != *end && counter++ < 5000) { + //printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end); + if (jnl->flush) { + jnl->flush(jnl->flush_arg); + } + usleep(10000); + } + + if (*start != *end) { + LFHFS_LOG(LEVEL_ERROR, "jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n", + *start, *end); + } + + // make sure this is in sync when we close the journal + jnl->jhdr->start = jnl->active_start; + + // if this fails there's not much we can do at this point... + write_journal_header(jnl, 1, jnl->sequence_num); + } else { + // if we're here the journal isn't valid any more. + // so make sure we don't leave any locked blocks lying around + LFHFS_LOG(LEVEL_ERROR, "jnl: close: journal is invalid. 
aborting outstanding transactions\n"); + if (jnl->active_tr || jnl->cur_tr) { + transaction *tr; + + if (jnl->active_tr) { + tr = jnl->active_tr; + jnl->active_tr = NULL; + } else { + tr = jnl->cur_tr; + jnl->cur_tr = NULL; + } + abort_transaction(jnl, tr); + + if (jnl->active_tr || jnl->cur_tr) { + panic("jnl: close: jnl @ %p had both an active and cur tr\n", jnl); + } + } + } + wait_condition(jnl, &jnl->asyncIO, "journal_close"); + + free_old_stuff(jnl); + + hfs_free(jnl->header_buf); + jnl->jhdr = (void *)0xbeefbabe; + + journal_unlock(jnl); + lf_lck_mtx_destroy(&jnl->old_start_lock); + lf_lck_mtx_destroy(&jnl->jlock); + lf_lck_mtx_destroy(&jnl->flock); + hfs_free(jnl); +} + +// This function performs the following: +// 1) Checks that we have a valid journal +// 2) locks the journal +// 3) Allocates roon in the journal +int journal_start_transaction(journal *jnl) { + + int ret; + + #if JOURNAL_DEBUG + printf("journal_start_transaction (%u).\n", jnl->nested_count); + #endif + + CHECK_JOURNAL(jnl); + + free_old_stuff(jnl); + + if (jnl->flags & JOURNAL_INVALID) { + return EINVAL; + } + + if (jnl->owner == pthread_self()) { + if (jnl->active_tr == NULL) { + panic("jnl: start_tr: active_tr is NULL (jnl @ %p, owner %p, current_thread %p\n", + jnl, jnl->owner, pthread_self()); + } + jnl->nested_count++; + return 0; + } + + journal_lock(jnl); + + if (jnl->nested_count != 0 || jnl->active_tr != NULL) { + panic("jnl: start_tr: owner %p, nested count %d, active_tr %p jnl @ %p\n", + jnl->owner, jnl->nested_count, jnl->active_tr, jnl); + } + + jnl->nested_count = 1; + + // if there's a buffered transaction, use it. 
+ if (jnl->cur_tr) { + jnl->active_tr = jnl->cur_tr; + jnl->cur_tr = NULL; + + return 0; + } + + ret = journal_allocate_transaction(jnl); + if (ret) { + goto bad_start; + } + + // printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, jnl->active_tr); + + return 0; + +bad_start: + jnl->nested_count = 0; + journal_unlock(jnl); + + return ret; +} +// journal_end_transaction +// This function does the following: +// 1) Validates journal status/state +// 2) +int journal_end_transaction(journal *jnl) { + int ret; + transaction *tr; + +#if JOURNAL_DEBUG + printf("journal_end_transaction (%u).\n", jnl->nested_count-1); +#endif + + CHECK_JOURNAL(jnl); + + free_old_stuff(jnl); + + if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) { + return 0; + } + + if (jnl->owner != pthread_self()) { + panic("jnl: end_tr: I'm not the owner! jnl %p, owner %p, curact %p\n", + jnl, jnl->owner, pthread_self()); + } + jnl->nested_count--; + + if (jnl->nested_count > 0) { + return 0; + } else if (jnl->nested_count < 0) { + panic("jnl: jnl @ %p has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count); + } + + if (jnl->flags & JOURNAL_INVALID) { + if (jnl->active_tr) { + if (jnl->cur_tr != NULL) { + panic("jnl: journal @ %p has active tr (%p) and cur tr (%p)\n", + jnl, jnl->active_tr, jnl->cur_tr); + } + tr = jnl->active_tr; + jnl->active_tr = NULL; + + abort_transaction(jnl, tr); + } + journal_unlock(jnl); + + return EINVAL; + } + + tr = jnl->active_tr; + CHECK_TRANSACTION(tr); + + // clear this out here so that when check_free_space() calls + // the FS flush function, we don't panic in journal_flush() + // if the FS were to call that. note: check_free_space() is + // called from end_transaction(). + jnl->active_tr = NULL; + + /* Examine the force-journal-flush state in the active txn */ + if (tr->flush_on_completion == TRUE) { + /* + * If the FS requested it, disallow group commit and force the + * transaction out to disk immediately. 
+ */ + ret = end_transaction(tr, 1, NULL, NULL, TRUE); + } + else { + /* in the common path we can simply use the double-buffered journal */ + ret = end_transaction(tr, 0, NULL, NULL, TRUE); + } + + return ret; +} + +// journal_modify_block_start +// This function does the following: +// 1) Makes sure the journal file is on and valid +// 2) Clean up (free previous transactions) +// 3) Validate that the phy-block-size has not changed. +// 4) Locks the buffer. +// Buffer life cycle with journal: +// 1) Client code (ie btrees_io.c) allocates a buffer (ie gains ownership). Other threads will pend on using this buffer until it is released. +// 2) Client code calls journal_modify_block_start which sets the GEN_BUF_WRITE_LOCK uCacheFlag. +// 3) Client code modifies the buffer. +// 4) Client code calls journal_modify_block_end which released the buffer. The GEN_BUF_WRITE_LOCK flag remains set. +// It this point other threads are welcomed to modify the buffer (after executing steps 1 and 2 above). The buffer content will not be written to media before transaction_end, thus only the accumulative change of both threads after transaction_end will be committed. +// 5) transaction-end (called from within client-code or async Sync) obtains ownership on in transaction buffers. By doing that it makes sure no buffer is currently being modified by any Client code. It then prepares the buffer for commiting (ie realigns endianizm), and commits (writes to the t-buffer, write the t-buffer to media, updates journal-info, clears the GEN_BUF_WRITE_LOCK flags and writes the buffers to media). 
+int journal_modify_block_start(journal *jnl, GenericLFBuf *psGenBuf) { + + transaction *tr; + +#if JOURNAL_DEBUG + printf("journal_modify_block_start: psGenBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uCacheFlags 0x%llx, uPhyCluster %llu, uLockCnt %u\n", + psGenBuf, psGenBuf->psVnode, psGenBuf->uBlockN, psGenBuf->uDataSize, psGenBuf->uCacheFlags ,psGenBuf->uPhyCluster, psGenBuf->uLockCnt); +#endif + + CHECK_JOURNAL(jnl); + + free_old_stuff(jnl); + + if (jnl->flags & JOURNAL_INVALID) { + return EINVAL; + } + + tr = jnl->active_tr; + CHECK_TRANSACTION(tr); + + if (jnl->owner != pthread_self()) { + panic("jnl: modify_block_start: called w/out a transaction! jnl %p, owner %p, curact %p\n", + jnl, jnl->owner, pthread_self()); + } + + //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n", + // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes); + + // can't allow blocks that aren't an even multiple of the + // underlying block size. + if ((psGenBuf->uDataSize % jnl->jhdr->jhdr_size) != 0) { + uint32_t bad=0; + uint32_t phys_blksz; + + if (ioctl(jnl->jdev->psFSRecord->iFD, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz)) { + bad = 1; + } else if (phys_blksz != (uint32_t)jnl->jhdr->jhdr_size) { + if (phys_blksz < 512) { + panic("jnl: mod block start: phys blksz %d is too small (%d, %d)\n", + phys_blksz, psGenBuf->uDataSize, jnl->jhdr->jhdr_size); + } + + if ((psGenBuf->uDataSize % phys_blksz) != 0) { + bad = 1; + } else if (phys_blksz < (uint32_t)jnl->jhdr->jhdr_size) { + jnl->jhdr->jhdr_size = phys_blksz; + } else { + // the phys_blksz is now larger... 
need to realloc the jhdr + char *new_header_buf; + + LFHFS_LOG(LEVEL_ERROR, "jnl: phys blksz got bigger (was: %d/%d now %d)\n", + jnl->header_buf_size, jnl->jhdr->jhdr_size, phys_blksz); + new_header_buf = hfs_malloc(phys_blksz); + memcpy(new_header_buf, jnl->header_buf, jnl->header_buf_size); + memset(&new_header_buf[jnl->header_buf_size], 0x18, (phys_blksz - jnl->header_buf_size)); + hfs_free(jnl->header_buf); + jnl->header_buf = new_header_buf; + jnl->header_buf_size = phys_blksz; + + jnl->jhdr = (journal_header *)jnl->header_buf; + jnl->jhdr->jhdr_size = phys_blksz; + } + } else { + bad = 1; + } + + if (bad) { + panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n", + psGenBuf->uDataSize, jnl->jhdr->jhdr_size); + + return -1; + } + } + + // make sure that this transaction isn't bigger than the whole journal + if ((tr->total_bytes+psGenBuf->uDataSize) >= (size_t)(jnl->jhdr->size - jnl->jhdr->jhdr_size)) { + panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr %p bp %p)\n", + tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), psGenBuf->uDataSize, tr, psGenBuf->pvData); + + return -1; + } + + lf_hfs_generic_buf_set_cache_flag(psGenBuf, GEN_BUF_WRITE_LOCK); + + return 0; +} +// journal_modify_block_end +// This function does the following: +// 1) Makes sure the journal file is on and valid +// 2) Clean up (free previous transactions) +// 3) Check if this block already exists in transaction +// 4) Add block number to transcation. We dont add the block data, nor we release the buffer at this point. +// This will be done later on, at the transaction-end. 
+int journal_modify_block_end(journal *jnl, GenericLFBuf *psGenBuf, + void (*func)(GenericLFBuf *bp, void *arg), void *arg) { + int i = 1; + size_t tbuffer_offset=0; + block_list_header *blhdr, *prev=NULL; + transaction *tr = NULL; + + #if JOURNAL_DEBUG + printf("journal_modify_block_end: psGenBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uPhyCluster %llu uLockCnt %u\n", + psGenBuf, psGenBuf->psVnode, psGenBuf->uBlockN, psGenBuf->uDataSize, psGenBuf->uPhyCluster, psGenBuf->uLockCnt); + #endif + + CHECK_JOURNAL(jnl); + + free_old_stuff(jnl); + + if (func) { + psGenBuf->pfFunc = func; + psGenBuf->pvCallbackArgs = arg; + } + + if (jnl->flags & JOURNAL_INVALID) { + /* Still need to buf_brelse(). Callers assume we consume the bp. */ + lf_hfs_generic_buf_clear_cache_flag(psGenBuf, GEN_BUF_WRITE_LOCK); + lf_hfs_generic_buf_release(psGenBuf); + return EINVAL; + } + + tr = jnl->active_tr; + CHECK_TRANSACTION(tr); + + if (jnl->owner != pthread_self()) { + panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n", + jnl, jnl->owner, pthread_self()); + } + + if ((psGenBuf->uCacheFlags & GEN_BUF_WRITE_LOCK) == 0) { + panic("jnl: modify_block_end: bp %p not locked! 
jnl @ %p\n", psGenBuf, jnl); + } + + // first check if this block is already part of this transaction + for (blhdr = tr->blhdr; blhdr; prev = blhdr, blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) { + tbuffer_offset = jnl->jhdr->blhdr_size; + + for (i = 1; i < blhdr->num_blocks; i++) { + GenericLFBuf *bp = (void*)blhdr->binfo[i].u.bp; + if (psGenBuf == bp) { + // Block found in transaction + #if JOURNAL_DEBUG + printf("block_end, already in journal: psGenBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uPhyCluster %llu uLockCnt %u\n", + psGenBuf, psGenBuf->psVnode, psGenBuf->uBlockN, psGenBuf->uDataSize, psGenBuf->uPhyCluster, psGenBuf->uLockCnt); + #endif + break; + } + if (blhdr->binfo[i].bnum != (off_t)-1) { + off_t uSizeOfBuf = ((GenericLFBuf*)(blhdr->binfo[i].u.bp))->uDataSize; + tbuffer_offset += uSizeOfBuf; + } else { + tbuffer_offset += blhdr->binfo[i].u.bi.bsize; + } + } + + if (i < blhdr->num_blocks) { + break; + } + } + + if (blhdr == NULL + && prev + && (prev->num_blocks+1) <= prev->max_blocks + && (prev->bytes_used+psGenBuf->uDataSize) <= (uint32_t)tr->tbuffer_size) { + // Block not found, add to last list + blhdr = prev; + + } else if (blhdr == NULL) { + block_list_header *nblhdr; + if (prev == NULL) { + panic("jnl: modify block end: no way man, prev == NULL?!?, jnl %p, psGenBuf %p\n", jnl, psGenBuf); + } + // Add another tbuffer: + + // we got to the end of the list, didn't find the block and there's + // no room in the block_list_header pointed to by prev + + // we allocate another tbuffer and link it in at the end of the list + // through prev->binfo[0].bnum. that's a skanky way to do things but + // avoids having yet another linked list of small data structures to manage. + + nblhdr = hfs_malloc(tr->tbuffer_size); + + // journal replay code checksum check depends on this. 
+ memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE); + // Fill up the rest of the block with unimportant bytes + memset(nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE); + + // initialize the new guy + nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1; + nblhdr->num_blocks = 1; // accounts for this header block + nblhdr->bytes_used = (uint32_t)jnl->jhdr->blhdr_size; + nblhdr->flags = BLHDR_CHECK_CHECKSUMS; + + tr->num_blhdrs++; + tr->total_bytes += jnl->jhdr->blhdr_size; + + // then link him in at the end + prev->binfo[0].bnum = (off_t)((long)nblhdr); + + // and finally switch to using the new guy + blhdr = nblhdr; + i = 1; + } + + if ((i+1) > blhdr->max_blocks) { + panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks); + } + + // if this is true then this is a new block we haven't seen before + if (i >= blhdr->num_blocks) { + off_t bsize; + bsize = psGenBuf->uDataSize; + + // Add block to list + blhdr->binfo[i].bnum = (off_t)(psGenBuf->uBlockN); + blhdr->binfo[i].u.bp = (void*)psGenBuf; + + blhdr->bytes_used += bsize; + tr->total_bytes += bsize; + + blhdr->num_blocks++; + } + + // We can release the block here to allow other threads to perform operations on it until the next transaction-end. + // The buffer will not be removed from cache since it is write-locked. + lf_hfs_generic_buf_release(psGenBuf); + + return 0; +} + +// This function validates if a block is already registered to a transaction +/* + * Flush the contents of the journal to the disk. + * + * Input: + * wait_for_IO - + * If TRUE, wait to write in-memory journal to the disk + * consistently, and also wait to write all asynchronous + * metadata blocks to its corresponding locations + * consistently on the disk. This means that the journal + * is empty at this point and does not contain any + * transactions. 
This is overkill in normal scenarios + * but is useful whenever the metadata blocks are required + * to be consistent on-disk instead of just the journal + * being consistent; like before live verification + * and live volume resizing. + * + * If FALSE, only wait to write in-memory journal to the + * disk consistently. This means that the journal still + * contains uncommitted transactions and the file system + * metadata blocks in the journal transactions might be + * written asynchronously to the disk. But there is no + * guarantee that they are written to the disk before + * returning to the caller. Note that this option is + * sufficient for file system data integrity as it + * guarantees consistent journal content on the disk. + */ +int journal_flush(journal *jnl, journal_flush_options_t options) { + boolean_t drop_lock = FALSE; + errno_t error = 0; + uint32_t flush_count = 0; + + CHECK_JOURNAL(jnl); + + free_old_stuff(jnl); + + if (jnl->flags & JOURNAL_INVALID) { + return EINVAL; + } + + if (jnl->owner != pthread_self()) { + journal_lock(jnl); + drop_lock = TRUE; + } + + if (ISSET(options, JOURNAL_FLUSH_FULL)) + flush_count = jnl->flush_counter; + + // if we're not active, flush any buffered transactions + if (jnl->active_tr == NULL && jnl->cur_tr) { + transaction *tr = jnl->cur_tr; + + jnl->cur_tr = NULL; + + if (ISSET(options, JOURNAL_WAIT_FOR_IO)) { + wait_condition(jnl, &jnl->flushing, "journal_flush"); + wait_condition(jnl, &jnl->asyncIO, "journal_flush"); + } + + // As the journal flush changes the MetaData content (update Endianizm), we need to lock the system times. + int lockflags = hfs_systemfile_lock(jnl->fsmount->psHfsmount, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + /* + * "end_transction" will wait for any current async flush + * to complete, before flushing "cur_tr"... 
because we've + * specified the 'must_wait' arg as TRUE, it will then + * synchronously flush the "cur_tr" + */ + end_transaction(tr, 1, NULL, NULL, drop_lock); // force it to get flushed + + hfs_systemfile_unlock(jnl->fsmount->psHfsmount, lockflags); + + } else { + if (drop_lock == TRUE) { + journal_unlock(jnl); + } + + /* Because of pipelined journal, the journal transactions + * might be in process of being flushed on another thread. + * If there is nothing to flush currently, we should + * synchronize ourselves with the pipelined journal thread + * to ensure that all inflight transactions, if any, are + * flushed before we return success to caller. + */ + wait_condition(jnl, &jnl->flushing, "journal_flush"); + } + if (ISSET(options, JOURNAL_WAIT_FOR_IO)) { + wait_condition(jnl, &jnl->asyncIO, "journal_flush"); + } + + if (ISSET(options, JOURNAL_FLUSH_FULL)) { + + dk_synchronize_t sync_request = { + .options = 0, + }; + + // We need a full cache flush. If it has not been done, do it here. + if (flush_count == jnl->flush_counter) + error = ioctl(jnl->jdev->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_request); + + // If external journal partition is enabled, flush filesystem data partition. + if (jnl->jdev != jnl->fsdev) + error = ioctl(jnl->jdev->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_request); + + } + + return error; +} + + +// ************************** Local Functions *********************** +static int update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize) { + + int iRet = 0; + GenericLFBuf *psGenBuf = NULL; + + // first read the block we want. 
+ psGenBuf = lf_hfs_generic_buf_allocate(jnl->fsmount->psHfsmount->hfs_devvp, + fs_block, + (uint32_t)bsize, + GEN_BUF_PHY_BLOCK | GEN_BUF_NON_CACHED); + if (!psGenBuf) { + LFHFS_LOG(LEVEL_ERROR, "jnl: update_fs_block: error allocating fs block # %lld!\n", fs_block); + iRet = -1; + goto exit; + } + + iRet = lf_hfs_generic_buf_read(psGenBuf); + if (iRet) { + LFHFS_LOG(LEVEL_ERROR, "jnl: update_fs_block: error reading fs block # %lld!\n", fs_block); + goto exit; + } + + // copy the journal data over top of it + memcpy(psGenBuf->pvData, block_ptr, bsize); + + iRet = lf_hfs_generic_buf_write(psGenBuf); + if (iRet) { + LFHFS_LOG(LEVEL_ERROR, "jnl: update_fs_block: failed to write block %lld (ret %d)\n", fs_block, iRet); + goto exit; + } + +exit: + if (psGenBuf) { + lf_hfs_generic_buf_release(psGenBuf); + } + + return iRet; +} + + +static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size) { + struct bucket *newBuf; + int current_size = num_buckets, i; + + // return if newsize is less than the current size + if (new_size < num_buckets) { + return current_size; + } + + newBuf = hfs_malloc(new_size*sizeof(struct bucket)); + + // printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size); + + // copy existing elements + bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket)); + + // initialize the new ones + for(i = num_buckets; i < new_size; i++) { + newBuf[i].block_num = (off_t)-1; + } + + // free the old container + hfs_free(*buf_ptr); + + // reset the buf_ptr + *buf_ptr = newBuf; + + return new_size; +} + + +static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting) { + + if (!overwriting) { + // grow the table if we're out of space - we may index the table + // with *num_full_ptr (lookup_bucket() can return a maximum value == + // *num_full_ptr), so we need to grow when we hit (*num_buckets_ptr - 1) + // to 
// ...prevent out-of-bounds indexing
// [NOTE(review): this is the tail of insert_block(); its signature and the start
//  of its body are in the previous chunk. Only comments were added here.]
        if (*num_full_ptr >= (*num_buckets_ptr - 1)) {
            // Table is (nearly) full: double its capacity before inserting.
            int new_size = *num_buckets_ptr * 2;
            int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

            if (grow_size < new_size) {
                LFHFS_LOG(LEVEL_ERROR, "jnl: add_block: grow_table returned an error!\n");
                return -1;
            }

            *num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
        }

        // if we're not inserting at the end, we need to bcopy
        // (shift entries [blk_index .. num_full) up by one slot to open a hole)
        if (blk_index != *num_full_ptr) {
            bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
        }

        (*num_full_ptr)++; // increment only if we're not overwriting
    }

    // sanity check the values we're about to add
    if ((off_t)offset >= jnl->jhdr->size) {
        // wrap the journal offset around, skipping the journal header block
        offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
    }
    if (size <= 0) {
        panic("jnl: insert_block: bad size in insert_block (%zd)\n", size);
    }

    // Record the (possibly new) entry at blk_index.
    (*buf_ptr)[blk_index].block_num = num;
    (*buf_ptr)[blk_index].block_size = (uint32_t)size;
    (*buf_ptr)[blk_index].jnl_offset = (uint32_t)offset;
    (*buf_ptr)[blk_index].cksum = cksum;

    return blk_index;
}

/*
 * do_overlap - resolve overlap between a new replay block and the coalesce table.
 *
 * block_num/size describe the new block being added (block numbers are in
 * jhdr_size units, sizes in bytes); blk_index is the slot lookup_bucket()
 * chose for it.  Existing entries that the new block fully covers are marked
 * with the sentinel block_num -2 and compacted away; partially covered
 * neighbours are trimmed (block_num/jnl_offset/block_size adjusted, cksum
 * invalidated since it no longer matches the trimmed data).
 *
 * Returns 1 when the new block is a simple overwrite of the entry at
 * blk_index (caller just replaces it in place), 0 when the caller must
 * insert a new entry, and panics on table inconsistencies.
 */
static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr) {

    int num_to_remove, index, i, overwrite, err;
    size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
    off_t overlap, block_start, block_end;

    // Byte range covered by the new block.
    block_start = block_num*jhdr_size;
    block_end = block_start + size;
    // "overwrite": new block starts at the same place and is at least as large.
    overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

    // first, eliminate any overlap with the previous entry
    if (blk_index != 0 && !overwrite) {
        off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
        off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
        overlap = prev_block_end - block_start;
        if (overlap > 0) {
            if (overlap % jhdr_size != 0) {
                panic("jnl: do_overlap: overlap with previous entry not a multiple of %zd\n", jhdr_size);
            }

            // if the previous entry completely overlaps this one, we need to break it into two pieces.
            if (prev_block_end > block_end) {
                // The piece of the previous entry that extends PAST the new
                // block is re-inserted as its own entry.
                off_t new_num = block_end / jhdr_size;
                size_t new_size = prev_block_end - block_end;

                new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

                err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, cksum, num_buckets_ptr, num_full_ptr, 0);
                if (err < 0) {
                    panic("jnl: do_overlap: error inserting during pre-overlap\n");
                }
            }

            // Regardless, we need to truncate the previous entry to the beginning of the overlap
            (*buf_ptr)[blk_index-1].block_size = (uint32_t)(block_start - prev_block_start);
            (*buf_ptr)[blk_index-1].cksum = 0; // have to blow it away because there's no way to check it
        }
    }

    // then, bail out fast if there's no overlap with the entries that follow
    if (!overwrite && block_end <= (off_t)((*buf_ptr)[blk_index].block_num*jhdr_size)) {
        return 0; // no overlap, no overwrite
    } else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (off_t)((*buf_ptr)[blk_index+1].block_num*jhdr_size))) {

        (*buf_ptr)[blk_index].cksum = cksum; // update this
        return 1; // simple overwrite
    }

    // Otherwise, find all cases of total and partial overlap. We use the special
    // block_num of -2 to designate entries that are completely overlapped and must
    // be eliminated. The block_num, size, and jnl_offset of partially overlapped
    // entries must be adjusted to keep the array consistent.
    index = blk_index;
    num_to_remove = 0;
    while (index < *num_full_ptr && block_end > (off_t)((*buf_ptr)[index].block_num*jhdr_size)) {
        if (block_end >= (off_t)(((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size))) {
            (*buf_ptr)[index].block_num = -2; // mark this for deletion
            num_to_remove++;
        } else {
            overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
            if (overlap > 0) {
                if (overlap % jhdr_size != 0) {
                    panic("jnl: do_overlap: overlap of %lld is not multiple of %zd\n", overlap, jhdr_size);
                }

                // if we partially overlap this entry, adjust its block number, jnl offset, and size
                (*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up
                (*buf_ptr)[index].cksum = 0;

                new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
                if ((off_t)new_offset >= jnl->jhdr->size) {
                    new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
                }
                (*buf_ptr)[index].jnl_offset = (uint32_t)new_offset;

                (*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
                // NOTE(review): block_size is stored via (uint32_t) casts, so it
                // is unsigned here and "<= 0" can only catch an exact 0; an
                // underflow from the subtraction above would wrap to a large
                // positive value instead of tripping this panic. Verify the
                // loop condition guarantees overlap < block_size.
                if ((*buf_ptr)[index].block_size <= 0) {
                    panic("jnl: do_overlap: after overlap, new block size is invalid (%u)\n", (*buf_ptr)[index].block_size);
                    // return -1; // if above panic is removed, return -1 for error
                }
            }

        }

        index++;
    }

    // bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
    index--; // start with the last index used within the above loop
    while (index >= blk_index) {
        if ((*buf_ptr)[index].block_num == -2) {
            if (index == *num_full_ptr-1) {
                (*buf_ptr)[index].block_num = -1; // it's the last item in the table...
// ...just mark as free
// [NOTE(review): this is the tail of do_overlap(), continued from the previous
//  chunk. Only comments were added here.]
            } else {
                // Compact the table over the deleted (-2) entry.
                bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
            }
            (*num_full_ptr)--;
        }
        index--;
    }

    // eliminate any stale entries at the end of the table
    // (the slots vacated by the compaction above are re-marked free so they
    //  are never mistaken for live entries)
    for(i = *num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
        (*buf_ptr)[i].block_num = -1;
    }

    return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}


/*
 * lookup_bucket - binary-search the sorted coalesce table for block_num.
 *
 * Returns the index of the right-most entry whose block_num matches (so a
 * subsequent insert/overwrite lands after any duplicates), or, if no entry
 * matches, the index at which a new entry for block_num should be inserted
 * (0 .. num_full inclusive).
 */
static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full) {
    int lo, hi, index, matches, i;

    if (num_full == 0) {
        return 0; // table is empty, so insert at index=0
    }

    lo = 0;
    hi = num_full - 1;
    index = -1;

    // perform binary search for block_num
    // (do/while: the probe runs at least once, even when num_full == 1
    //  and lo == hi from the start)
    do {
        int mid = (hi - lo)/2 + lo;
        off_t this_num = (*buf_ptr)[mid].block_num;

        if (block_num == this_num) {
            index = mid;
            break;
        }

        if (block_num < this_num) {
            hi = mid;
            continue;
        }

        if (block_num > this_num) {
            lo = mid + 1;
            continue;
        }
    } while (lo < hi);

    // check if lo and hi converged on the match
    if (block_num == (*buf_ptr)[hi].block_num) {
        index = hi;
    }

    // if no existing entry found, find index for new one
    // (insert before hi when block_num sorts lower, after it otherwise)
    if (index == -1) {
        index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
    } else {
        // make sure that we return the right-most index in the case of multiple matches
        matches = 0;
        i = index + 1;
        while (i < num_full && block_num == (*buf_ptr)[i].block_num) {
            matches++;
            i++;
        }

        index += matches;
    }

    return index;
}

// PR-3105942: Coalesce writes to the same block in journal replay
// We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
// to be replayed and the corresponding location in the journal which contains
// the most recent data for those blocks. The array is "played" once the all the
// blocks in the journal have been coalesced.
// ...The code for the case of conflicting/
// overlapping writes to a single block is the most dense. Because coalescing can
// disrupt the existing time-ordering of blocks in the journal playback, care
// is taken to catch any overlaps and keep the array consistent.

/*
 * add_block - add one replayed journal block to the coalesce table.
 *
 * block_num/size identify the physical block, offset is its location in the
 * journal, cksum its recorded checksum.  The table (*buf_ptr) stays sorted;
 * num_buckets/num_full are updated in place if the table grows or entries
 * are removed by overlap resolution.
 *
 * Returns the table index the block ended up at, or -1 on error.
 */
static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr) {
    int blk_index, overwriting;

    // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
    // inserted (or the index of the elem to overwrite).
    blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);

    // check if the index is within bounds (if we're adding this block to the end of
    // the table, blk_index will be equal to num_full)
    if (blk_index < 0 || blk_index > *num_full_ptr) {
        //printf("jnl: add_block: trouble adding block to co_buf\n");
        return -1;
    } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);

    // Determine whether we're overwriting an existing entry by checking for overlap
    // (do_overlap() may trim or delete neighbouring entries as a side effect)
    overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr);
    if (overwriting < 0) {
        return -1; // if we got an error, pass it along
    }

    // returns the index, or -1 on error
    blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr, overwriting);

    return blk_index;
}

/*
 * swap_block_list_header - byte-swap a block_list_header read from a journal
 * written with the opposite endianness: first the fixed header fields, then
 * (after a sanity check on num_blocks, below) each per-block binfo entry.
 */
static void swap_block_list_header(journal *jnl, block_list_header *blhdr) {
    int i;

    blhdr->max_blocks = SWAP16(blhdr->max_blocks);
    blhdr->num_blocks = SWAP16(blhdr->num_blocks);
    blhdr->bytes_used = SWAP32(blhdr->bytes_used);
    blhdr->checksum = SWAP32(blhdr->checksum);
    blhdr->flags = SWAP32(blhdr->flags);

    // Guard the binfo loop below: a num_blocks beyond what fits in a blhdr
    // means the header is corrupt, so leave the entries un-swapped.
    if (blhdr->num_blocks >= ((jnl->jhdr->blhdr_size / sizeof(block_info)) - 1)) {
        LFHFS_LOG(LEVEL_ERROR, "jnl: blhdr num blocks looks suspicious (%d / blhdr
size %d). not swapping.\n", blhdr->num_blocks, jnl->jhdr->blhdr_size); + return; + } + + for(i = 0; i < blhdr->num_blocks; i++) { + blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum); + blhdr->binfo[i].u.bi.bsize = SWAP32(blhdr->binfo[i].u.bi.bsize); + blhdr->binfo[i].u.bi.b.cksum = SWAP32(blhdr->binfo[i].u.bi.b.cksum); + } +} + +static int replay_journal(journal *jnl) { + int i, bad_blocks=0; + unsigned int orig_checksum, checksum, check_block_checksums = 0; + size_t ret; + size_t max_bsize = 0; /* protected by block_ptr */ + block_list_header *blhdr; + off_t offset, txn_start_offset=0, blhdr_offset, orig_jnl_start; + char *buff, *block_ptr=NULL; + struct bucket *co_buf; + int num_buckets = STARTING_BUCKETS, num_full, check_past_jnl_end = 1, in_uncharted_territory = 0; + uint32_t last_sequence_num = 0; + int replay_retry_count = 0; + + LFHFS_LOG(LEVEL_DEFAULT, "replay_journal: start.\n"); + + + // wrap the start ptr if it points to the very end of the journal + if (jnl->jhdr->start == jnl->jhdr->size) { + jnl->jhdr->start = jnl->jhdr->jhdr_size; + } + if (jnl->jhdr->end == jnl->jhdr->size) { + jnl->jhdr->end = jnl->jhdr->jhdr_size; + } + + if (jnl->jhdr->start == jnl->jhdr->end) { + LFHFS_LOG(LEVEL_DEFAULT, "replay_journal: journal empty.\n"); + goto success; + } + + orig_jnl_start = jnl->jhdr->start; + + // allocate memory for the header_block. 
we'll read each blhdr into this + buff = hfs_malloc(jnl->jhdr->blhdr_size); + + // allocate memory for the coalesce buffer + co_buf = hfs_malloc(num_buckets*sizeof(struct bucket)); + +restart_replay: + + // initialize entries + for(i = 0; i < num_buckets; i++) { + co_buf[i].block_num = -1; + } + num_full = 0; // empty at first + + + while (check_past_jnl_end || jnl->jhdr->start != jnl->jhdr->end) { + offset = blhdr_offset = jnl->jhdr->start; + ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size); + if (ret != (size_t)jnl->jhdr->blhdr_size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset); + goto bad_txn_handling; + } + + blhdr = (block_list_header *)buff; + + orig_checksum = blhdr->checksum; + blhdr->checksum = 0; + if (jnl->flags & JOURNAL_NEED_SWAP) { + // calculate the checksum based on the unswapped data + // because it is done byte-at-a-time. + orig_checksum = (unsigned int)SWAP32(orig_checksum); + checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE); + swap_block_list_header(jnl, blhdr); + } else { + checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE); + } + + + // + // XXXdbg - if these checks fail, we should replay as much + // we can in the hopes that it will still leave the + // drive in a better state than if we didn't replay + // anything + // + if (checksum != orig_checksum) { + if (check_past_jnl_end && in_uncharted_territory) { + + if (blhdr_offset != jnl->jhdr->end) { + LFHFS_LOG(LEVEL_ERROR, "jnl: Extra txn replay stopped @ %lld / 0x%llx\n", blhdr_offset, blhdr_offset); + } + + check_past_jnl_end = 0; + jnl->jhdr->end = blhdr_offset; + continue; + } + + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n", + blhdr_offset, orig_checksum, checksum); + + if (blhdr_offset == orig_jnl_start) { + // if there's nothing in the journal at all, just bail out altogether. 
+ goto bad_replay; + } + + goto bad_txn_handling; + } + + if ( (last_sequence_num != 0) + && (blhdr->binfo[0].u.bi.b.sequence_num != 0) + && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num) + && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num+1)) { + + txn_start_offset = jnl->jhdr->end = blhdr_offset; + + if (check_past_jnl_end) { + check_past_jnl_end = 0; + LFHFS_LOG(LEVEL_ERROR, "jnl: 2: extra replay stopped @ %lld / 0x%llx (seq %d < %d)\n", + blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num); + continue; + } + + LFHFS_LOG(LEVEL_ERROR, "jnl: txn sequence numbers out of order in txn @ %lld / %llx! (%d < %d)\n", + blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num); + goto bad_txn_handling; + } + last_sequence_num = blhdr->binfo[0].u.bi.b.sequence_num; + + if (blhdr_offset >= jnl->jhdr->end && jnl->jhdr->start <= jnl->jhdr->end) { + if (last_sequence_num == 0) { + check_past_jnl_end = 0; + LFHFS_LOG(LEVEL_ERROR, "jnl: pre-sequence-num-enabled txn's - can not go further than end (%lld %lld).\n", + jnl->jhdr->start, jnl->jhdr->end); + if (jnl->jhdr->start != jnl->jhdr->end) { + jnl->jhdr->start = jnl->jhdr->end; + } + continue; + } + LFHFS_LOG(LEVEL_ERROR, "jnl: examining extra transactions starting @ %lld / 0x%llx\n", blhdr_offset, blhdr_offset); + } + + if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > (jnl->jhdr->size/jnl->jhdr->jhdr_size) + || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: bad looking journal entry: max: %d num: %d\n", + blhdr->max_blocks, blhdr->num_blocks); + goto bad_txn_handling; + } + + max_bsize = 0; + for (i = 1; i < blhdr->num_blocks; i++) { + if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: bogus block number 0x%llx\n", blhdr->binfo[i].bnum); + goto bad_txn_handling; + } + + if ((size_t)blhdr->binfo[i].u.bi.bsize > 
max_bsize) { + max_bsize = blhdr->binfo[i].u.bi.bsize; + } + } + + if (blhdr->flags & BLHDR_CHECK_CHECKSUMS) { + check_block_checksums = 1; + block_ptr = hfs_malloc(max_bsize); + } else { + block_ptr = NULL; + } + + if (blhdr->flags & BLHDR_FIRST_HEADER) { + txn_start_offset = blhdr_offset; + } + + //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n", + // blhdr->num_blocks-1, jnl->jhdr->start); + bad_blocks = 0; + for (i = 1; i < blhdr->num_blocks; i++) { + int size, ret_val; + off_t number; + + size = blhdr->binfo[i].u.bi.bsize; + number = blhdr->binfo[i].bnum; + + // don't add "killed" blocks + if (number == (off_t)-1) { + //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i); + } else { + + if (check_block_checksums) { + int32_t disk_cksum; + off_t block_offset; + + block_offset = offset; + + // read the block so we can check the checksum + ret = read_journal_data(jnl, &block_offset, block_ptr, size); + if (ret != (size_t)size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset); + goto bad_txn_handling; + } + + disk_cksum = calc_checksum(block_ptr, size); + + // there is no need to swap the checksum from disk because + // it got swapped when the blhdr was read in. 
+ if (blhdr->binfo[i].u.bi.b.cksum != 0 && disk_cksum != blhdr->binfo[i].u.bi.b.cksum) { + LFHFS_LOG(LEVEL_ERROR, "jnl: txn starting at %lld (%lld) @ index %3d bnum %lld (%d) with disk cksum != blhdr cksum (0x%.8x 0x%.8x)\n", + txn_start_offset, blhdr_offset, i, number, size, disk_cksum, blhdr->binfo[i].u.bi.b.cksum); + LFHFS_LOG(LEVEL_ERROR, "jnl: 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", + *(int *)&block_ptr[0*sizeof(int)], *(int *)&block_ptr[1*sizeof(int)], *(int *)&block_ptr[2*sizeof(int)], *(int *)&block_ptr[3*sizeof(int)], + *(int *)&block_ptr[4*sizeof(int)], *(int *)&block_ptr[5*sizeof(int)], *(int *)&block_ptr[6*sizeof(int)], *(int *)&block_ptr[7*sizeof(int)]); + + goto bad_txn_handling; + } + } + + + // add this bucket to co_buf, coalescing where possible + // printf("jnl: replay_journal: adding block 0x%llx\n", number); + ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, blhdr->binfo[i].u.bi.b.cksum, &num_buckets, &num_full); + + if (ret_val == -1) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: trouble adding block to co_buf\n"); + goto bad_replay; + } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number); + } + + // increment offset + offset += size; + + // check if the last block added puts us off the end of the jnl. + // if so, we need to wrap to the beginning and take any remainder + // into account + // + if (offset >= jnl->jhdr->size) { + offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size); + } + } + + if (block_ptr) { + hfs_free(block_ptr); + block_ptr = NULL; + } + + if (bad_blocks) { + bad_txn_handling: + /* Journal replay got error before it found any valid + * transations, abort replay */ + if (txn_start_offset == 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: no known good txn start offset! 
aborting journal replay.\n"); + goto bad_replay; + } + + /* Repeated error during journal replay, abort replay */ + if (replay_retry_count == 3) { + LFHFS_LOG(LEVEL_ERROR, "jnl: repeated errors replaying journal! aborting journal replay.\n"); + goto bad_replay; + } + replay_retry_count++; + + /* There was an error replaying the journal (possibly + * EIO/ENXIO from the device). So retry replaying all + * the good transactions that we found before getting + * the error. + */ + jnl->jhdr->start = orig_jnl_start; + jnl->jhdr->end = txn_start_offset; + check_past_jnl_end = 0; + last_sequence_num = 0; + LFHFS_LOG(LEVEL_ERROR, "jnl: restarting journal replay (%lld - %lld)!\n", jnl->jhdr->start, jnl->jhdr->end); + goto restart_replay; + } + + jnl->jhdr->start += blhdr->bytes_used; + if (jnl->jhdr->start >= jnl->jhdr->size) { + // wrap around and skip the journal header block + jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size; + } + + if (jnl->jhdr->start == jnl->jhdr->end) { + in_uncharted_territory = 1; + } + } + + if (jnl->jhdr->start != jnl->jhdr->end) { + LFHFS_LOG(LEVEL_ERROR, "jnl: start %lld != end %lld. 
resetting end.\n", jnl->jhdr->start, jnl->jhdr->end); + jnl->jhdr->end = jnl->jhdr->start; + } + + //printf("jnl: replay_journal: replaying %d blocks\n", num_full); + + /* + * make sure it's at least one page in size, so + * start max_bsize at PAGE_SIZE + */ + for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) { + + if (co_buf[i].block_num == (off_t)-1) + continue; + + if (co_buf[i].block_size > max_bsize) + max_bsize = co_buf[i].block_size; + } + /* + * round max_bsize up to the nearest PAGE_SIZE multiple + */ + if (max_bsize & (PAGE_SIZE - 1)) { + max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1); + } + + block_ptr = hfs_malloc(max_bsize); + + // Replay the coalesced entries in the co-buf + for(i = 0; i < num_full; i++) { + size_t size = co_buf[i].block_size; + off_t jnl_offset = (off_t) co_buf[i].jnl_offset; + off_t number = co_buf[i].block_num; + + + // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num, + // co_buf[i].block_size, co_buf[i].jnl_offset); + + if (number == (off_t)-1) { + // printf("jnl: replay_journal: skipping killed fs block\n"); + } else { + + // do journal read, and set the phys. 
block + ret = read_journal_data(jnl, &jnl_offset, block_ptr, size); + if (ret != size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl_offset); + goto bad_replay; + } + + if (update_fs_block(jnl, block_ptr, number, size) != 0) { + goto bad_replay; + } + } + } + + + // done replaying; update jnl header + if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) { + goto bad_replay; + } + + // free block_ptr + if (block_ptr) { + hfs_free(block_ptr); + block_ptr = NULL; + } + + // free the coalesce buffer + hfs_free(co_buf); + co_buf = NULL; + + hfs_free(buff); + +success: + LFHFS_LOG(LEVEL_DEFAULT, "replay_journal: success.\n"); + return 0; + +bad_replay: + hfs_free(block_ptr); + hfs_free(co_buf); + hfs_free(buff); + + LFHFS_LOG(LEVEL_ERROR, "replay_journal: error.\n"); + return -1; +} + +// buffer_written: +// This function get executed after a buffer has been written to its +// final destination. +// This function lets us know when a buffer has been +// flushed to disk. Originally (kext), it was called from deep +// within the driver stack and thus is quite limited in what it could do. +// Notably, it could not initiate any new i/o's or allocate/free memory. +static void buffer_written(transaction *tr, GenericLFBuf *bp) { + + journal *jnl; + transaction *ctr, *prev=NULL, *next; + size_t i; + size_t bufsize, amt_flushed, total_bytes; + + + // snarf out the bits we want + bufsize = bp->uDataSize; + + // then we've already seen it + if (tr == NULL) { + return; + } + + CHECK_TRANSACTION(tr); + + jnl = tr->jnl; + + CHECK_JOURNAL(jnl); + + amt_flushed = tr->num_killed; + total_bytes = tr->total_bytes; + + // update the number of blocks that have been flushed. + // this buf may represent more than one block so take + // that into account. + amt_flushed += tr->num_flushed; + tr->num_flushed += bufsize; + + // if this transaction isn't done yet, just return as + // there is nothing to do. 
+ // + // NOTE: we are careful to not reference anything through + // the tr pointer after doing the OSAddAtomic(). if + // this if statement fails then we are the last one + // and then it's ok to dereference "tr". + // + if ((amt_flushed + bufsize) < total_bytes) { + return; + } + + // this will single thread checking the transaction + lock_oldstart(jnl); + + if (tr->total_bytes == (int)0xfbadc0de) { + // then someone beat us to it... + unlock_oldstart(jnl); + return; + } + + // mark this so that we're the owner of dealing with the + // cleanup for this transaction + tr->total_bytes = 0xfbadc0de; + + if (jnl->flags & JOURNAL_INVALID) + goto transaction_done; + + //printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n", + // tr, tr->journal_start, tr->journal_end, jnl); + + // find this entry in the old_start[] index and mark it completed + for(i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) { + + if ((off_t)(jnl->old_start[i] & ~(0x8000000000000000ULL)) == tr->journal_start) { + jnl->old_start[i] &= ~(0x8000000000000000ULL); + break; + } + } + + if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) { + panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr %p, jnl %p)\n", + tr->journal_start, tr, jnl); + } + + + // if we are here then we need to update the journal header + // to reflect that this transaction is complete + if (tr->journal_start == jnl->active_start) { + jnl->active_start = tr->journal_end; + tr->journal_start = tr->journal_end = (off_t)0; + } + + // go through the completed_trs list and try to coalesce + // entries, restarting back at the beginning if we have to. 
+ for (ctr = jnl->completed_trs; ctr; prev=ctr, ctr=next) { + if (ctr->journal_start == jnl->active_start) { + jnl->active_start = ctr->journal_end; + if (prev) { + prev->next = ctr->next; + } + if (ctr == jnl->completed_trs) { + jnl->completed_trs = ctr->next; + } + + next = jnl->completed_trs; // this starts us over again + ctr->next = jnl->tr_freeme; + jnl->tr_freeme = ctr; + ctr = NULL; + + } else if (tr->journal_end == ctr->journal_start) { + ctr->journal_start = tr->journal_start; + next = jnl->completed_trs; // this starts us over again + ctr = NULL; + tr->journal_start = tr->journal_end = (off_t)0; + + } else if (tr->journal_start == ctr->journal_end) { + ctr->journal_end = tr->journal_end; + next = ctr->next; + tr->journal_start = tr->journal_end = (off_t)0; + } else if (ctr->next && ctr->journal_end == ctr->next->journal_start) { + // coalesce the next entry with this one and link the next + // entry in at the head of the tr_freeme list + next = ctr->next; // temporarily use the "next" variable + ctr->journal_end = next->journal_end; + ctr->next = next->next; + next->next = jnl->tr_freeme; // link in the next guy at the head of the tr_freeme list + jnl->tr_freeme = next; + + next = jnl->completed_trs; // this starts us over again + ctr = NULL; + + } else { + next = ctr->next; + } + } + + // if this is true then we didn't merge with anyone + // so link ourselves in at the head of the completed + // transaction list. + if (tr->journal_start != 0) { + // put this entry into the correct sorted place + // in the list instead of just at the head. 
// [NOTE(review): tail of buffer_written(), continued from the previous chunk -
//  sorted insertion of a transaction that did not merge with any neighbour.
//  Only comments were added here.]

        prev = NULL;
        // Walk to the first completed transaction that starts at or after ours.
        for (ctr = jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
            // just keep looping
        }

        if (ctr == NULL && prev == NULL) {
            // empty list: we become the sole entry
            jnl->completed_trs = tr;
            tr->next = NULL;

        } else if (ctr == jnl->completed_trs) {
            // we sort before the current head
            tr->next = jnl->completed_trs;
            jnl->completed_trs = tr;

        } else {
            // splice in between prev and ctr (ctr may be NULL => append)
            tr->next = prev->next;
            prev->next = tr;
        }

    } else {
        // if we're here this tr got merged with someone else so
        // put it on the list to be free'd
        tr->next = jnl->tr_freeme;
        jnl->tr_freeme = tr;
    }
transaction_done:
    unlock_oldstart(jnl);

    unlock_condition(jnl, &jnl->asyncIO);
}

// Thin wrapper: write len bytes of data to the journal at *offset
// (do_journal_io handles wrap-around and advances *offset).
static size_t write_journal_data(journal *jnl, off_t *offset, void *data, size_t len) {
    return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}

// Thin wrapper: read len bytes from the journal at *offset.
static size_t read_journal_data(journal *jnl, off_t *offset, void *data, size_t len) {
    return do_journal_io(jnl, offset, data, len, JNL_READ);
}


// This function sets the size of the tbuffer and the
// size of the blhdr. It assumes that jnl->jhdr->size
// and jnl->jhdr->jhdr_size are already valid.
static void size_up_tbuffer(journal *jnl, uint32_t tbuffer_size, uint32_t phys_blksz) {
    //
    // one-time initialization based on how much memory
    // there is in the machine.
    //
    if (def_tbuffer_size == 0) {
        // Computed once per process: scale the default transaction buffer
        // with physical RAM as reported by the hw.memsize sysctl.
        uint64_t memsize = 0;
        size_t l = sizeof(memsize);
        sysctlbyname("hw.memsize", &memsize, &l, NULL, 0);

        if (memsize < (256*1024*1024)) {
            def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
        } else if (memsize < (512*1024*1024)) {
            def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
        } else if (memsize < (1024*1024*1024)) {
            def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
        } else {
            // >= 1 GiB: one DEFAULT_TRANSACTION_BUFFER_SIZE per 256 MiB of RAM
            def_tbuffer_size = (uint32_t)(DEFAULT_TRANSACTION_BUFFER_SIZE * (memsize / (256*1024*1024)));
        }
    }

    // For analyzer
    if (!(jnl->jhdr->jhdr_size > 0)) {
        panic("jnl->jhdr->jhdr_size is %d", jnl->jhdr->jhdr_size);
    }

    // size up the transaction buffer... can't be larger than the number
    // of blocks that can fit in a block_list_header block.
    if (tbuffer_size == 0) {
        jnl->tbuffer_size = def_tbuffer_size;
    } else {
        // make sure that the specified tbuffer_size isn't too small
        if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
            tbuffer_size = jnl->jhdr->blhdr_size * 2;
        }
        // and make sure it's an even multiple of the block size
        if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
            tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
        }

        jnl->tbuffer_size = tbuffer_size;
    }

    // Clamp: never more than half the journal, never above the hard cap.
    if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
        jnl->tbuffer_size = (uint32_t)(jnl->jhdr->size / 2);
    }

    if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
        jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
    }

    // blhdr must hold one block_info per journal block in the tbuffer, and be
    // a whole multiple of the physical block size (rounded up below).
    jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
    if (jnl->jhdr->blhdr_size < phys_blksz) {
        jnl->jhdr->blhdr_size = phys_blksz;
    } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
        // have to round up so we're an even multiple of the physical block size
        jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
    }
}


static int
num_err_prints = 0; + int ret=0; + off_t jhdr_offset = 0; + + // Flush the track cache if we're not doing force-unit-access + // writes. + if (!updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) { + + dk_synchronize_t sync_request = { + .options = DK_SYNCHRONIZE_OPTION_BARRIER, + }; + + /* + * If device doesn't support barrier-only flush, or + * the journal is on a different device, use full flush. + */ + if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) { + sync_request.options = 0; + jnl->flush_counter++; + } + + ret = ioctl(jnl->jdev->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_request); + } + if (ret != 0) { + // + // Only print this error if it's a different error than the + // previous one, or if it's the first time for this device + // or if the total number of printfs is less than 25. We + // allow for up to 25 printfs to insure that some make it + // into the on-disk syslog. Otherwise if we only printed + // one, it's possible it would never make it to the syslog + // for the root volume and that makes debugging hard. 
+ // + if ( ret != jnl->last_flush_err + || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0 + || num_err_prints++ < 25) { + + LFHFS_LOG(LEVEL_ERROR, "jnl: flushing fs disk buffer returned 0x%x\n", ret); + + jnl->flags |= JOURNAL_FLUSHCACHE_ERR; + jnl->last_flush_err = ret; + } + } + + jnl->jhdr->sequence_num = sequence_num; + jnl->jhdr->checksum = 0; + jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE); + + if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != (size_t)jnl->jhdr->jhdr_size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: write_journal_header: error writing the journal header!\n"); + jnl->flags |= JOURNAL_INVALID; + return -1; + } + + // If we're not doing force-unit-access writes, then we + // have to flush after writing the journal header so that + // a future transaction doesn't sneak out to disk before + // the header does and thus overwrite data that the old + // journal header refers to. Saw this exact case happen + // on an IDE bus analyzer with Larry Barras so while it + // may seem obscure, it's not. + // + if (updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) { + + dk_synchronize_t sync_request = { + .options = DK_SYNCHRONIZE_OPTION_BARRIER, + }; + + /* + * If device doesn't support barrier-only flush, or + * the journal is on a different device, use full flush. + */ + if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) { + sync_request.options = 0; + jnl->flush_counter++; + } + + ioctl(jnl->jdev->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_request); + } + return 0; +} + +static int journal_binfo_cmp(const void *a, const void *b) { + + const block_info *bi_a = (const struct block_info *)a; + const block_info *bi_b = (const struct block_info *)b; + daddr64_t res; + + if (bi_a->bnum == (off_t)-1) { + return 1; + } + if (bi_b->bnum == (off_t)-1) { + return -1; + } + + // don't have to worry about negative block + // numbers so this is ok to do. 
+ GenericLFBuf *psGenBufA, *psGenBufB; + psGenBufA = (void*)bi_a->u.bp; + psGenBufB = (void*)bi_b->u.bp; + res = psGenBufA->uBlockN - psGenBufB->uBlockN; + + return (int)res; +} + +// finish_end_transaction: + +static int finish_end_transaction(transaction *tr, errno_t (*callback)(void*), void *callback_arg) { + int i; + size_t amt; + size_t ret = 0; + off_t end; + journal *jnl = tr->jnl; + GenericLFBuf *bp = NULL, **bparray = NULL; + block_list_header *blhdr=NULL, *next=NULL; + size_t tbuffer_offset; + int bufs_written = 0; + int ret_val = 0; + + end = jnl->jhdr->end; + + for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) { + + amt = blhdr->bytes_used; + + blhdr->binfo[0].u.bi.b.sequence_num = tr->sequence_num; + + blhdr->checksum = 0; + blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE); + + bparray = hfs_malloc(blhdr->num_blocks * sizeof(buf_t)); + tbuffer_offset = jnl->jhdr->blhdr_size; + + // for each block in the block-header, + for (i = 1; i < blhdr->num_blocks; i++) { + size_t bsize; + + /* + * finish preparing the shadow buf_t before + * calculating the individual block checksums + */ + if (blhdr->binfo[i].bnum != (off_t)-1) { + daddr64_t blkno; + + bp = (void*)blhdr->binfo[i].u.bp; + blkno = bp->uPhyCluster; + // update this so we write out the correct physical block number! 
+ blhdr->binfo[i].bnum = (off_t)(blkno); + + bparray[i] = bp; + bsize = bp->uDataSize; + blhdr->binfo[i].u.bi.bsize = (uint32_t)bsize; + blhdr->binfo[i].u.bi.b.cksum = calc_checksum(&((char *)blhdr)[tbuffer_offset], (uint32_t)bsize); + } else { + bparray[i] = NULL; + bsize = blhdr->binfo[i].u.bi.bsize; + blhdr->binfo[i].u.bi.b.cksum = 0; + } + tbuffer_offset += bsize; + } + + /* + * if we fired off the journal_write_header asynchronously in + * 'end_transaction', we need to wait for its completion + * before writing the actual journal data + */ + wait_condition(jnl, &jnl->writing_header, "finish_end_transaction"); + + if (jnl->write_header_failed == FALSE) + ret = write_journal_data(jnl, &end, blhdr, amt); + else + ret_val = -1; + + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_JOURNAL_AFTER_JOURNAL_DATA, jnl->fsmount->psHfsmount, NULL); + #endif + + /* + * put the bp pointers back so that we can + * make the final pass on them + */ + for (i = 1; i < blhdr->num_blocks; i++) + blhdr->binfo[i].u.bp = (void*)bparray[i]; + + hfs_free(bparray); + + if (ret_val == -1) + goto bad_journal; + + if (ret != amt) { + LFHFS_LOG(LEVEL_ERROR, "jnl: end_transaction: only wrote %zu of %zu bytes to the journal!\n", + ret, amt); + + ret_val = -1; + goto bad_journal; + } + } + jnl->jhdr->end = end; // update where the journal now ends + tr->journal_end = end; // the transaction ends here too + + if (tr->journal_start == 0 || tr->journal_end == 0) { + panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n", + tr->journal_start, tr->journal_end); + } + + if (write_journal_header(jnl, 0, jnl->saved_sequence_num) != 0) { + ret_val = -1; + goto bad_journal; + } + + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, jnl->fsmount->psHfsmount, NULL); + #endif + + /* + * If the caller supplied a callback, call it now that the blocks have been + * written to the journal. 
This is used by journal_relocate so, for example, + * the file system can change its pointer to the new journal. + */ + if (callback != NULL && callback(callback_arg) != 0) { + ret_val = -1; + goto bad_journal; + } + + // the buffer_flushed_callback will only be called for the + // real blocks that get flushed so we have to account for + // the block_list_headers here. + // + tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size; + + lock_condition(jnl, &jnl->asyncIO, "finish_end_transaction"); + + // + // setup for looping through all the blhdr's. + // + for (blhdr = tr->blhdr; blhdr; blhdr = next) { + uint16_t num_blocks; + + /* + * grab this info ahead of issuing the buf_bawrites... + * once the last one goes out, its possible for blhdr + * to be freed (especially if we get preempted) before + * we do the last check of num_blocks or + * grab the next blhdr pointer... + */ + next = (block_list_header *)((long)blhdr->binfo[0].bnum); + num_blocks = blhdr->num_blocks; + + /* + * we can re-order the buf ptrs because everything is written out already + */ + qsort(&blhdr->binfo[1], num_blocks-1, sizeof(block_info), journal_binfo_cmp); + + /* + * need to make sure that the loop issuing the buf_bawrite's + * does not touch blhdr once the last buf_bawrite has been + * issued... 
at that point, we no longer have a legitmate + * reference on the associated storage since it will be + * released upon the completion of that last buf_bawrite + */ + for (i = num_blocks-1; i >= 1; i--) { + if (blhdr->binfo[i].bnum != (off_t)-1) + break; + num_blocks--; + } + for (i = 1; i < num_blocks; i++) { + + if ((bp = (void*)blhdr->binfo[i].u.bp)) { + + errno_t ret_val = 0; + + #if JOURNAL_DEBUG + printf("journal write physical: bp %p, psVnode %p, uBlockN %llu, uPhyCluster %llu uLockCnt %u\n", + bp, bp->psVnode, bp->uBlockN, bp->uPhyCluster, bp->uLockCnt); + #endif + + lf_hfs_generic_buf_clear_cache_flag(bp, GEN_BUF_WRITE_LOCK); + ret_val = lf_hfs_generic_buf_write(bp); + + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_JOURNAL_IN_BLOCK_DATA, jnl->fsmount->psHfsmount, NULL); + #endif + + if (ret_val) { + LFHFS_LOG(LEVEL_ERROR, "jnl: raw_readwrite_write_mount inside finish_end_transaction returned %d.\n", ret_val); + } + + buffer_written(tr, bp); + + lf_hfs_generic_buf_unlock(bp); + lf_hfs_generic_buf_release(bp); + + bufs_written++; + } + } + } + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_JOURNAL_AFTER_BLOCK_DATA, jnl->fsmount->psHfsmount, NULL); + #endif + if (bufs_written == 0) { + /* + * since we didn't issue any buf_bawrite's, there is no + * async trigger to cause the memory associated with this + * transaction to be freed... so, move it to the garbage + * list now + */ + lock_oldstart(jnl); + + tr->next = jnl->tr_freeme; + jnl->tr_freeme = tr; + + unlock_oldstart(jnl); + + unlock_condition(jnl, &jnl->asyncIO); + } + + //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n", + // tr, tr->journal_start, tr->journal_end); + +bad_journal: + if (ret_val == -1) { + abort_transaction(jnl, tr); // cleans up list of extents to be trimmed + + /* + * 'flush_aborted' is protected by the flushing condition... we need to + * set it before dropping the condition so that it will be + * noticed in 'end_transaction'... 
we add this additional + * aborted condition so that we can drop the 'flushing' condition + * before grabbing the journal lock... this avoids a deadlock + * in 'end_transaction' which is holding the journal lock while + * waiting for the 'flushing' condition to clear... + * everyone else will notice the JOURNAL_INVALID flag + */ + jnl->flush_aborted = TRUE; + + unlock_condition(jnl, &jnl->flushing); + journal_lock(jnl); + + jnl->flags |= JOURNAL_INVALID; + jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL; + + journal_unlock(jnl); + } else + unlock_condition(jnl, &jnl->flushing); + + return (ret_val); +} +static off_t free_space(journal *jnl) { + off_t free_space_offset; + + if (jnl->jhdr->start < jnl->jhdr->end) { + free_space_offset = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size; + } else if (jnl->jhdr->start > jnl->jhdr->end) { + free_space_offset = jnl->jhdr->start - jnl->jhdr->end; + } else { + // journal is completely empty + free_space_offset = jnl->jhdr->size - jnl->jhdr->jhdr_size; + } + + return free_space_offset; +} + +static void dump_journal(journal *jnl) { + transaction *ctr; + + printf(" jdev_offset %.8llx\n", jnl->jdev_offset); + printf(" magic: 0x%.8x\n", jnl->jhdr->magic); + printf(" start: 0x%.8llx\n", jnl->jhdr->start); + printf(" end: 0x%.8llx\n", jnl->jhdr->end); + printf(" size: 0x%.8llx\n", jnl->jhdr->size); + printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size); + printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size); + printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum); + + printf(" completed transactions:\n"); + for (ctr = jnl->completed_trs; ctr; ctr = ctr->next) { + printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end); + } +} + +// The journal must be locked on entry to this function. +// The "desired_size" is in bytes. 
+static int check_free_space( journal *jnl, + int desired_size, + boolean_t *delayed_header_write, + uint32_t sequence_num) { + + size_t i; + int counter=0; + + //printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n", + // desired_size, free_space(jnl)); + + if (delayed_header_write) + *delayed_header_write = FALSE; + + while (1) { + int old_start_empty; + + // make sure there's space in the journal to hold this transaction + if (free_space(jnl) > desired_size && jnl->old_start[0] == 0) { + break; + } + if (counter++ == 5000) { + dump_journal(jnl); + panic("jnl: check_free_space: buffer flushing isn't working " + "(jnl @ %p s %lld e %lld f %lld [active start %lld]).\n", jnl, + jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start); + } + if (counter > 7500) { + return ENOSPC; + } + + // here's where we lazily bump up jnl->jhdr->start. we'll consume + // entries until there is enough space for the next transaction. + old_start_empty = 1; + lock_oldstart(jnl); + + for (i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) { + int lcl_counter; + + lcl_counter = 0; + while (jnl->old_start[i] & 0x8000000000000000LL) { + if (lcl_counter++ > 10000) { + panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl %p).\n", + jnl->old_start[i], jnl); + } + + unlock_oldstart(jnl); + if (jnl->flush) { + jnl->flush(jnl->flush_arg); + } + usleep(10000); + lock_oldstart(jnl); + } + + if (jnl->old_start[i] == 0) { + continue; + } + + old_start_empty = 0; + jnl->jhdr->start = jnl->old_start[i]; + jnl->old_start[i] = 0; + + if (free_space(jnl) > desired_size) { + + if (delayed_header_write) + *delayed_header_write = TRUE; + else { + unlock_oldstart(jnl); + write_journal_header(jnl, 1, sequence_num); + lock_oldstart(jnl); + } + break; + } + } + unlock_oldstart(jnl); + + // if we bumped the start, loop and try again + if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) { + continue; + } else if (old_start_empty) { + // + // if there is 
nothing in old_start anymore then we can + // bump the jhdr->start to be the same as active_start + // since it is possible there was only one very large + // transaction in the old_start array. if we didn't do + // this then jhdr->start would never get updated and we + // would wind up looping until we hit the panic at the + // start of the loop. + // + jnl->jhdr->start = jnl->active_start; + + if (delayed_header_write) + *delayed_header_write = TRUE; + else + write_journal_header(jnl, 1, sequence_num); + continue; + } + + + // if the file system gave us a flush function, call it to so that + // it can flush some blocks which hopefully will cause some transactions + // to complete and thus free up space in the journal. + if (jnl->flush) { + jnl->flush(jnl->flush_arg); + } + + // wait for a while to avoid being cpu-bound (this will + // put us to sleep for 10 milliseconds) + usleep(10000); + } + + return 0; +} + +static void lock_condition(journal *jnl, ConditionalFlag_S *psCondFlag, __unused const char *condition_name) { + + lock_flush(jnl); + + while (psCondFlag->uFlag) { + pthread_cond_wait(&psCondFlag->sCond, &jnl->flock); + } + + psCondFlag->uFlag = TRUE; + unlock_flush(jnl); +} + +static void wait_condition(journal *jnl, ConditionalFlag_S *psCondFlag, __unused const char *condition_name) { + + if (!psCondFlag->uFlag) + return; + + lock_flush(jnl); + + while (psCondFlag->uFlag) { + pthread_cond_wait(&psCondFlag->sCond, &jnl->flock); + } + + unlock_flush(jnl); +} + +static void unlock_condition(journal *jnl, ConditionalFlag_S *psCondFlag) { + lock_flush(jnl); + + psCondFlag->uFlag = FALSE; + pthread_cond_broadcast(&psCondFlag->sCond); + + unlock_flush(jnl); +} + +/* + * End a transaction: + * 1) Determine if it is time to commit the transaction or not: + * If the transaction is small enough, and we're not forcing + * a write to disk, the "active" transaction becomes the "current" transaction, + * and will be reused for the next transaction that is started 
(group commit). + * + * 2) Commit: + * If the transaction gets written to disk (because force_it is true, or no + * group commit, or the transaction is sufficiently full), the blocks get + * written into the journal first, then they are written to their final location + * asynchronously. When those async writes complete, the transaction can be freed + * and removed from the journal. + * + * 3) Callback: + * An optional callback can be supplied. If given, it is called after the + * the blocks have been written to the journal, but before the async writes + * of those blocks to their normal on-disk locations. This is used by + * journal_relocate so that the location of the journal can be changed and + * flushed to disk before the blocks get written to their normal locations. + * Note that the callback is only called if the transaction gets written to + * the journal during this end_transaction call; you probably want to set the + * force_it flag. + * + * 4) Free blocks' Generic Buff. + * + * Inputs: + * tr Transaction to add to the journal + * force_it If true, force this transaction to the on-disk journal immediately. + * callback See description above. Pass NULL for no callback. + * callback_arg Argument passed to callback routine. + * + * Result + * 0 No errors + * -1 An error occurred. The journal is marked invalid. + */ +static int end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg, boolean_t drop_lock) { + + block_list_header *blhdr=NULL, *next=NULL; + int i, ret_val = 0; + journal *jnl = tr->jnl; + GenericLFBuf *bp; + size_t tbuffer_offset; + + if (jnl->cur_tr) { + panic("jnl: jnl @ %p already has cur_tr %p, new tr: %p\n", + jnl, jnl->cur_tr, tr); + } + + // if there weren't any modified blocks in the transaction + // just save off the transaction pointer and return. 
+ if (tr->total_bytes == (int)jnl->jhdr->blhdr_size) { + jnl->cur_tr = tr; + goto done; + } + + // if our transaction buffer isn't very full, just hang + // on to it and don't actually flush anything. this is + // what is known as "group commit". we will flush the + // transaction buffer if it's full or if we have more than + // one of them so we don't start hogging too much memory. + // + // We also check the device supports UNMAP/TRIM, and if so, + // the number of extents waiting to be trimmed. If it is + // small enough, then keep accumulating more (so we can + // reduce the overhead of trimming). If there was a prior + // trim error, then we stop issuing trims for this + // volume, so we can also coalesce transactions. + // + if ( force_it == 0 + && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0 + && tr->num_blhdrs < 3 + && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8)) + && (!(jnl->flags & JOURNAL_USE_UNMAP) || (tr->trim.extent_count < jnl_trim_flush_limit))) { + + jnl->cur_tr = tr; + goto done; + } + + lock_condition(jnl, &jnl->flushing, "end_transaction"); + + /* + * if the previous 'finish_end_transaction' was being run + * asynchronously, it could have encountered a condition + * that caused it to mark the journal invalid... if that + * occurred while we were waiting for it to finish, we + * need to notice and abort the current transaction + */ + if ((jnl->flags & JOURNAL_INVALID) || jnl->flush_aborted == TRUE) { + unlock_condition(jnl, &jnl->flushing); + + abort_transaction(jnl, tr); + ret_val = -1; + goto done; + } + + /* + * Store a pointer to this transaction's trim list so that + * future transactions can find it. + * + * Note: if there are no extents in the trim list, then don't + * bother saving the pointer since nothing can add new extents + * to the list (and other threads/transactions only care if + * there is a trim pending). 
+ */ + lf_lck_rw_lock_exclusive(&jnl->trim_lock); + if (jnl->async_trim != NULL) + panic("jnl: end_transaction: async_trim already non-NULL!"); + if (tr->trim.extent_count > 0) + jnl->async_trim = &tr->trim; + lf_lck_rw_unlock_exclusive(&jnl->trim_lock); + + /* + * snapshot the transaction sequence number while we are still behind + * the journal lock since it will be bumped upon the start of the + * next transaction group which may overlap the current journal flush... + * we pass the snapshot into write_journal_header during the journal + * flush so that it can write the correct version in the header... + * because we hold the 'flushing' condition variable for the duration + * of the journal flush, 'saved_sequence_num' remains stable + */ + jnl->saved_sequence_num = jnl->sequence_num; + + /* + * if we're here we're going to flush the transaction buffer to disk. + * 'check_free_space' will not return untl there is enough free + * space for this transaction in the journal and jnl->old_start[0] + * is avaiable for use + */ + check_free_space(jnl, tr->total_bytes, &tr->delayed_header_write, jnl->saved_sequence_num); + + // range check the end index + if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) { + panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n", + jnl->jhdr->end, jnl->jhdr->size); + } + + // this transaction starts where the current journal ends + tr->journal_start = jnl->jhdr->end; + + lock_oldstart(jnl); + /* + * Because old_start is locked above, we can cast away the volatile qualifier before passing it to memcpy. 
+ * slide everyone else down and put our latest guy in the last + * entry in the old_start array + */ + memcpy(__CAST_AWAY_QUALIFIER(&jnl->old_start[0], volatile, void *), __CAST_AWAY_QUALIFIER(&jnl->old_start[1], volatile, void *), sizeof(jnl->old_start)-sizeof(jnl->old_start[0])); + jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL; + + unlock_oldstart(jnl); + + // go over the blocks in the transaction. + // for each block, call the fpCallback and copy the content into the journal buffer + for (blhdr = tr->blhdr; blhdr; blhdr = next) { + char *blkptr; + size_t bsize; + + tbuffer_offset = jnl->jhdr->blhdr_size; + + for (i = 1; i < blhdr->num_blocks; i++) { + + if (blhdr->binfo[i].bnum != (off_t)-1) { + + bp = (GenericLFBuf*)blhdr->binfo[i].u.bp; + + if (bp == NULL) { + panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ %p, tr %p)\n", + blhdr->binfo[i].bnum, jnl, tr); + } + + bsize = bp->uDataSize; + + blkptr = (char *)&((char *)blhdr)[tbuffer_offset]; + + int iRet; + retry: + iRet = lf_hfs_generic_buf_take_ownership(bp, NULL); + if (iRet == EAGAIN) { + goto retry; + } else if (iRet) { + LFHFS_LOG(LEVEL_ERROR, "jnl: end_transaction: lf_hfs_generic_buf_take_ownership returned %d.\n", iRet); + ret_val = -1; + goto done; + } + + if (!(bp->uCacheFlags & GEN_BUF_WRITE_LOCK)) { + panic("GEN_BUF_WRITE_LOCK should be set!"); + } + + // Call the buffer callback + if (bp->pfFunc) { + bp->pfFunc(bp, bp->pvCallbackArgs); + bp->pfFunc = NULL; + } + + if (bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN) { + panic("We do not want to write a GEN_BUF_LITTLE_ENDIAN buffer to media!"); + } + + // copy the data into the transaction buffer... 
+ memcpy(blkptr, bp->pvData, bsize); + + blhdr->binfo[i].u.bp = (void*)bp; + + } else { + // bnum == -1, only true if a block was "killed" + bsize = blhdr->binfo[i].u.bi.bsize; + } + tbuffer_offset += bsize; + } + next = (block_list_header *)((long)blhdr->binfo[0].bnum); + } + + #if HFS_CRASH_TEST + CRASH_ABORT(CRASH_ABORT_JOURNAL_BEFORE_FINISH, jnl->fsmount->psHfsmount, NULL); + #endif + + ret_val = finish_end_transaction(tr, callback, callback_arg); + +done: + if (drop_lock == TRUE) { + journal_unlock(jnl); + } + return (ret_val); +} + +static void abort_transaction(journal *jnl, transaction *tr) { + + block_list_header *blhdr, *next; + // for each block list header, iterate over the blocks then + // free up the memory associated with the block list. + for (blhdr = tr->blhdr; blhdr; blhdr = next) { + int i; + + for (i = 1; i < blhdr->num_blocks; i++) { + GenericLFBufPtr bp; + + if (blhdr->binfo[i].bnum == (off_t)-1) + continue; + + bp = (void*)blhdr->binfo[i].u.bp; + + // Release the buffers + lf_hfs_generic_buf_clear_cache_flag(bp, GEN_BUF_WRITE_LOCK); + if (lf_hfs_generic_buf_validate_owner(bp)) { // abort_transaction can be called before or after we take ownership + lf_hfs_generic_buf_release(bp); + } + + } + next = (block_list_header *)((long)blhdr->binfo[0].bnum); + + // we can free blhdr here since we won't need it any more + blhdr->binfo[0].bnum = 0xdeadc0de; + hfs_free(blhdr); + } + + /* + * If the transaction we're aborting was the async transaction, then + * tell the current transaction that there is no pending trim + * any more. 
+ */ + lf_lck_rw_lock_exclusive(&jnl->trim_lock); + if (jnl->async_trim == &tr->trim) + jnl->async_trim = NULL; + lf_lck_rw_unlock_exclusive(&jnl->trim_lock); + + + if (tr->trim.extents) { + hfs_free(tr->trim.extents); + } + tr->trim.allocated_count = 0; + tr->trim.extent_count = 0; + tr->trim.extents = NULL; + tr->tbuffer = NULL; + tr->blhdr = NULL; + tr->total_bytes = 0xdbadc0de; + hfs_free(tr); +} + +static void swap_journal_header(journal *jnl) { + jnl->jhdr->magic = SWAP32(jnl->jhdr->magic); + jnl->jhdr->endian = SWAP32(jnl->jhdr->endian); + jnl->jhdr->start = SWAP64(jnl->jhdr->start); + jnl->jhdr->end = SWAP64(jnl->jhdr->end); + jnl->jhdr->size = SWAP64(jnl->jhdr->size); + jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size); + jnl->jhdr->checksum = SWAP32(jnl->jhdr->checksum); + jnl->jhdr->jhdr_size = SWAP32(jnl->jhdr->jhdr_size); + jnl->jhdr->sequence_num = SWAP32(jnl->jhdr->sequence_num); +} + +// this isn't a great checksum routine but it will do for now. +// we use it to checksum the journal header and the block list +// headers that are at the start of each transaction. 
+static unsigned int calc_checksum(const char *ptr, int len) { + int i; + unsigned int cksum=0; + + // this is a lame checksum but for now it'll do + for(i = 0; i < len; i++, ptr++) { + cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr); + } + + return (~cksum); +} + + +static size_t do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction) { + off_t curlen = len; + size_t io_sz = 0; + off_t max_iosize; +#if 0 // TBD + int err; + buf_t bp; + off_t accumulated_offset = 0; + ExtendedVCB *vcb = HFSTOVCB(jnl->fsmount->psHfsmount); +#endif + + if (*offset < 0 || *offset > jnl->jhdr->size) { + panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size); + } + + if (direction & JNL_WRITE) + max_iosize = jnl->max_write_size; + else if (direction & JNL_READ) + max_iosize = jnl->max_read_size; + else + max_iosize = 128 * 1024; + +again: + + // Determine the Current R/W Length, taking cyclic wrap around into account + if (*offset + curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) { + if (*offset == jnl->jhdr->size) { + *offset = jnl->jhdr->jhdr_size; + } else { + curlen = jnl->jhdr->size - *offset; + } + } + + if (curlen > max_iosize) { + curlen = max_iosize; + } + + if (curlen <= 0) { + panic("jnl: do_jnl_io: curlen == %lld, offset 0x%llx len %zd\n", curlen, *offset, len); + } + + if (*offset == 0 && (direction & JNL_HEADER) == 0) { + panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! 
(len %lld, data %p)\n", curlen, data); + } + + + // Perform the I/O + uint64_t phyblksize = jnl->fsmount->psHfsmount->hfs_physical_block_size; + uint64_t uBlkNum = jnl->jdev_blknum+(*offset)/phyblksize; + + if (direction & JNL_READ) { + raw_readwrite_read_mount(jnl->jdev, uBlkNum, phyblksize, data, curlen, NULL, NULL); + + } else if (direction & JNL_WRITE) { + raw_readwrite_write_mount(jnl->jdev, uBlkNum, phyblksize, data, curlen, NULL, NULL); + } + + // Move to the next section + *offset += curlen; + io_sz += curlen; + + if (io_sz != len) { + // handle wrap-around + data = (char *)data + curlen; + curlen = len - io_sz; + if (*offset >= jnl->jhdr->size) { + *offset = jnl->jhdr->jhdr_size; + } + goto again; + } + + return io_sz; +} + +static size_t read_journal_header(journal *jnl, void *data, size_t len) { + off_t hdr_offset = 0; + + return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER); +} + +static void get_io_info(struct vnode *devvp, size_t phys_blksz, journal *jnl) { + off_t readblockcnt; + off_t writeblockcnt; + off_t readmaxcnt=0, tmp_readmaxcnt; + off_t writemaxcnt=0, tmp_writemaxcnt; + off_t readsegcnt, writesegcnt; + + // First check the max read size via several different mechanisms... + ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXBYTECOUNTREAD, (caddr_t)&readmaxcnt); + + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t)&readblockcnt) == 0) { + tmp_readmaxcnt = readblockcnt * phys_blksz; + if (readmaxcnt == 0 || (readblockcnt > 0 && tmp_readmaxcnt < readmaxcnt)) { + readmaxcnt = tmp_readmaxcnt; + } + } + + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t)&readsegcnt)) { + readsegcnt = 0; + } + + if (readsegcnt > 0 && (readsegcnt * PAGE_SIZE) < readmaxcnt) { + readmaxcnt = readsegcnt * PAGE_SIZE; + } + + if (readmaxcnt == 0) { + readmaxcnt = 128 * 1024; + } else if (readmaxcnt > UINT32_MAX) { + readmaxcnt = UINT32_MAX; + } + + + // Now check the max writes size via several different mechanisms... 
+ ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t)&writemaxcnt); + + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t)&writeblockcnt) == 0) { + tmp_writemaxcnt = writeblockcnt * phys_blksz; + if (writemaxcnt == 0 || (writeblockcnt > 0 && tmp_writemaxcnt < writemaxcnt)) { + writemaxcnt = tmp_writemaxcnt; + } + } + + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t)&writesegcnt)) { + writesegcnt = 0; + } + + if (writesegcnt > 0 && (writesegcnt * PAGE_SIZE) < writemaxcnt) { + writemaxcnt = writesegcnt * PAGE_SIZE; + } + + if (writemaxcnt == 0) { + writemaxcnt = 128 * 1024; + } else if (writemaxcnt > UINT32_MAX) { + writemaxcnt = UINT32_MAX; + } + + jnl->max_read_size = readmaxcnt; + jnl->max_write_size = writemaxcnt; +} + +// this is a work function used to free up transactions that +// completed. they can't be free'd from buffer_flushed_callback +// because it is called from deep with the disk driver stack +// and thus can't do something that would potentially cause +// paging. it gets called by each of the journal api entry +// points so stuff shouldn't hang around for too long. +static void free_old_stuff(journal *jnl) { + transaction *tr, *next; + block_list_header *blhdr=NULL, *next_blhdr=NULL; + + if (jnl->tr_freeme == NULL) + return; + + lock_oldstart(jnl); + tr = jnl->tr_freeme; + jnl->tr_freeme = NULL; + unlock_oldstart(jnl); + + for(; tr; tr=next) { + for (blhdr = tr->blhdr; blhdr; blhdr = next_blhdr) { + next_blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum); + blhdr->binfo[0].bnum = 0xdeadc0de; + + hfs_free(blhdr); + + KERNEL_DEBUG(0xbbbbc01c, jnl, tr, tr->tbuffer_size, 0, 0); + } + next = tr->next; + hfs_free(tr); + } +} + +// Allocate a new active transaction. 
+// The function does the following: +// 1) mallocs memory for a transaction structure and a buffer +// 2) initializes the transaction structure and the buffer (invalid CRC + 0x5a) +static errno_t journal_allocate_transaction(journal *jnl) { + transaction *tr; + + tr = hfs_mallocz(sizeof(transaction)); + + tr->tbuffer_size = jnl->tbuffer_size; + + tr->tbuffer = hfs_malloc(tr->tbuffer_size); + + // journal replay code checksum check depends on this. + memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE); + // Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility) + memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE); + + tr->blhdr = (block_list_header *)tr->tbuffer; + tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1; + tr->blhdr->num_blocks = 1; // accounts for this header block + tr->blhdr->bytes_used = jnl->jhdr->blhdr_size; + tr->blhdr->flags = BLHDR_CHECK_CHECKSUMS | BLHDR_FIRST_HEADER; + + tr->sequence_num = ++jnl->sequence_num; + tr->num_blhdrs = 1; + tr->total_bytes = jnl->jhdr->blhdr_size; + tr->jnl = jnl; + + jnl->active_tr = tr; + + return 0; +} + +int journal_kill_block(journal *jnl, GenericLFBuf *psGenBuf) { + int i; + uint64_t uflags; + block_list_header *blhdr; + transaction *tr; + + #if JOURNAL_DEBUG + printf("journal_kill_block: psGenBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uPhyCluster %llu uLockCnt %u\n", + psGenBuf, psGenBuf->psVnode, psGenBuf->uBlockN, psGenBuf->uDataSize ,psGenBuf->uPhyCluster, psGenBuf->uLockCnt); + #endif + + CHECK_JOURNAL(jnl); + free_old_stuff(jnl); + + if (jnl->flags & JOURNAL_INVALID) { + lf_hfs_generic_buf_clear_cache_flag(psGenBuf, GEN_BUF_WRITE_LOCK); + lf_hfs_generic_buf_release(psGenBuf); + return 0; + } + + tr = jnl->active_tr; + CHECK_TRANSACTION(tr); + + if (jnl->owner != pthread_self()) { + panic("jnl: journal_kill_block: called w/out a transaction! 
jnl %p, owner %p, curact %p\n", + jnl, jnl->owner, pthread_self()); + } + + uflags = psGenBuf->uCacheFlags; + + if ( !(uflags & GEN_BUF_WRITE_LOCK)) + panic("jnl: journal_kill_block: called with bp not B_LOCKED"); + + /* + * bp must be BL_BUSY and B_LOCKED + * first check if it's already part of this transaction + */ + for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) { + + for (i = 1; i < blhdr->num_blocks; i++) { + if (psGenBuf == (void*)blhdr->binfo[i].u.bp) { + + // if the block has the DELWRI and FILTER bits sets, then + // things are seriously weird. if it was part of another + // transaction then journal_modify_block_start() should + // have force it to be written. + // + //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) { + // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp); + //} else { + tr->num_killed += psGenBuf->uDataSize; + //} + blhdr->binfo[i].bnum = (off_t)-1; + blhdr->binfo[i].u.bp = NULL; + blhdr->binfo[i].u.bi.bsize = psGenBuf->uDataSize; + + lf_hfs_generic_buf_clear_cache_flag(psGenBuf, GEN_BUF_WRITE_LOCK); + lf_hfs_generic_buf_release(psGenBuf); + + return 0; + } + } + } + + /* + * We did not find the block in any transaction buffer but we still + * need to release it or else it will be left locked forever. + */ + lf_hfs_generic_buf_clear_cache_flag(psGenBuf, GEN_BUF_WRITE_LOCK); + lf_hfs_generic_buf_release(psGenBuf); + + return 0; +} + +int journal_is_clean(struct vnode *jvp, + off_t offset, + off_t journal_size, + struct vnode *fsvp, + size_t min_fs_block_size, + struct mount *fsmount) { + + journal jnl; + uint32_t phys_blksz; + int ret; + int orig_checksum, checksum; + + /* Get the real physical block size. 
*/ + if (ioctl(jvp->psFSRecord->iFD, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz)) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: failed to get device block size.\n"); + ret = EINVAL; + goto cleanup_jdev_name; + } + + if (phys_blksz > (uint32_t)min_fs_block_size) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: error: phys blksize %d bigger than min fs blksize %zd\n", + phys_blksz, min_fs_block_size); + ret = EINVAL; + goto cleanup_jdev_name; + } + + if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: journal size %lld looks bogus.\n", journal_size); + ret = EINVAL; + goto cleanup_jdev_name; + } + + if ((journal_size % phys_blksz) != 0) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n", + journal_size, phys_blksz); + ret = EINVAL; + goto cleanup_jdev_name; + } + + memset(&jnl, 0, sizeof(jnl)); + + jnl.header_buf = hfs_malloc(phys_blksz); + jnl.header_buf_size = phys_blksz; + + // Keep a point to the mount around for use in IO throttling. 
+ jnl.fsmount = fsmount; + + get_io_info(jvp, phys_blksz, &jnl); + + jnl.jhdr = (journal_header *)jnl.header_buf; + memset(jnl.jhdr, 0, sizeof(journal_header)); + + jnl.jdev = jvp; + jnl.jdev_offset = offset; + jnl.jdev_blknum = (uint32_t)(offset / phys_blksz); + jnl.fsdev = fsvp; + + // we have to set this up here so that do_journal_io() will work + jnl.jhdr->jhdr_size = phys_blksz; + + if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != (unsigned)phys_blksz) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: could not read %d bytes for the journal header.\n", + phys_blksz); + ret = EINVAL; + goto get_out; + } + + orig_checksum = jnl.jhdr->checksum; + jnl.jhdr->checksum = 0; + + if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) { + // do this before the swap since it's done byte-at-a-time + orig_checksum = SWAP32(orig_checksum); + checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE); + swap_journal_header(&jnl); + jnl.flags |= JOURNAL_NEED_SWAP; + } else { + checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE); + } + + if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: journal magic is bad (0x%x != 0x%x)\n", + jnl.jhdr->magic, JOURNAL_HEADER_MAGIC); + ret = EINVAL; + goto get_out; + } + + if (orig_checksum != checksum) { + LFHFS_LOG(LEVEL_ERROR, "jnl: journal_is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum); + ret = EINVAL; + goto get_out; + } + + // + // if the start and end are equal then the journal is clean. + // otherwise it's not clean and therefore an error. + // + if (jnl.jhdr->start == jnl.jhdr->end) { + ret = 0; + } else { + ret = EBUSY; // so the caller can differentiate an invalid journal from a "busy" one + } + +get_out: + hfs_free(jnl.header_buf); +cleanup_jdev_name: + return ret; +} + +uint32_t journal_current_txn(journal *jnl) { + return jnl->sequence_num + (jnl->active_tr || jnl->cur_tr ? 
0 : 1); +} + diff --git a/livefiles_hfs_plugin/lf_hfs_journal.h b/livefiles_hfs_plugin/lf_hfs_journal.h new file mode 100644 index 0000000..f40f90a --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_journal.h @@ -0,0 +1,379 @@ +// +// lf_hfs_journal.h +// livefiles_hfs +// +// Created by Or Haimovich on 22/3/18. +// + +#ifndef lf_hfs_journal_h +#define lf_hfs_journal_h + +#include +#include "lf_hfs_generic_buf.h" + +#define JOURNAL_DEBUG 0 + +typedef struct _blk_info { + int32_t bsize; + union { + int32_t cksum; + uint32_t sequence_num; + } b; +} _blk_info; + +typedef struct block_info { + off_t bnum; // block # on the file system device + union { + _blk_info bi; + struct buf *bp; + } u; +} __attribute__((__packed__)) block_info; + +typedef struct block_list_header { + u_int16_t max_blocks; // max number of blocks in this chunk + u_int16_t num_blocks; // number of valid block numbers in block_nums + int32_t bytes_used; // how many bytes of this tbuffer are used + uint32_t checksum; // on-disk: checksum of this header and binfo[0] + int32_t flags; // check-checksums, initial blhdr, etc + block_info binfo[1]; // so we can reference them by name +} block_list_header; + +#define BLHDR_CHECK_CHECKSUMS 0x0001 +#define BLHDR_FIRST_HEADER 0x0002 + + +struct journal; + +struct jnl_trim_list { + uint32_t allocated_count; + uint32_t extent_count; + dk_extent_t *extents; +}; + +typedef void (*jnl_trim_callback_t)(void *arg, uint32_t extent_count, const dk_extent_t *extents); + +typedef struct transaction { + int tbuffer_size; // in bytes + char *tbuffer; // memory copy of the transaction + block_list_header *blhdr; // points to the first byte of tbuffer + int num_blhdrs; // how many buffers we've allocated + int total_bytes; // total # of bytes in transaction + int num_flushed; // how many bytes have been flushed + int num_killed; // how many bytes were "killed" + off_t journal_start; // where in the journal this transaction starts + off_t journal_end; // where in the journal 
// we only checksum the original size of the journal_header to remain
// backwards compatible. the size of the original journal_header is
// everything up to the sequence_num field, hence we use the
// offsetof macro to calculate the size.
+ */ +typedef struct journal { + pthread_mutex_t jlock; // protects the struct journal data + pthread_mutex_t flock; // serializes flushing of journal + pthread_rwlock_t trim_lock; // protects the async_trim field, below + + struct vnode *jdev; // vnode of the device where the journal lives + off_t jdev_offset; // byte offset to the start of the journal + uint32_t jdev_blknum; // Physical block number of the journal + //const char *jdev_name; + + struct vnode *fsdev; // vnode of the file system device + struct mount *fsmount; // mount of the file system + + void (*flush)(void *arg); // fs callback to flush meta data blocks + void *flush_arg; // arg that's passed to flush() + + int32_t flags; + uint32_t tbuffer_size; // default transaction buffer size + ConditionalFlag_S flushing; + ConditionalFlag_S asyncIO; + ConditionalFlag_S writing_header; + boolean_t flush_aborted; + boolean_t write_header_failed; + + struct jnl_trim_list *async_trim; // extents to be trimmed by transaction being asynchronously flushed + jnl_trim_callback_t trim_callback; + void *trim_callback_arg; + + char *header_buf; // in-memory copy of the journal header + int32_t header_buf_size; + journal_header *jhdr; // points to the first byte of header_buf + + uint32_t saved_sequence_num; + uint32_t sequence_num; + + off_t max_read_size; + off_t max_write_size; + + transaction *cur_tr; // for group-commit + transaction *completed_trs; // out-of-order transactions that completed + transaction *active_tr; // for nested transactions + int32_t nested_count; // for nested transactions + void *owner; // a ptr that's unique to the calling process + + transaction *tr_freeme; // transaction structs that need to be free'd + + volatile off_t active_start; // the active start that we only keep in memory + pthread_mutex_t old_start_lock; // protects the old_start + volatile off_t old_start[16]; // this is how we do lazy start update + + int last_flush_err; // last error from flushing the cache + uint32_t 
 * used by the journal. If you specify zero, the journal code
 * will use a reasonable default. The tbuffer_size should
 * be an integer multiple of the min_fs_block_size.
 * Returns a valid journal pointer or NULL if it runs into
 * trouble reading/playing back the journal.
 * If as part of a transaction you want to throw out
 * any previous copies of a block (because it got deleted)
 * then call journal_kill_block(). This will mark it so
 * that the journal does not play it back (effectively
 * dropping it).
+ */ +int journal_start_transaction(journal *jnl); +int journal_modify_block_start(journal *jnl, GenericLFBuf *psGenBuf); +int journal_modify_block_abort(journal *jnl, struct buf *bp); +int journal_modify_block_end(journal *jnl, GenericLFBuf *psGenBuf, void (*func)(GenericLFBuf *bp, void *arg), void *arg); +int journal_kill_block(journal *jnl, GenericLFBuf *bp); +int journal_trim_add_extent(journal *jnl, uint64_t offset, uint64_t length); +int journal_trim_remove_extent(journal *jnl, uint64_t offset, uint64_t length); +void journal_trim_set_callback(journal *jnl, jnl_trim_callback_t callback, void *arg); +int journal_trim_extent_overlap (journal *jnl, uint64_t offset, uint64_t length, uint64_t *end); +/* Mark state in the journal that requests an immediate journal flush upon txn completion */ +int journal_request_immediate_flush (journal *jnl); +int journal_end_transaction(journal *jnl); + +int journal_active(journal *jnl); + +typedef enum journal_flush_options { + JOURNAL_WAIT_FOR_IO = 0x01, // Flush journal and metadata blocks, wait for async IO to complete. + JOURNAL_FLUSH_FULL = 0x02, // Flush track cache to media +} journal_flush_options_t; + +int journal_flush(journal *jnl, journal_flush_options_t options); +void *journal_owner(journal *jnl); // compare against current_thread() +int journal_uses_fua(journal *jnl); +void journal_lock(journal *jnl); +void journal_unlock(journal *jnl); +uint32_t journal_current_txn(journal *jnl); + + +/* + * Relocate the journal. + * + * You provide the new starting offset and size for the journal. You may + * optionally provide a new tbuffer_size; passing zero defaults to not + * changing the tbuffer size except as needed to fit within the new journal + * size. + * + * You must have already started a transaction. The transaction may contain + * modified blocks (such as those needed to deallocate the old journal, + * allocate the new journal, and update the location and size of the journal + * in filesystem-private structures). 
Any transactions prior to the active + * transaction will be flushed to the old journal. The new journal will be + * initialized, and the blocks from the active transaction will be written to + * the new journal. The caller will need to update the structures that + * identify the location and size of the journal from the callback routine. + */ +int journal_relocate(journal *jnl, off_t offset, off_t journal_size, int32_t tbuffer_size, + errno_t (*callback)(void *), void *callback_arg); + +uint32_t journal_current_txn(journal *jnl); +_Bool hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp); +bool is_journaled(UVFSFileNode *psRootNode); + +__END_DECLS + +#endif /* lf_hfs_journal_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_link.c b/livefiles_hfs_plugin/lf_hfs_link.c new file mode 100644 index 0000000..f765476 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_link.c @@ -0,0 +1,950 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_link.c + * livefiles_hfs + * + * Created by Or Haimovich on 17/05/2018. + */ + +#include "lf_hfs_link.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_btrees_internal.h" +#include "lf_hfs_xattr.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_format.h" +#include "lf_hfs_defs.h" + +/* + * Private directories where hardlink inodes reside. + */ +const char *hfs_private_names[] = { + HFSPLUSMETADATAFOLDER, /* FILE HARDLINKS */ + HFSPLUS_DIR_METADATA_FOLDER /* DIRECTORY HARDLINKS */ +}; + +static int getfirstlink(struct hfsmount * hfsmp, cnid_t fileid, cnid_t *firstlink); +static int setfirstlink(struct hfsmount * hfsmp, cnid_t fileid, cnid_t firstlink); +/* + * Set the first link attribute for a given file id. + * + * The attributes b-tree must already be locked. + * If journaling is enabled, a transaction must already be started. 
+ */ +static int +setfirstlink(struct hfsmount * hfsmp, cnid_t fileid, cnid_t firstlink) +{ + FCB * btfile; + BTreeIterator * iterator; + FSBufferDescriptor btdata; + u_int8_t attrdata[FIRST_LINK_XATTR_REC_SIZE]; + HFSPlusAttrData *dataptr; + int result; + u_int16_t datasize; + + if (hfsmp->hfs_attribute_cp == NULL) { + return (EPERM); + } + iterator = hfs_mallocz(sizeof(*iterator)); + if (iterator == NULL) + return ENOMEM; + + result = hfs_buildattrkey(fileid, FIRST_LINK_XATTR_NAME, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto out; + } + dataptr = (HFSPlusAttrData *)&attrdata[0]; + dataptr->recordType = kHFSPlusAttrInlineData; + dataptr->reserved[0] = 0; + dataptr->reserved[1] = 0; + + /* + * Since attrData is variable length, we calculate the size of + * attrData by subtracting the size of all other members of + * structure HFSPlusAttData from the size of attrdata. + */ + (void)snprintf((char *)&dataptr->attrData[0], sizeof(dataptr) - (4 * sizeof(uint32_t)), "%lu", (unsigned long)firstlink); + + dataptr->attrSize = (u_int32_t)( 1 + strlen((char *)&dataptr->attrData[0])); + + /* Calculate size of record rounded up to multiple of 2 bytes. */ + datasize = sizeof(HFSPlusAttrData) - 2 + dataptr->attrSize + ((dataptr->attrSize & 1) ? 1 : 0); + + btdata.bufferAddress = dataptr; + btdata.itemSize = datasize; + btdata.itemCount = 1; + + btfile = hfsmp->hfs_attribute_cp->c_datafork; + + /* Insert the attribute. */ + result = BTInsertRecord(btfile, iterator, &btdata, datasize); + if (result == btExists) { + result = BTReplaceRecord(btfile, iterator, &btdata, datasize); + } + (void) BTFlushPath(btfile); +out: + hfs_free(iterator); + + return MacToVFSError(result); +} + +/* + * Get the first link attribute for a given file id. + * + * The attributes b-tree must already be locked. 
 */
static int
getfirstlink(struct hfsmount * hfsmp, cnid_t fileid, cnid_t *firstlink)
{
    FCB * btfile;
    BTreeIterator * iterator;
    FSBufferDescriptor btdata;
    u_int8_t attrdata[FIRST_LINK_XATTR_REC_SIZE];
    HFSPlusAttrData *dataptr;
    int result = 0;

    /* No attribute file means there is nowhere a first-link could live. */
    if (hfsmp->hfs_attribute_cp == NULL) {
        return (EPERM);
    }
    iterator = hfs_mallocz(sizeof(*iterator));
    if (iterator == NULL)
        return ENOMEM;

    /* Build the key for the FIRST_LINK_XATTR_NAME attribute of this file id. */
    result = hfs_buildattrkey(fileid, FIRST_LINK_XATTR_NAME, (HFSPlusAttrKey *)&iterator->key);
    if (result)
        goto out;

    dataptr = (HFSPlusAttrData *)&attrdata[0];

    btdata.bufferAddress = dataptr;
    btdata.itemSize = sizeof(attrdata);
    btdata.itemCount = 1;

    btfile = hfsmp->hfs_attribute_cp->c_datafork;

    result = BTSearchRecord(btfile, iterator, &btdata, NULL, NULL);
    if (result)
        goto out;

    /* Reject implausibly short payloads — presumably at least two
     * digits plus the NUL written by setfirstlink; TODO confirm. */
    if (dataptr->attrSize < 3) {
        result = ENOENT;
        goto out;
    }
    /* The link id is stored as a decimal string; parse it back out. */
    *firstlink = (cnid_t) strtoul((char*)&dataptr->attrData[0], NULL, 10);
out:
    hfs_free(iterator);

    return MacToVFSError(result);
}

/* Find the oldest / last hardlink in the link chain */
int
hfs_lookup_lastlink (struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t *lastid, struct cat_desc *cdesc) {
    int lockflags;
    int error;

    *lastid = 0;

    /* Read-only lookup: shared catalog lock suffices. */
    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    error = cat_lookup_lastlink(hfsmp, linkfileid, lastid, cdesc);

    hfs_systemfile_unlock(hfsmp, lockflags);

    /*
     * cat_lookup_lastlink will zero out the lastid/cdesc arguments as needed
     * upon error cases.
 * Release a specific origin for a directory or file hard link
 *
 * cnode must be locked on entry
CD_ISDIR : 0; + cndesc.cd_encoding = cp->c_desc.cd_encoding; + cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; + cndesc.cd_namelen = cnp->cn_namelen; + cndesc.cd_parentcnid = dcp->c_fileid; + cndesc.cd_hint = dcp->c_childhint; + + lockflags = SFL_CATALOG | SFL_ATTRIBUTE; + if (cndesc.cd_flags & CD_ISDIR) { + /* We'll be removing the alias resource allocation blocks. */ + lockflags |= SFL_BITMAP; + } + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + + if ((error = cat_lookuplink(hfsmp, &cndesc, &cndesc.cd_cnid, &prevlinkid, &nextlinkid))) { + goto out; + } + + /* Reserve some space in the catalog file. */ + if (!skip_reserve && (error = cat_preflight(hfsmp, 2 * CAT_DELETE, NULL))) { + goto out; + } + + /* Purge any cached origin entries for a directory or file hard link. */ + hfs_relorigin(cp, dcp->c_fileid); + if (dcp->c_fileid != dcp->c_cnid) { + hfs_relorigin(cp, dcp->c_cnid); + } + + /* Delete the link record. */ + if ((error = cat_deletelink(hfsmp, &cndesc))) { + goto out; + } + + /* Update the parent directory. */ + if (dcp->c_entries > 0) { + dcp->c_entries--; + } + if (cndesc.cd_flags & CD_ISDIR) { + DEC_FOLDERCOUNT(hfsmp, dcp->c_attr); + } + dcp->c_dirchangecnt++; + hfs_incr_gencount(dcp); + + struct timeval tv; + microtime(&tv); + dcp->c_touch_chgtime = dcp->c_touch_modtime = true; + dcp->c_flag |= C_MODIFIED; + hfs_update(dcp->c_vp, 0); + + /* + * If this is the last link then we need to process the inode. + * Otherwise we need to fix up the link chain. + */ + --cp->c_linkcount; + if (cp->c_linkcount < 1) { + char delname[32]; + struct cat_desc to_desc; + struct cat_desc from_desc; + + /* + * If a file inode or directory inode is being deleted, rename + * it to an open deleted file. This ensures that deletion + * of inode and its corresponding extended attributes does + * not overflow the journal. This inode will be deleted + * either in hfs_vnop_inactive() or in hfs_remove_orphans(). 
+ * Note: a rename failure here is not fatal. + */ + bzero(&from_desc, sizeof(from_desc)); + bzero(&to_desc, sizeof(to_desc)); + if (vnode_isdir(vp)) { + if (cp->c_entries != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_unlink: dir not empty (id %d, %d entries)", cp->c_fileid, cp->c_entries); + hfs_assert(0); + } + MAKE_DIRINODE_NAME(inodename, sizeof(inodename), + cp->c_attr.ca_linkref); + from_desc.cd_parentcnid = hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid; + from_desc.cd_flags = CD_ISDIR; + to_desc.cd_flags = CD_ISDIR; + } else { + MAKE_INODE_NAME(inodename, sizeof(inodename), + cp->c_attr.ca_linkref); + from_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + from_desc.cd_flags = 0; + to_desc.cd_flags = 0; + } + from_desc.cd_nameptr = (const u_int8_t *)inodename; + from_desc.cd_namelen = strlen(inodename); + from_desc.cd_cnid = cp->c_fileid; + + MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid); + to_desc.cd_nameptr = (const u_int8_t *)delname; + to_desc.cd_namelen = strlen(delname); + to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + to_desc.cd_cnid = cp->c_fileid; + + error = cat_rename(hfsmp, &from_desc, &hfsmp->hfs_private_desc[FILE_HARDLINKS], + &to_desc, (struct cat_desc *)NULL); + if (error == 0) { + cp->c_flag |= C_DELETED; + cp->c_attr.ca_recflags &= ~kHFSHasLinkChainMask; + cp->c_attr.ca_firstlink = 0; + if (vnode_isdir(vp)) { + hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries--; + DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[DIR_HARDLINKS]); + + hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++; + INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]); + + (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[DIR_HARDLINKS], + &hfsmp->hfs_private_attr[DIR_HARDLINKS], NULL, NULL); + (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS], + &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL); + } + } else { + error = 0; /* rename failure here is not fatal */ + } + } else /* Still some 
links left */ { + cnid_t firstlink = 0; + + /* + * Update the start of the link chain. + * Note: Directory hard links store the first link in an attribute. + */ + if (IS_DIR(vp) && + getfirstlink(hfsmp, cp->c_fileid, &firstlink) == 0 && + firstlink == cndesc.cd_cnid) { + if (setfirstlink(hfsmp, cp->c_fileid, nextlinkid) == 0) + cp->c_attr.ca_recflags |= kHFSHasAttributesMask; + } else if (cp->c_attr.ca_firstlink == cndesc.cd_cnid) { + cp->c_attr.ca_firstlink = nextlinkid; + } + /* Update previous link. */ + if (prevlinkid) { + (void) cat_update_siblinglinks(hfsmp, prevlinkid, HFS_IGNORABLE_LINK, nextlinkid); + } + /* Update next link. */ + if (nextlinkid) { + (void) cat_update_siblinglinks(hfsmp, nextlinkid, prevlinkid, HFS_IGNORABLE_LINK); + } + } + + /* + * The call to cat_releasedesc below will only release the name + * buffer; it does not zero out the rest of the fields in the + * 'cat_desc' data structure. + * + * As a result, since there are still other links at this point, + * we need to make the current cnode descriptor point to the raw + * inode. If a path-based system call comes along first, it will + * replace the descriptor with a valid link ID. If a userland + * process already has a file descriptor open, then they will + * bypass that lookup, though. Replacing the descriptor CNID with + * the raw inode will force it to generate a new full path. + */ + cp->c_cnid = cp->c_fileid; + + /* Push new link count to disk. */ + cp->c_ctime = tv.tv_sec; + (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL); + + /* All done with the system files. */ + hfs_systemfile_unlock(hfsmp, lockflags); + lockflags = 0; + + /* Update file system stats. */ + hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); + + /* + * All done with this cnode's descriptor... + * + * Note: all future catalog calls for this cnode may be + * by fileid only. This is OK for HFS (which doesn't have + * file thread records) since HFS doesn't support hard links. 
 * Cache the origin of a directory or file hard link
 *
 * cnode must be locked on entry
    //TBD - Probably need to adjust for Files app and not for Finder....
    //Currently disabled - need to understand how much we need this...
 * Release any cached origins for a directory or file hard link
 *
 * cnode must be locked on entry
+ */ +static int +createindirectlink(struct hfsmount *hfsmp, u_int32_t linknum, struct cat_desc *descp, + cnid_t nextcnid, cnid_t *linkcnid, int is_inode_linkchain_set) +{ + struct FndrFileInfo *fip; + struct cat_attr attr; + + if (linknum == 0) { + LFHFS_LOG(LEVEL_ERROR, "createindirectlink: linknum is zero!\n"); + return (EINVAL); + } + + /* Setup the default attributes */ + bzero(&attr, sizeof(attr)); + + /* Links are matched to inodes by link ID and to volumes by create date */ + attr.ca_linkref = linknum; + attr.ca_itime = hfsmp->hfs_metadata_createdate; + attr.ca_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH; + attr.ca_recflags = kHFSHasLinkChainMask | kHFSThreadExistsMask; + attr.ca_flags = UF_IMMUTABLE; + fip = (struct FndrFileInfo *)&attr.ca_finderinfo; + + if (descp->cd_flags & CD_ISDIR) { + fip->fdType = SWAP_BE32 (kHFSAliasType); + fip->fdCreator = SWAP_BE32 (kHFSAliasCreator); + fip->fdFlags = SWAP_BE16 (kIsAlias); + } else /* file */ { + fip->fdType = SWAP_BE32 (kHardLinkFileType); + fip->fdCreator = SWAP_BE32 (kHFSPlusCreator); + fip->fdFlags = SWAP_BE16 (kHasBeenInited); + /* If the file inode does not have kHFSHasLinkChainBit set + * and the next link chain ID is zero, assume that this + * is pre-Leopard file inode. Therefore clear the bit. + */ + if ((is_inode_linkchain_set == 0) && (nextcnid == 0)) { + attr.ca_recflags &= ~kHFSHasLinkChainMask; + } + } + /* Create the indirect link directly in the catalog */ + return cat_createlink(hfsmp, descp, &attr, nextcnid, linkcnid); +} + + +/* + * Make a link to the cnode cp in the directory dp + * using the name in cnp. src_vp is the vnode that + * corresponds to 'cp' which was part of the arguments to + * hfs_vnop_link. + * + * The cnodes cp and dcp must be locked. 
+ *
+ * Returns 0 on success or an errno value; on failure the original
+ * catalog state is rolled back where possible, otherwise the volume
+ * is marked inconsistent.
+ */
+int
+hfs_makelink(struct hfsmount *hfsmp, struct vnode *src_vp, struct cnode *cp,struct cnode *dcp, struct componentname *cnp)
+{
+ u_int32_t indnodeno = 0;
+ char inodename[32];
+ struct cat_desc to_desc;
+ struct cat_desc link_desc;
+ int newlink = 0;
+ int retval = 0;
+ cnid_t linkcnid = 0;
+ cnid_t orig_firstlink = 0;
+ enum privdirtype type = S_ISDIR(cp->c_mode) ? DIR_HARDLINKS : FILE_HARDLINKS;
+
+ if (hfsmp->cur_link_id == 0) {
+ hfsmp->cur_link_id = ((random() & 0x3fffffff) + 100);
+ }
+
+ /* We don't allow link nodes in our private system directories. */
+ if (dcp->c_fileid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
+ dcp->c_fileid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
+ return (EPERM);
+ }
+
+ cat_cookie_t cookie;
+ bzero(&cookie, sizeof(cat_cookie_t));
+ /* Reserve some space in the Catalog file. */
+ if ((retval = cat_preflight(hfsmp, (2 * CAT_CREATE)+ CAT_RENAME, &cookie))) {
+ return (retval);
+ }
+
+ int lockflags = SFL_CATALOG | SFL_ATTRIBUTE;
+ /* Directory hard links allocate space for a symlink. */
+ if (type == DIR_HARDLINKS) {
+ lockflags |= SFL_BITMAP;
+ }
+ lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);
+
+ /* Save the current cnid value so we restore it if an error occurs. */
+ cnid_t orig_cnid = cp->c_desc.cd_cnid;
+
+ /*
+ * If this is a new hardlink then we need to create the inode
+ * and replace the original file/dir object with a link node.
+ */
+ if ((cp->c_linkcount == 2) && !(cp->c_flag & C_HARDLINK)) {
+ newlink = 1;
+ bzero(&to_desc, sizeof(to_desc));
+ to_desc.cd_parentcnid = hfsmp->hfs_private_desc[type].cd_cnid;
+ to_desc.cd_cnid = cp->c_fileid;
+ to_desc.cd_flags = (type == DIR_HARDLINKS) ? CD_ISDIR : 0;
+
+ do {
+ if (type == DIR_HARDLINKS) {
+ /* Directory hardlinks always use the cnid.
+ */
+ indnodeno = cp->c_fileid;
+ MAKE_DIRINODE_NAME(inodename, sizeof(inodename),
+ indnodeno);
+ } else {
+ /* Get a unique indirect node number */
+ /* First attempt (retval == 0 from preflight) uses the
+ * fileid; EEXIST retries draw from cur_link_id. */
+ if (retval == 0) {
+ indnodeno = cp->c_fileid;
+ } else {
+ indnodeno = hfsmp->cur_link_id++;
+ }
+ MAKE_INODE_NAME(inodename, sizeof(inodename),
+ indnodeno);
+ }
+ /* Move original file/dir to data node directory */
+ to_desc.cd_nameptr = (const u_int8_t *)inodename;
+ to_desc.cd_namelen = strlen(inodename);
+
+ retval = cat_rename(hfsmp, &cp->c_desc, &hfsmp->hfs_private_desc[type],
+ &to_desc, NULL);
+
+ if (retval != 0 && retval != EEXIST) {
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: cat_rename to %s failed (%d) fileid=%d, vol=%s\n",
+ inodename, retval, cp->c_fileid, hfsmp->vcbVN);
+ }
+ } while ((retval == EEXIST) && (type == FILE_HARDLINKS));
+ if (retval)
+ goto out;
+
+ /*
+ * Replace original file/dir with a link record.
+ */
+
+ bzero(&link_desc, sizeof(link_desc));
+ link_desc.cd_nameptr = cp->c_desc.cd_nameptr;
+ link_desc.cd_namelen = cp->c_desc.cd_namelen;
+ link_desc.cd_parentcnid = cp->c_parentcnid;
+ link_desc.cd_flags = S_ISDIR(cp->c_mode) ? CD_ISDIR : 0;
+
+ retval = createindirectlink(hfsmp, indnodeno, &link_desc, 0, &linkcnid, true);
+ if (retval)
+ {
+ int err;
+
+ /* Restore the cnode's cnid. */
+ cp->c_desc.cd_cnid = orig_cnid;
+
+ /* Put the original file back. */
+ err = cat_rename(hfsmp, &to_desc, &dcp->c_desc, &cp->c_desc, NULL);
+ if (err) {
+ if (err != EIO && err != ENXIO)
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: error %d from cat_rename backout 1", err);
+ hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED);
+ }
+ if (retval != EIO && retval != ENXIO) {
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: createindirectlink (1) failed: %d\n", retval);
+ retval = EIO;
+ }
+ goto out;
+ }
+ cp->c_attr.ca_linkref = indnodeno;
+ cp->c_desc.cd_cnid = linkcnid;
+ /* Directory hard links store the first link in an attribute.
+ */
+ if (type == DIR_HARDLINKS) {
+ if (setfirstlink(hfsmp, cp->c_fileid, linkcnid) == 0)
+ cp->c_attr.ca_recflags |= kHFSHasAttributesMask;
+ } else /* FILE_HARDLINKS */ {
+ cp->c_attr.ca_firstlink = linkcnid;
+ }
+ cp->c_attr.ca_recflags |= kHFSHasLinkChainMask;
+ } else {
+ indnodeno = cp->c_attr.ca_linkref;
+ }
+
+ /*
+ * Create a catalog entry for the new link (parentID + name).
+ */
+
+ bzero(&link_desc, sizeof(link_desc));
+ link_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
+ link_desc.cd_namelen = strlen(cnp->cn_nameptr);
+ link_desc.cd_parentcnid = dcp->c_fileid;
+ link_desc.cd_flags = S_ISDIR(cp->c_mode) ? CD_ISDIR : 0;
+
+ /* Directory hard links store the first link in an attribute. */
+ if (type == DIR_HARDLINKS) {
+ retval = getfirstlink(hfsmp, cp->c_fileid, &orig_firstlink);
+ } else /* FILE_HARDLINKS */ {
+ orig_firstlink = cp->c_attr.ca_firstlink;
+ }
+ if (retval == 0)
+ retval = createindirectlink(hfsmp, indnodeno, &link_desc, orig_firstlink, &linkcnid, (cp->c_attr.ca_recflags & kHFSHasLinkChainMask));
+
+ if (retval && newlink) {
+ int err;
+
+ /* Get rid of new link */
+ (void) cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
+
+ /* Restore the cnode's cnid. */
+ cp->c_desc.cd_cnid = orig_cnid;
+
+ /* Put the original file back. */
+ err = cat_rename(hfsmp, &to_desc, &dcp->c_desc, &cp->c_desc, NULL);
+ if (err) {
+ if (err != EIO && err != ENXIO)
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: error %d from cat_rename backout 2", err);
+ hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED);
+ }
+
+ cp->c_attr.ca_linkref = 0;
+
+ if (retval != EIO && retval != ENXIO) {
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: createindirectlink (2) failed: %d\n", retval);
+ retval = EIO;
+ }
+ goto out;
+ } else if (retval == 0) {
+
+ /* Update the original first link to point back to the new first link.
+ */
+ if (cp->c_attr.ca_recflags & kHFSHasLinkChainMask) {
+ (void) cat_update_siblinglinks(hfsmp, orig_firstlink, linkcnid, HFS_IGNORABLE_LINK);
+
+ /* Update the inode's first link value. */
+ if (type == DIR_HARDLINKS) {
+ if (setfirstlink(hfsmp, cp->c_fileid, linkcnid) == 0)
+ cp->c_attr.ca_recflags |= kHFSHasAttributesMask;
+ } else {
+ cp->c_attr.ca_firstlink = linkcnid;
+ }
+ }
+ /*
+ * Finally, if this is a new hardlink then:
+ * - update the private system directory
+ * - mark the cnode as a hard link
+ */
+ if (newlink) {
+
+ hfsmp->hfs_private_attr[type].ca_entries++;
+ /* From application perspective, directory hard link is a
+ * normal directory. Therefore count the new directory
+ * hard link for folder count calculation.
+ */
+ if (type == DIR_HARDLINKS) {
+ INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[type]);
+ }
+ retval = cat_update(hfsmp, &hfsmp->hfs_private_desc[type], &hfsmp->hfs_private_attr[type], NULL, NULL);
+ if (retval) {
+ if (retval != EIO && retval != ENXIO) {
+ LFHFS_LOG(LEVEL_ERROR, "hfs_makelink: cat_update of privdir failed! (%d)\n", retval);
+ retval = EIO;
+ }
+ hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
+ }
+ cp->c_flag |= C_HARDLINK;
+
+ vnode_t vp;
+ if ((vp = cp->c_vp) != NULL) {
+ if (vp != src_vp) {
+ cp->c_flag |= C_NEED_DVNODE_PUT;
+ }
+ }
+ if ((vp = cp->c_rsrc_vp) != NULL) {
+ if (vp != src_vp) {
+ cp->c_flag |= C_NEED_RVNODE_PUT;
+ }
+ }
+ cp->c_flag |= C_MODIFIED;
+ cp->c_touch_chgtime = TRUE;
+ }
+ }
+out:
+ hfs_systemfile_unlock(hfsmp, lockflags);
+
+ cat_postflight(hfsmp, &cookie);
+
+ if (retval == 0 && newlink) {
+ hfs_volupdate(hfsmp, VOL_MKFILE, 0);
+ }
+ return (retval);
+}
diff --git a/livefiles_hfs_plugin/lf_hfs_link.h b/livefiles_hfs_plugin/lf_hfs_link.h
new file mode 100644
index 0000000..7245d41
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_link.h
@@ -0,0 +1,24 @@
+/* Copyright © 2017-2018 Apple Inc. All rights reserved.
+ * + * lf_hfs_link.h + * livefiles_hfs + * + * Created by Or Haimovich on 17/05/2018. + */ + +#ifndef lf_hfs_link_h +#define lf_hfs_link_h + +#include "lf_hfs_catalog.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs.h" + +void hfs_relorigin(struct cnode *cp, cnid_t parentcnid); +void hfs_savelinkorigin(cnode_t *cp, cnid_t parentcnid); +void hfs_privatedir_init(struct hfsmount * hfsmp, enum privdirtype type); +int hfs_lookup_lastlink (struct hfsmount *hfsmp, cnid_t linkfileid, cnid_t *lastid, struct cat_desc *cdesc); +int hfs_unlink(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int skip_reserve); +void hfs_relorigins(struct cnode *cp); +int hfs_makelink(struct hfsmount *hfsmp, struct vnode *src_vp, struct cnode *cp,struct cnode *dcp, struct componentname *cnp); +cnid_t hfs_currentparent(cnode_t *cp, bool have_lock); +#endif /* lf_hfs_link_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_locks.c b/livefiles_hfs_plugin/lf_hfs_locks.c new file mode 100644 index 0000000..b39ffad --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_locks.c @@ -0,0 +1,186 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_locks.c + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. 
+ */
+
+#include "lf_hfs_locks.h"
+#include <assert.h>
+#include <errno.h>
+
+/* Thin wrappers mapping kernel-style lock primitives onto pthreads.
+ * Every wrapper asserts that the underlying pthread call succeeded. */
+
+void lf_lck_rw_init( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_init( lck, NULL );
+ assert( err == 0 );
+}
+
+void lf_lck_rw_destroy( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_destroy( lck );
+ assert( err == 0 );
+}
+
+void lf_lck_rw_unlock_shared( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_unlock( lck );
+ assert( err == 0 );
+}
+
+void lf_lck_rw_lock_shared( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_rdlock( lck );
+ assert( err == 0 );
+}
+
+void lf_lck_rw_lock_exclusive( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_wrlock( lck );
+ assert( err == 0 );
+}
+
+void lf_lck_rw_unlock_exclusive( pthread_rwlock_t* lck )
+{
+ errno_t err = pthread_rwlock_unlock( lck );
+ assert( err == 0 );
+}
+
+bool lf_lck_rw_try_lock( pthread_rwlock_t* lck, lck_rwlock_type_e which )
+{
+ /* NOTE(review): pthread try-lock returns 0 on success and an errno
+ * (e.g. EBUSY) on failure, so the bool returned here is true when the
+ * lock was NOT acquired -- confirm callers expect this errno-style
+ * (inverted) sense. */
+ bool trylock = false; /* initialized: keeps the return defined if
+ * the assert below is compiled out (NDEBUG) */
+
+ if ( which == LCK_RW_TYPE_SHARED )
+ {
+ trylock = pthread_rwlock_tryrdlock( lck );
+ }
+ else if ( which == LCK_RW_TYPE_EXCLUSIVE )
+ {
+ trylock = pthread_rwlock_trywrlock( lck );
+ }
+ else
+ {
+ assert(0);
+ }
+
+ return trylock;
+}
+
+void lf_lck_rw_lock_exclusive_to_shared( pthread_rwlock_t* lck)
+{
+ /* Not atomic: the exclusive lock is released before the shared lock
+ * is taken, so another thread may run in between. */
+ lf_lck_rw_unlock_exclusive( lck );
+ lf_lck_rw_lock_shared( lck );
+}
+
+bool lf_lck_rw_lock_shared_to_exclusive( pthread_rwlock_t* lck)
+{
+ /* NOTE(review): not atomic -- the shared lock is dropped before the
+ * exclusive lock is taken. Unlike the XNU primitive this never
+ * reports that the lock was relinquished (always returns true);
+ * confirm callers re-validate state after the upgrade. */
+ lf_lck_rw_unlock_shared( lck );
+ lf_lck_rw_lock_exclusive( lck );
+
+ return true;
+}
+
+void lf_cond_init( pthread_cond_t* cond )
+{
+ errno_t err = pthread_cond_init( cond, NULL );
+ assert( err == 0 );
+}
+
+void lf_cond_destroy( pthread_cond_t* cond )
+{
+ errno_t err = pthread_cond_destroy( cond );
+ assert( err == 0 );
+}
+
+/* Wait on pCond with a relative timeout; returns 0 or ETIMEDOUT. */
+int lf_cond_wait_relative(pthread_cond_t *pCond, pthread_mutex_t *pMutex, struct timespec *pTime) {
+
+ int iErr = pthread_cond_timedwait_relative_np(pCond, pMutex, pTime);
+ assert((iErr == 0) || (iErr == ETIMEDOUT));
+ return(iErr);
+}
+
+void lf_cond_wakeup(pthread_cond_t 
*pCond) { + + int iErr = pthread_cond_signal(pCond); + assert(iErr == 0); +} + +void lf_lck_mtx_init( pthread_mutex_t* lck ) +{ + errno_t err = pthread_mutex_init( lck, NULL ); + assert( err == 0 ); +} + +void lf_lck_mtx_destroy( pthread_mutex_t *lck ) +{ + errno_t err = pthread_mutex_destroy( lck ); + assert( err == 0 ); +} + +void lf_lck_mtx_lock( pthread_mutex_t* lck ) +{ + errno_t err = pthread_mutex_lock( lck ); + assert( err == 0 ); +} + +void lf_lck_mtx_unlock( pthread_mutex_t* lck ) +{ + errno_t err = pthread_mutex_unlock( lck ); + assert( err == 0 ); +} + +void lf_lck_mtx_lock_spin( pthread_mutex_t *lck ) +{ + // No real spin lock + lf_lck_mtx_lock( lck ); +} + +int lf_lck_mtx_try_lock(pthread_mutex_t *lck) { + errno_t err = pthread_mutex_trylock(lck); + return err; +} + +//void lf_lck_mtx_convert_spin( pthread_mutex_t *lck ) +//{ +// // No real spin lock +//} + +void lf_lck_spin_init( pthread_mutex_t *lck ) +{ + errno_t err = pthread_mutex_init( lck, NULL ); + assert( err == 0 ); +} + +void lf_lck_spin_destroy( pthread_mutex_t *lck ) +{ + errno_t err = pthread_mutex_destroy( lck ); + assert( err == 0 ); +} + +void lf_lck_spin_lock( pthread_mutex_t *lck ) +{ + errno_t err = pthread_mutex_lock( lck ); + assert( err == 0 ); +} + +void lf_lck_spin_unlock( pthread_mutex_t *lck ) +{ + errno_t err = pthread_mutex_unlock( lck ); + assert( err == 0 ); +} + +lck_attr_t *lf_lck_attr_alloc_init( void ) +{ + static lck_attr_t attr = {0}; + return &attr; +} +lck_grp_attr_t *lf_lck_grp_attr_alloc_init( void ) +{ + static lck_grp_attr_t group_attr = {0}; + return &group_attr; +} +lck_grp_t *lf_lck_grp_alloc_init( void ) +{ + static lck_grp_t group = {0}; + return &group; +} diff --git a/livefiles_hfs_plugin/lf_hfs_locks.h b/livefiles_hfs_plugin/lf_hfs_locks.h new file mode 100644 index 0000000..442aa5f --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_locks.h @@ -0,0 +1,65 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. 
+ *
+ * lf_hfs_locks.h
+ * livefiles_hfs
+ *
+ * Created by Or Haimovich on 18/3/18.
+ */
+
+#ifndef lf_hfs_locks_h
+#define lf_hfs_locks_h
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <time.h>
+
+typedef enum
+{
+ LCK_RW_TYPE_SHARED,
+ LCK_RW_TYPE_EXCLUSIVE
+
+} lck_rwlock_type_e;
+
+/* Opaque placeholders: lock group/attribute objects are unused by the
+ * pthread-backed implementation in lf_hfs_locks.c. */
+typedef uint8_t lck_attr_t;
+typedef uint8_t lck_grp_attr_t;
+typedef uint8_t lck_grp_t;
+
+// Read/Write locks.
+void lf_lck_rw_init ( pthread_rwlock_t* lck );
+void lf_lck_rw_destroy ( pthread_rwlock_t* lck );
+void lf_lck_rw_unlock_shared ( pthread_rwlock_t* lck );
+void lf_lck_rw_lock_shared ( pthread_rwlock_t* lck );
+void lf_lck_rw_lock_exclusive ( pthread_rwlock_t* lck );
+void lf_lck_rw_unlock_exclusive ( pthread_rwlock_t* lck );
+bool lf_lck_rw_try_lock ( pthread_rwlock_t* lck, lck_rwlock_type_e which );
+void lf_lck_rw_lock_exclusive_to_shared ( pthread_rwlock_t* lck);
+bool lf_lck_rw_lock_shared_to_exclusive ( pthread_rwlock_t* lck);
+
+// Mutex locks.
+void lf_lck_mtx_init ( pthread_mutex_t* lck );
+void lf_lck_mtx_destroy ( pthread_mutex_t *lck );
+void lf_lck_mtx_lock ( pthread_mutex_t* lck );
+void lf_lck_mtx_unlock ( pthread_mutex_t* lck );
+void lf_lck_mtx_lock_spin ( pthread_mutex_t *lck );
+int lf_lck_mtx_try_lock ( pthread_mutex_t *lck );
+/* NOTE(review): declared here but the definition in lf_hfs_locks.c is
+ * commented out -- any caller will fail at link time. */
+void lf_lck_mtx_convert_spin ( pthread_mutex_t *lck );
+
+//Cond
+void lf_cond_destroy( pthread_cond_t* cond );
+void lf_cond_init( pthread_cond_t* cond );
+int lf_cond_wait_relative(pthread_cond_t *pCond, pthread_mutex_t *pMutex, struct timespec *pTime);
+void lf_cond_wakeup(pthread_cond_t *pCond);
+
+// Spin locks.
+void lf_lck_spin_init ( pthread_mutex_t *lck ); +void lf_lck_spin_destroy ( pthread_mutex_t *lck ); +void lf_lck_spin_lock ( pthread_mutex_t *lck ); +void lf_lck_spin_unlock ( pthread_mutex_t *lck ); + +// Init +lck_attr_t *lf_lck_attr_alloc_init ( void ); +lck_grp_attr_t *lf_lck_grp_attr_alloc_init ( void ); +lck_grp_t *lf_lck_grp_alloc_init ( void ); + +#endif /* lf_hfs_locks_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_logger.c b/livefiles_hfs_plugin/lf_hfs_logger.c new file mode 100644 index 0000000..208fef0 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_logger.c @@ -0,0 +1,33 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_logger.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#include "lf_hfs_logger.h" + +os_log_t gpsHFSLog; +bool gbIsLoggerInit = false; + +const os_log_type_t gpeHFSToOsLevel [LEVEL_AMOUNT] = { + [ LEVEL_DEBUG ] = OS_LOG_TYPE_DEBUG, + [ LEVEL_DEFAULT ] = OS_LOG_TYPE_DEFAULT, + [ LEVEL_ERROR ] = OS_LOG_TYPE_ERROR +}; + +int +LFHFS_LoggerInit( void ) +{ + int iErr = 0; + if ( (gpsHFSLog = os_log_create("com.apple.filesystems.livefiles_hfs_plugin", "plugin")) == NULL) { + iErr = 1; + } + else + { + gbIsLoggerInit = true; + } + + return iErr; +} diff --git a/livefiles_hfs_plugin/lf_hfs_logger.h b/livefiles_hfs_plugin/lf_hfs_logger.h new file mode 100644 index 0000000..5ea1a2d --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_logger.h @@ -0,0 +1,38 @@ +/* Copyright © 2018 Apple Inc. All rights reserved. + * + * lf_hfs_logger.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 20/03/2018. + */ + +#ifndef lf_hfs_logger_h +#define lf_hfs_logger_h + +#include + +typedef enum +{ + LEVEL_DEBUG, + LEVEL_DEFAULT, + LEVEL_ERROR, + LEVEL_AMOUNT, + +} HFSLogLevel_e; + +extern os_log_t gpsHFSLog; +extern const os_log_type_t gpeHFSToOsLevel[ LEVEL_AMOUNT ]; +extern bool gbIsLoggerInit; + + +#define LFHFS_LOG( _level, ... 
) \ + do { \ + if ( gbIsLoggerInit ) \ + { \ + os_log_with_type((gpsHFSLog), gpeHFSToOsLevel[_level], ##__VA_ARGS__); \ + } \ + } while(0) + +int LFHFS_LoggerInit( void ); + +#endif /* lf_hfs_logger_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_lookup.c b/livefiles_hfs_plugin/lf_hfs_lookup.c new file mode 100644 index 0000000..dd4b4bd --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_lookup.c @@ -0,0 +1,254 @@ +// +// lf_hfs_lookup.c +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 25/03/2018. +// + +#include "lf_hfs.h" +#include "lf_hfs_lookup.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_link.h" + +static int +hfs_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, int *cnode_locked) +{ + struct cnode *dcp; /* cnode for directory being searched */ + struct vnode *tvp; /* target vnode */ + struct hfsmount *hfsmp; + int flags; + int nameiop; + int retval = 0; + struct cat_desc desc; + struct cat_desc cndesc; + struct cat_attr attr; + struct cat_fork fork; + int lockflags; + int newvnode_flags = 0; + +retry: + newvnode_flags = 0; + dcp = NULL; + hfsmp = VTOHFS(dvp); + *vpp = NULL; + *cnode_locked = 0; + tvp = NULL; + nameiop = cnp->cn_nameiop; + flags = cnp->cn_flags; + bzero(&desc, sizeof(desc)); + + if (hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) { + retval = ENOENT; /* The parent no longer exists ? */ + goto exit; + } + dcp = VTOC(dvp); + + /* + * Need to understand if we need this check.. as we took exclusive lock.. + */ + if (dcp->c_flag & C_DIR_MODIFICATION){ + hfs_unlock(dcp); + usleep( 1000 ); + goto retry; + } + + /* + * We shouldn't need to go to the catalog if there are no children. + * However, in the face of a minor disk corruption where the valence of + * the directory is off, we could infinite loop here if we return ENOENT + * even though there are actually items in the directory. 
(create will + * see the ENOENT, try to create something, which will return with + * EEXIST over and over again). As a result, always check the catalog. + */ + + bzero(&cndesc, sizeof(cndesc)); + cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; + cndesc.cd_namelen = cnp->cn_namelen; + cndesc.cd_parentcnid = dcp->c_fileid; + cndesc.cd_hint = dcp->c_childhint; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + retval = cat_lookup(hfsmp, &cndesc, 0, &desc, &attr, &fork, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (retval == 0) { + dcp->c_childhint = desc.cd_hint; + /* + * Note: We must drop the parent lock here before calling + * hfs_getnewvnode (which takes the child lock). + */ + hfs_unlock(dcp); + dcp = NULL; + + /* Verify that the item just looked up isn't one of the hidden directories. */ + if (desc.cd_cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || + desc.cd_cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { + retval = ENOENT; + goto exit; + } + goto found; + } + + if (retval == HFS_ERESERVEDNAME) { + /* + * We found the name in the catalog, but it is unavailable + * to us. The exact error to return to our caller depends + * on the operation, and whether we've already reached the + * last path component. In all cases, avoid a negative + * cache entry, since someone else may be able to access + * the name if their lookup is configured differently. + */ + + cnp->cn_flags &= ~MAKEENTRY; + + if (((flags & ISLASTCN) == 0) || ((nameiop == LOOKUP) || (nameiop == DELETE))) { + /* A reserved name for a pure lookup is the same as the path not being present */ + retval = ENOENT; + } else { + /* A reserved name with intent to create must be rejected as impossible */ + retval = EEXIST; + } + } + if (retval != ENOENT) + goto exit; + /* + * This is a non-existing entry + * + * If creating, and at end of pathname and current + * directory has not been removed, then can consider + * allowing file to be created. 
+ */ + if ((nameiop == CREATE || nameiop == RENAME) && + (flags & ISLASTCN) && + !(ISSET(dcp->c_flag, C_DELETED | C_NOEXISTS))) { + retval = EJUSTRETURN; + goto exit; + } + + goto exit; + +found: + if (flags & ISLASTCN) { + switch(nameiop) { + case DELETE: + cnp->cn_flags &= ~MAKEENTRY; + break; + + case RENAME: + cnp->cn_flags &= ~MAKEENTRY; + break; + default: + break; + } + } + + int type = (attr.ca_mode & S_IFMT); + + if (!(flags & ISLASTCN) && (type != S_IFDIR) && (type != S_IFLNK)) { + retval = ENOTDIR; + goto exit; + } + /* Don't cache directory hardlink names. */ + if (attr.ca_recflags & kHFSHasLinkChainMask) { + cnp->cn_flags &= ~MAKEENTRY; + } + /* Names with composed chars are not cached. */ + if (cnp->cn_namelen != desc.cd_namelen) + cnp->cn_flags &= ~MAKEENTRY; + + retval = hfs_getnewvnode(hfsmp, dvp, cnp, &desc, 0, &attr, &fork, &tvp, &newvnode_flags); + + if (retval) { + /* + * If this was a create/rename operation lookup, then by this point + * we expected to see the item returned from hfs_getnewvnode above. + * In the create case, it would probably eventually bubble out an EEXIST + * because the item existed when we were trying to create it. In the + * rename case, it would let us know that we need to go ahead and + * delete it as part of the rename. However, if we hit the condition below + * then it means that we found the element during cat_lookup above, but + * it is now no longer there. We simply behave as though we never found + * the element at all and return EJUSTRETURN. + */ + if ((retval == ENOENT) && + ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) && + (flags & ISLASTCN)) { + retval = EJUSTRETURN; + } + + /* + * If this was a straight lookup operation, we may need to redrive the entire + * lookup starting from cat_lookup if the element was deleted as the result of + * a rename operation. 
Since rename is supposed to guarantee atomicity, then + * lookups cannot fail because the underlying element is deleted as a result of + * the rename call -- either they returned the looked up element prior to rename + * or return the newer element. If we are in this region, then all we can do is add + * workarounds to guarantee the latter case. The element has already been deleted, so + * we just re-try the lookup to ensure the caller gets the most recent element. + */ + if ((retval == ENOENT) && (cnp->cn_nameiop == LOOKUP) && + (newvnode_flags & (GNV_CHASH_RENAMED | GNV_CAT_DELETED))) { + if (dcp) { + hfs_unlock (dcp); + } + /* get rid of any name buffers that may have lingered from the cat_lookup call */ + cat_releasedesc (&desc); + goto retry; + } + + /* Also, re-drive the lookup if the item we looked up was a hardlink, and the number + * or name of hardlinks has changed in the interim between the cat_lookup above, and + * our call to hfs_getnewvnode. hfs_getnewvnode will validate the cattr we passed it + * against what is actually in the catalog after the cnode is created. If there were + * any issues, it will bubble out ERECYCLE, which we need to swallow and use as the + * key to redrive as well. We need to special case this below because in this case, + * it needs to occur regardless of the type of lookup we're doing here. + */ + if ((retval == ERECYCLE) && (newvnode_flags & GNV_CAT_ATTRCHANGED)) { + if (dcp) { + hfs_unlock (dcp); + } + /* get rid of any name buffers that may have lingered from the cat_lookup call */ + cat_releasedesc (&desc); + goto retry; + } + + /* skip to the error-handling code if we can't retry */ + goto exit; + } + + /* + * Save the origin info for file and directory hardlinks. Directory hardlinks + * need the origin for '..' lookups, and file hardlinks need it to ensure that + * competing lookups do not cause us to vend different hardlinks than the ones requested. 
+ */ + if (ISSET(VTOC(tvp)->c_flag, C_HARDLINK)) + hfs_savelinkorigin(VTOC(tvp), VTOC(dvp)->c_fileid); + + *cnode_locked = 1; + *vpp = tvp; + +exit: + if (dcp) { + hfs_unlock(dcp); + } + cat_releasedesc(&desc); + + return (retval); +} + +int +hfs_vnop_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) +{ + int error = 0; + int cnode_locked = 0; + *vpp = NULL; + + error = hfs_lookup(dvp, vpp, cnp, &cnode_locked); + + if (cnode_locked) + hfs_unlock(VTOC(*vpp)); + + return (error); +} diff --git a/livefiles_hfs_plugin/lf_hfs_lookup.h b/livefiles_hfs_plugin/lf_hfs_lookup.h new file mode 100644 index 0000000..c34399a --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_lookup.h @@ -0,0 +1,16 @@ +// +// lf_hfs_lookup.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 25/03/2018. +// + +#ifndef lf_hfs_lookup_h +#define lf_hfs_lookup_h + +#include "lf_hfs_vnode.h" +#include "lf_hfs_vnops.h" + +int hfs_vnop_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp); + +#endif /* lf_hfs_lookup_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_mount.h b/livefiles_hfs_plugin/lf_hfs_mount.h new file mode 100644 index 0000000..0393752 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_mount.h @@ -0,0 +1,71 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_mount.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. 
+ */ + +#ifndef lf_hfs_mount_h +#define lf_hfs_mount_h + +#include + +/* + * Arguments to mount HFS-based filesystems + */ + +#define OVERRIDE_UNKNOWN_PERMISSIONS 0 + +#define UNKNOWNUID ((uid_t)99) +#define UNKNOWNGID ((gid_t)99) +#define UNKNOWNPERMISSIONS (S_IRWXU | S_IROTH | S_IXOTH) /* 705 */ + +struct hfs_mount_args { + char *fspec; /* block special device to mount */ + uid_t hfs_uid; /* uid that owns hfs files (standard HFS only) */ + gid_t hfs_gid; /* gid that owns hfs files (standard HFS only) */ + mode_t hfs_mask; /* mask to be applied for hfs perms (standard HFS only) */ + u_int32_t hfs_encoding; /* encoding for this volume (standard HFS only) */ + struct timezone hfs_timezone; /* user time zone info (standard HFS only) */ + int flags; /* mounting flags, see below */ + int journal_tbuffer_size; /* size in bytes of the journal transaction buffer */ + int journal_flags; /* flags to pass to journal_open/create */ + int journal_disable; /* don't use journaling (potentially dangerous) */ +}; + +#define HFSFSMNT_NOXONFILES 0x1 /* disable execute permissions for files */ +#define HFSFSMNT_WRAPPER 0x2 /* mount HFS wrapper (if it exists) */ +#define HFSFSMNT_EXTENDED_ARGS 0x4 /* indicates new fields after "flags" are valid */ + +/* + * User specifiable flags. + * + * Unmount uses MNT_FORCE flag. 
+ */ +#define MNT_RDONLY 0x00000001 /* read only filesystem */ +#define MNT_SYNCHRONOUS 0x00000002 /* file system written synchronously */ +#define MNT_NOEXEC 0x00000004 /* can't exec from filesystem */ +#define MNT_NOSUID 0x00000008 /* don't honor setuid bits on fs */ +#define MNT_NODEV 0x00000010 /* don't interpret special files */ +#define MNT_UNION 0x00000020 /* union with underlying filesystem */ +#define MNT_ASYNC 0x00000040 /* file system written asynchronously */ +#define MNT_CPROTECT 0x00000080 /* file system supports content protection */ + +#define MNT_LOCAL 0x00001000 /* filesystem is stored locally */ +#define MNT_QUOTA 0x00002000 /* quotas are enabled on filesystem */ +#define MNT_ROOTFS 0x00004000 /* identifies the root filesystem */ +#define MNT_DOVOLFS 0x00008000 /* FS supports volfs (deprecated flag in Mac OS X 10.5) */ + +#define MNT_DONTBROWSE 0x00100000 /* file system is not appropriate path to user data */ +#define MNT_IGNORE_OWNERSHIP 0x00200000 /* VFS will ignore ownership information on filesystem objects */ +#define MNT_AUTOMOUNTED 0x00400000 /* filesystem was mounted by automounter */ +#define MNT_JOURNALED 0x00800000 /* filesystem is journaled */ +#define MNT_NOUSERXATTR 0x01000000 /* Don't allow user extended attributes */ +#define MNT_DEFWRITE 0x02000000 /* filesystem should defer writes */ +#define MNT_MULTILABEL 0x04000000 /* MAC support for individual labels */ +#define MNT_NOATIME 0x10000000 /* disable update of file access time */ +#define MNT_SNAPSHOT 0x40000000 /* The mount is a snapshot */ + + +#endif /* lf_hfs_mount_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_rangelist.c b/livefiles_hfs_plugin/lf_hfs_rangelist.c new file mode 100644 index 0000000..cfec3af --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_rangelist.c @@ -0,0 +1,381 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_rangelist.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 19/03/2018. 
 */

#include "lf_hfs_rangelist.h"
#include "lf_hfs_vfsutils.h"

static void rl_collapse_forwards(struct rl_head *rangelist, struct rl_entry *range);
static void rl_collapse_backwards(struct rl_head *rangelist, struct rl_entry *range);
static void rl_collapse_neighbors(struct rl_head *rangelist, struct rl_entry *range);

/*
 * Walk the list of ranges for an entry to
 * find an overlapping range (if any).
 *
 * NOTE: this returns only the FIRST overlapping range.
 * There may be more than one.
 *
 * On RL_NOOVERLAP the scan still sets *overlap: either to NULL (no entry
 * starts after 'end') or to the first entry lying entirely beyond 'end'.
 * rl_add() relies on that value as its insertion point.
 */
static enum rl_overlaptype
rl_scan_from(struct rl_head *rangelist __unused,
             off_t start,
             off_t end,
             struct rl_entry **overlap,
             struct rl_entry *range)
{

    while (range)
    {
        enum rl_overlaptype ot = rl_overlap(range, start, end);

        /* Stop at the first overlap, or once we have walked past 'end'
         * (entries are kept in ascending order, so no later entry can
         * overlap either). */
        if (ot != RL_NOOVERLAP || range->rl_start > end)
        {
            *overlap = range;
            return ot;
        }

        range = TAILQ_NEXT(range, rl_link);
    }

    *overlap = NULL;
    return RL_NOOVERLAP;
}

/* Initialize an empty range list. */
void
rl_init(struct rl_head *rangelist)
{
    TAILQ_INIT(rangelist);
}

/*
 * Classify how the (inclusive) interval [start, end] relates to *range.
 */
enum rl_overlaptype
rl_overlap(const struct rl_entry *range, off_t start, off_t end)
{
    /*
     * OK, check for overlap
     *
     * Six cases:
     *    0) no overlap (RL_NOOVERLAP)
     *    1) overlap == range (RL_MATCHINGOVERLAP)
     *    2) overlap contains range (RL_OVERLAPCONTAINSRANGE)
     *    3) range contains overlap (RL_OVERLAPISCONTAINED)
     *    4) overlap starts before range (RL_OVERLAPSTARTSBEFORE)
     *    5) overlap ends after range (RL_OVERLAPENDSAFTER)
     */
    if (start > range->rl_end || range->rl_start > end)
    {
        /* Case 0 (RL_NOOVERLAP) */
        return RL_NOOVERLAP;
    }

    if (range->rl_start == start && range->rl_end == end)
    {
        /* Case 1 (RL_MATCHINGOVERLAP) */
        return RL_MATCHINGOVERLAP;
    }

    if (range->rl_start <= start && range->rl_end >= end)
    {
        /* Case 2 (RL_OVERLAPCONTAINSRANGE) */
        return RL_OVERLAPCONTAINSRANGE;
    }

    if (start <= range->rl_start && end >= range->rl_end)
    {
        /* Case 3 (RL_OVERLAPISCONTAINED) */
        return RL_OVERLAPISCONTAINED;
    }

    if (range->rl_start < start && range->rl_end < end)
    {
        /* Case 4 (RL_OVERLAPSTARTSBEFORE) */
        return RL_OVERLAPSTARTSBEFORE;
    }

    /* Case 5 (RL_OVERLAPENDSAFTER) */
    // range->rl_start > start && range->rl_end > end
    return RL_OVERLAPENDSAFTER;
}

/*
 * Remove a range from a range list.
 *
 * Generally, find the range (or an overlap to that range)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
void
rl_remove(off_t start, off_t end, struct rl_head *rangelist)
{
    struct rl_entry *range, *next_range, *overlap, *splitrange;
    int ovcase;

    if (TAILQ_EMPTY(rangelist))
    {
        return;
    };

    range = TAILQ_FIRST(rangelist);
    while ((ovcase = rl_scan_from(rangelist, start, end, &overlap, range)))
    {
        switch (ovcase)
        {

            case RL_MATCHINGOVERLAP: /* 1: overlap == range */
                TAILQ_REMOVE(rangelist, overlap, rl_link);
                hfs_free(overlap);
                break;

            case RL_OVERLAPCONTAINSRANGE: /* 2: overlap contains range: split it */
                if (overlap->rl_start == start) {
                    overlap->rl_start = end + 1;
                    break;
                };

                if (overlap->rl_end == end) {
                    overlap->rl_end = start - 1;
                    break;
                };

                /*
                 * Make a new range consisting of the last part of the encompassing range
                 *
                 * NOTE(review): the hfs_malloc() result is used unchecked --
                 * presumably it aborts (or cannot fail) on OOM; confirm
                 * against lf_hfs_utils.
                 */
                splitrange = hfs_malloc(sizeof(struct rl_entry));
                splitrange->rl_start = end + 1;
                splitrange->rl_end = overlap->rl_end;
                overlap->rl_end = start - 1;

                /*
                 * Now link the new entry into the range list after the range from which it was split:
                 */
                TAILQ_INSERT_AFTER(rangelist, overlap, splitrange, rl_link);
                break;

            case RL_OVERLAPISCONTAINED: /* 3: range contains overlap */
                /* Check before discarding overlap entry */
                next_range = TAILQ_NEXT(overlap, rl_link);
                TAILQ_REMOVE(rangelist, overlap, rl_link);
                hfs_free(overlap);
                /* More entries may still fall inside [start, end]:
                 * keep scanning from the successor. */
                if (next_range)
                {
                    range = next_range;
                    continue;
                };
                break;

            case RL_OVERLAPSTARTSBEFORE: /* 4: overlap starts before range */
                overlap->rl_end = start - 1;
                /* The tail of [start, end] may still hit later entries. */
                range = TAILQ_NEXT(overlap, rl_link);
                if (range) {
                    continue;
                }
                break;

            case RL_OVERLAPENDSAFTER: /* 5: overlap ends after range */
                /* Guard against end+1 overflowing past RL_INFINITY. */
                overlap->rl_start = (end == RL_INFINITY ? RL_INFINITY : end + 1);
                break;
        }
        break;
    }
}

/* Number of bytes covered by 'range' (bounds are inclusive). */
off_t rl_len(const struct rl_entry *range)
{
    return range->rl_end - range->rl_start + 1;
}

/* Free every entry and reset the list to empty. */
void rl_remove_all(struct rl_head *rangelist)
{
    struct rl_entry *r, *nextr;
    TAILQ_FOREACH_SAFE(r, rangelist, rl_link, nextr){
        hfs_free(r);
    }
    TAILQ_INIT(rangelist);
}

/*
 * Add a range to the list
 */
void
rl_add(off_t start, off_t end, struct rl_head *rangelist)
{
    struct rl_entry *range;
    struct rl_entry *overlap;
    enum rl_overlaptype ovcase;

#ifdef RL_DIAGNOSTIC
    if (end < start)
    {
        LFHFS_LOG(LEVEL_ERROR, "rl_add: end < start?!");
        hfs_assert(0);
    }
#endif

    ovcase = rl_scan(rangelist, start, end, &overlap);

    /*
     * Six cases:
     *    0) no overlap
     *    1) overlap == range
     *    2) overlap contains range
     *    3) range contains overlap
     *    4) overlap starts before range
     *    5) overlap ends after range
     */
    switch (ovcase) {
        case RL_NOOVERLAP: /* 0: no overlap */
            /*
             * overlap points to the entry we should insert before, or
             * if NULL, we should insert at the end.
             */
            range = hfs_mallocz(sizeof(*range));
            range->rl_start = start;
            range->rl_end = end;

            /* Link in the new range: */
            if (overlap) {
                TAILQ_INSERT_BEFORE(overlap, range, rl_link);
            } else {
                TAILQ_INSERT_TAIL(rangelist, range, rl_link);
            }

            /* Check to see if any ranges can be combined (possibly including the immediately
             preceding range entry)
             */
            rl_collapse_neighbors(rangelist, range);
            break;

        case RL_MATCHINGOVERLAP: /* 1: overlap == range */
        case RL_OVERLAPCONTAINSRANGE: /* 2: overlap contains range */
            /* Nothing to do: the new range is already fully covered. */
            break;

        case RL_OVERLAPISCONTAINED: /* 3: range contains overlap */
            /*
             * Replace the overlap with the new, larger range:
             */
            overlap->rl_start = start;
            overlap->rl_end = end;
            rl_collapse_neighbors(rangelist, overlap);
            break;

        case RL_OVERLAPSTARTSBEFORE: /* 4: overlap starts before range */
            /*
             * Expand the overlap area to cover the new range:
             */
            overlap->rl_end = end;
            rl_collapse_forwards(rangelist, overlap);
            break;

        case RL_OVERLAPENDSAFTER: /* 5: overlap ends after range */
            /*
             * Expand the overlap area to cover the new range:
             */
            overlap->rl_start = start;
            rl_collapse_backwards(rangelist, overlap);
            break;
    }

#ifdef RL_DIAGNOSTIC
    rl_verify(rangelist);
#endif
}

/*
 * Scan a range list for an entry in a specified range (if any):
 *
 * NOTE: this returns only the FIRST overlapping range.
 * There may be more than one.
 */

enum rl_overlaptype
rl_scan(struct rl_head *rangelist, off_t start, off_t end, struct rl_entry **overlap)
{
    return rl_scan_from(rangelist, start, end, overlap, TAILQ_FIRST(rangelist));
}

/*
 * Merge 'range' with every following entry that it now touches or overlaps
 * (entries whose start is <= range->rl_end + 1), freeing the absorbed
 * entries.
 */
static void
rl_collapse_forwards(struct rl_head *rangelist, struct rl_entry *range) {
    struct rl_entry *next_range;

    while ((next_range = TAILQ_NEXT(range, rl_link))) {
        /* A gap of at least one byte remains: nothing more to merge.
         * (RL_INFINITY is special-cased so rl_end + 1 cannot overflow.) */
        if ((range->rl_end != RL_INFINITY) && (range->rl_end < next_range->rl_start - 1)) return;

        /* Expand this range to include the next range: */
        range->rl_end = next_range->rl_end;

        /* Remove the now covered range from the list: */
        TAILQ_REMOVE(rangelist, next_range, rl_link);
        hfs_free(next_range);

#ifdef RL_DIAGNOSTIC
        rl_verify(rangelist);
#endif
    };
}

/*
 * Mirror of rl_collapse_forwards(): merge 'range' with every preceding
 * entry that touches or overlaps it.
 */
static void
rl_collapse_backwards(struct rl_head *rangelist, struct rl_entry *range) {
    struct rl_entry *prev_range;

    while ((prev_range = TAILQ_PREV(range, rl_head, rl_link))) {
        if (prev_range->rl_end < range->rl_start -1) {
#ifdef RL_DIAGNOSTIC
            rl_verify(rangelist);
#endif
            return;
        };

        /* Expand this range to include the previous range: */
        range->rl_start = prev_range->rl_start;

        /* Remove the now covered range from the list: */
        TAILQ_REMOVE(rangelist, prev_range, rl_link);
        hfs_free(prev_range);
    };
}

/* Merge 'range' with adjacent/overlapping neighbors in both directions. */
static void
rl_collapse_neighbors(struct rl_head *rangelist, struct rl_entry *range)
{
    rl_collapse_forwards(rangelist, range);
    rl_collapse_backwards(rangelist, range);
}

/*
 * In the case where b is contained by a, we return the largest part
 * remaining. The result is stored in a.
 */
void rl_subtract(struct rl_entry *a, const struct rl_entry *b)
{
    switch (rl_overlap(b, a->rl_start, a->rl_end)) {
        case RL_MATCHINGOVERLAP:
        case RL_OVERLAPCONTAINSRANGE:
            /* b covers all of a: leave a empty (end < start). */
            a->rl_end = a->rl_start - 1;
            break;
        case RL_OVERLAPISCONTAINED:
            // Keep the bigger part
            if (b->rl_start - a->rl_start >= a->rl_end - b->rl_end) {
                // Keep left
                a->rl_end = b->rl_start - 1;
            } else {
                // Keep right
                a->rl_start = b->rl_end + 1;
            }
            break;
        case RL_OVERLAPSTARTSBEFORE:
            a->rl_start = b->rl_end + 1;
            break;
        case RL_OVERLAPENDSAFTER:
            a->rl_end = b->rl_start - 1;
            break;
        case RL_NOOVERLAP:
            break;
    }
}

/* Convenience constructor for an on-stack rl_entry (not linked anywhere). */
struct rl_entry rl_make(off_t start, off_t end)
{
    return (struct rl_entry){ .rl_start = start, .rl_end = end };
}
diff --git a/livefiles_hfs_plugin/lf_hfs_rangelist.h b/livefiles_hfs_plugin/lf_hfs_rangelist.h
new file mode 100644
index 0000000..a082eff
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_rangelist.h
@@ -0,0 +1,44 @@
/* Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * lf_hfs_rangelist.h
 * livefiles_hfs
 *
 * Created by Yakov Ben Zaken on 19/03/2018.
+ */ + +#ifndef lf_hfs_rangelist_h +#define lf_hfs_rangelist_h + +#include +#include + +TAILQ_HEAD(rl_head, rl_entry); + +struct rl_entry { + TAILQ_ENTRY(rl_entry) rl_link; + off_t rl_start; + off_t rl_end; +}; + +enum rl_overlaptype { + RL_NOOVERLAP = 0, /* 0 */ + RL_MATCHINGOVERLAP, /* 1 */ + RL_OVERLAPCONTAINSRANGE, /* 2 */ + RL_OVERLAPISCONTAINED, /* 3 */ + RL_OVERLAPSTARTSBEFORE, /* 4 */ + RL_OVERLAPENDSAFTER /* 5 */ +}; + +#define RL_INFINITY INT64_MAX + +void rl_init(struct rl_head *rangelist); +enum rl_overlaptype rl_overlap(const struct rl_entry *range, off_t start, off_t end); +void rl_remove(off_t start, off_t end, struct rl_head *rangelist); +off_t rl_len(const struct rl_entry *range); +void rl_remove_all(struct rl_head *rangelist); +enum rl_overlaptype rl_scan(struct rl_head *rangelist, off_t start, off_t end, struct rl_entry **overlap); +void rl_add(off_t start, off_t end, struct rl_head *rangelist); +void rl_subtract(struct rl_entry *a, const struct rl_entry *b); +struct rl_entry rl_make(off_t start, off_t end); + +#endif /* lf_hfs_rangelist_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_raw_read_write.c b/livefiles_hfs_plugin/lf_hfs_raw_read_write.c new file mode 100644 index 0000000..f3ab6fd --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_raw_read_write.c @@ -0,0 +1,534 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_raw_read_write.c + * livefiles_hfs + * + * Created by Or Haimovich on 19/3/18. 
 */

#include "lf_hfs_raw_read_write.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_fsops_handler.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_vfsutils.h"

/* Upper bound, in bytes, for a single pread/pwrite request. */
#define MAX_READ_WRITE_LENGTH (0x7ffff000)

/* Size of the shared zero buffer used by raw_readwrite_zero_fill_fill(). */
#define ZERO_BUF_SIZE (1024*1024)

/* Lazily allocated buffer of zeroes; see raw_readwrite_zero_fill_init(). */
static void* gpvZeroBuf = NULL;


/*
 * Translate a byte offset in psVnode's fork into a physical location:
 *   *puStartCluster             - allocation-block number holding the offset,
 *   *puInClusterOffset          - byte offset of the mapped sector within
 *                                 that block,
 *   *puContigousClustersInBytes - contiguous bytes available from there,
 *                                 as reported by MapFileBlockC().
 * Each out-parameter may be NULL. Returns 0 or an errno-style error.
 */
int
raw_readwrite_get_cluster_from_offset( vnode_t psVnode, uint64_t uWantedOffset, uint64_t* puStartCluster, uint64_t* puInClusterOffset, uint64_t* puContigousClustersInBytes )
{
    uint32_t uSectorsInCluster = HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->blockSize / HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size;

    uint64_t uStartSector = 0;
    size_t uAvailableBytes = 0;

    int iErr = MapFileBlockC( HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount), VTOF(psVnode), MAX_READ_WRITE_LENGTH, uWantedOffset, (daddr64_t*)&uStartSector, &uAvailableBytes );
    if ( iErr != 0 )
    {
        return iErr;
    }

    if (puStartCluster) {
        *puStartCluster = uStartSector / uSectorsInCluster;
    }

    if (puInClusterOffset) {
        *puInClusterOffset = (uStartSector % uSectorsInCluster) * HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size;
    }

    if (puContigousClustersInBytes) {
        *puContigousClustersInBytes = uAvailableBytes;
    }
    return iErr;
}

/*
 * Read uBufLen bytes from the raw device backing psMountVnode, starting at
 * block uBlockN (of size uClusterSize). Returns 0, or errno from a failed
 * pread(), or EIO on a short read.
 */
errno_t raw_readwrite_read_mount( vnode_t psMountVnode, uint64_t uBlockN, uint64_t uClusterSize, void* pvBuf, uint64_t uBufLen, uint64_t *puActuallyRead, uint64_t* puReadStartCluster ) {
    int iErr = 0;
    int iFD = VNODE_TO_IFD(psMountVnode);
    uint64_t uWantedOffset = uBlockN * uClusterSize;

    if (puReadStartCluster)
        *puReadStartCluster = uBlockN;

    hfs_assert( uBufLen >= uClusterSize );

    ssize_t iReadBytes = pread(iFD, pvBuf, uBufLen, uWantedOffset);
    if ( iReadBytes != (ssize_t)uBufLen )
    {
        iErr = ( (iReadBytes < 0) ? errno : EIO );
        LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read_mount failed [%d]\n", iErr );
    }

    /* NOTE(review): on pread() failure iReadBytes is -1, so *puActuallyRead
     * wraps to UINT64_MAX here -- callers should trust it only when the
     * return value is 0. */
    if (puActuallyRead)
        *puActuallyRead = iReadBytes;

    return iErr;
}

/*
 * Write uBufLen bytes to the raw device backing psMountVnode, starting at
 * block uBlockN (of size uClusterSize). Returns 0, or errno from a failed
 * pwrite(), or EIO on a short write.
 */
errno_t raw_readwrite_write_mount( vnode_t psMountVnode, uint64_t uBlockN, uint64_t uClusterSize, void* pvBuf, uint64_t uBufLen, uint64_t *piActuallyWritten, uint64_t* puWriteStartCluster ) {
    int iErr = 0;
    int iFD = VNODE_TO_IFD(psMountVnode);
    uint64_t uWantedOffset = uBlockN * uClusterSize;
    ssize_t uActuallyWritten = 0;

    if (puWriteStartCluster)
        *puWriteStartCluster = uBlockN;

    hfs_assert( uBufLen >= uClusterSize );

    uActuallyWritten = pwrite(iFD, pvBuf, (size_t)uBufLen, uWantedOffset);
    if ( uActuallyWritten != (ssize_t)uBufLen ) {
        iErr = ( (uActuallyWritten < 0) ? errno : EIO );
        LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write_mount failed [%d]\n", iErr );
    }

    if (piActuallyWritten)
        *piActuallyWritten = uActuallyWritten;

    return iErr;
}

/*
 * Read up to uLength bytes of file data starting at byte uOffset, walking
 * the fork extent by extent. The read stops early (without error) at end
 * of file; *piActuallyRead reports what was actually copied.
 */
errno_t
raw_readwrite_read( vnode_t psVnode, uint64_t uOffset, void* pvBuf, uint64_t uLength, size_t *piActuallyRead, uint64_t* puReadStartCluster )
{
    errno_t iErr = 0;
    uint64_t uClusterSize = psVnode->sFSParams.vnfs_mp->psHfsmount->blockSize;
    uint64_t uFileSize = ((struct filefork *)VTOF(psVnode))->ff_data.cf_blocks * uClusterSize;
    uint64_t uActuallyRead = 0;
    bool bFirstLoop = true;

    *piActuallyRead = 0;
    while ( *piActuallyRead < uLength )
    {
        uint64_t uCurrentCluster = 0;
        uint64_t uInClusterOffset = 0;
        uint64_t uContigousClustersInBytes = 0;

        // Look for the location to read
        iErr = raw_readwrite_get_cluster_from_offset( psVnode, uOffset, &uCurrentCluster, &uInClusterOffset, &uContigousClustersInBytes );
        if ( iErr != 0 )
        {
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read: raw_readwrite_get_cluster_from_offset failed [%d]\n", iErr );
            return iErr;
        }

        // Report the first mapped cluster to the caller, if requested
        if ( bFirstLoop )
        {
            bFirstLoop = false;
            if (puReadStartCluster)
                *puReadStartCluster = uCurrentCluster;
        }

        // Stop reading if we've reached the end of the file
        if ( (uContigousClustersInBytes == 0) || (uOffset >= uFileSize) )
        {
            break;
        }

        uint64_t uBytesToRead = MIN(uFileSize - uOffset, uLength - *piActuallyRead);

        // Read data
        iErr = raw_readwrite_read_internal( psVnode, uCurrentCluster, uContigousClustersInBytes, uOffset, uBytesToRead, pvBuf, &uActuallyRead );
        if ( iErr != 0 )
        {
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read_internal: raw_readwrite_read_internal failed [%d]\n", iErr );
            return iErr;
        }

        // Update the amount of bytes already read
        *piActuallyRead += uActuallyRead;
        // Update file offset
        uOffset += uActuallyRead;
        // Update buffer offset
        pvBuf = (uint8_t*)pvBuf + uActuallyRead;
    }

    return iErr;
}

/*
 * Read one contiguous chunk starting inside cluster uCluster.
 * Three paths:
 *   1) uOffset not sector-aligned: read the single containing sector and
 *      copy out its tail;
 *   2) request smaller than one sector: read the sector and copy its head;
 *   3) otherwise: bulk-read whole sectors directly into the caller buffer.
 * *piActuallyRead is the number of bytes delivered (may be less than
 * uBytesToRead; the caller loops).
 */
errno_t
raw_readwrite_read_internal( vnode_t psVnode, uint64_t uCluster, uint64_t uContigousClustersInBytes,
                            uint64_t uOffset, uint64_t uBytesToRead, void* pvBuf, uint64_t *piActuallyRead )
{
    errno_t iErr = 0;
    int iFD = VNODE_TO_IFD(psVnode);
    struct hfsmount *hfsmp = VTOHFS(psVnode);
    uint64_t uClusterSize = hfsmp->blockSize;
    uint64_t uSectorSize = hfsmp->hfs_logical_block_size;
    uint64_t uBytesToCopy = 0;

    // Calculate offset - offset by sector and need to add the offset by sector
    uint64_t uReadOffset = FSOPS_GetOffsetFromClusterNum( psVnode, uCluster ) + ( ROUND_DOWN(uOffset, uSectorSize) % uClusterSize );

    // If offset not align to sector size, need to read only 1 sector and memcpy its end
    if ( (uOffset % uSectorSize) != 0 )
    {
        void* pvBuffer = hfs_malloc(uSectorSize);
        if (pvBuffer == NULL)
        {
            return ENOMEM;
        }

        uint64_t uInSectorOffset = uOffset % uSectorSize;
        uBytesToCopy = MIN(uSectorSize - uInSectorOffset, uBytesToRead);

        // Read the content of the file
        ssize_t iReadBytes = pread( iFD, pvBuffer, uSectorSize, uReadOffset );
        if ( iReadBytes != (ssize_t)uSectorSize )
        {
            iErr = ((iReadBytes < 0) ? errno : EIO);
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read: pread failed to read wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }
        memcpy( (uint8_t *)pvBuf, (uint8_t *)pvBuffer+uInSectorOffset, uBytesToCopy );
        hfs_free(pvBuffer);
    }
    // If uBytesToRead < uSectorSize, need to read 1 sector and memcpy the begining
    else if (uBytesToRead < uSectorSize)
    {
        void* pvBuffer = hfs_malloc(uSectorSize);
        if (pvBuffer == NULL)
        {
            return ENOMEM;
        }

        uBytesToCopy = uBytesToRead;

        // Read the content of the file
        ssize_t iReadBytes = pread( iFD, pvBuffer, uSectorSize, uReadOffset );
        if ( iReadBytes != (ssize_t)uSectorSize )
        {
            iErr = ((iReadBytes < 0) ? errno : EIO);
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read: pread failed to read wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }

        memcpy((uint8_t *)pvBuf, pvBuffer, uBytesToCopy);
        hfs_free(pvBuffer);
    }
    // Can read buffer size chunk
    else
    {
        uint64_t uAvailSectors = uContigousClustersInBytes / uSectorSize;
        uint64_t uRemainingSectors = uBytesToRead / uSectorSize;

        // Clamp to whole sectors within the contiguous extent and the
        // single-request ceiling
        uBytesToCopy = MIN(uAvailSectors, uRemainingSectors) * uSectorSize;
        uBytesToCopy = MIN( uBytesToCopy, MAX_READ_WRITE_LENGTH );

        assert( (uBytesToCopy % uSectorSize) == 0 );
        assert( (uReadOffset % uSectorSize) == 0 );

        ssize_t iReadBytes = pread( iFD,(uint8_t *)pvBuf, (size_t)uBytesToCopy, uReadOffset ) ;
        if ( iReadBytes != (ssize_t)uBytesToCopy )
        {
            iErr = ((iReadBytes < 0) ?
errno : EIO);
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read: pread failed to read wanted length\n" );
            return iErr;
        }
    }

    // Update the amount of bytes already read
    *piActuallyRead = uBytesToCopy;

    return iErr;
}

/*
 * Write up to uLength bytes of file data starting at byte uOffset, walking
 * the fork extent by extent. Writing stops (without error) at end of file;
 * *piActuallyWritten reports what was actually written.
 */
errno_t
raw_readwrite_write( vnode_t psVnode, uint64_t uOffset, void* pvBuf, uint64_t uLength, uint64_t *piActuallyWritten )
{
    errno_t iErr = 0;
    uint64_t uClusterSize = psVnode->sFSParams.vnfs_mp->psHfsmount->blockSize;
    uint64_t uFileSize = ((struct filefork *)VTOF(psVnode))->ff_data.cf_blocks * uClusterSize;
    uint64_t uActuallyWritten = 0;

    *piActuallyWritten = 0;

    // Fill the buffer until the buffer is full or till the end of the file
    while ( *piActuallyWritten < uLength )
    {
        uint64_t uCurrentCluster = 0;
        uint64_t uInClusterOffset = 0;
        uint64_t uContigousClustersInBytes = 0;

        iErr = raw_readwrite_get_cluster_from_offset(psVnode, uOffset, &uCurrentCluster, &uInClusterOffset, &uContigousClustersInBytes );
        if ( iErr != 0 )
        {
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: raw_readwrite_get_cluster_from_offset failed [%d]\n", iErr );
            return iErr;
        }

        // Stop writing if we've reached the end of the file
        if ( (uContigousClustersInBytes == 0) || (uOffset >= uFileSize) )
        {
            break;
        }

        /* Calculate how many bytes are still missing to add to the device
         * If offset near end of file need to set only (uFileSize - uOffset)
         * else need to write as much as left (uLength - uAcctuallyRead)
         */
        uint64_t uBytesToWrite = MIN(uFileSize - uOffset, uLength - *piActuallyWritten);

        // Write data
        iErr = raw_readwrite_write_internal( psVnode, uCurrentCluster, uContigousClustersInBytes, uOffset, uBytesToWrite, pvBuf, &uActuallyWritten );
        if ( iErr != 0 )
        {
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_read_internal: raw_readwrite_read_internal failed [%d]\n", iErr );
            return iErr;
        }

        // Update the amount of bytes already written
        *piActuallyWritten += uActuallyWritten;
        // Update file offset
        uOffset += uActuallyWritten;
        // Update buffer offset
        pvBuf = (uint8_t*)pvBuf + uActuallyWritten;
    }

    return iErr;
}

/*
 * Write one contiguous chunk starting inside cluster uCluster.
 * Mirrors raw_readwrite_read_internal(): unaligned head and sub-sector
 * writes go through a read-modify-write of a single sector; everything
 * else is written directly in whole sectors. *piActuallyWritten is the
 * number of bytes consumed from pvBuf (the caller loops).
 */
errno_t
raw_readwrite_write_internal( vnode_t psVnode, uint64_t uCluster, uint64_t uContigousClustersInBytes,
                             uint64_t uOffset, uint64_t uBytesToWrite, void* pvBuf, uint64_t *piActuallyWritten )
{
    errno_t iErr = 0;
    int iFD = VNODE_TO_IFD(psVnode);
    struct hfsmount *hfsmp = VTOHFS(psVnode);
    uint64_t uClusterSize = hfsmp->blockSize;
    uint64_t uSectorSize = hfsmp->hfs_logical_block_size;
    uint64_t uBytesToCopy = 0;

    // Calculate offset - offset by sector and need to add the offset by sector
    uint64_t uWriteOffset = FSOPS_GetOffsetFromClusterNum( psVnode, uCluster ) + ( ROUND_DOWN(uOffset, uSectorSize) % uClusterSize );

    // If offset not align to sector size, need to read the existing data
    // memcpy it's beginning and write back to the device
    if ( (uOffset % uSectorSize) != 0 )
    {
        void* pvBuffer = hfs_malloc(uSectorSize);
        if (pvBuffer == NULL)
        {
            return ENOMEM;
        }

        uint64_t uInSectorOffset = uOffset % uSectorSize;
        uBytesToCopy = MIN( uBytesToWrite, uSectorSize - uInSectorOffset );

        // Read the content of the existing file
        ssize_t iReadBytes = pread(iFD, pvBuffer, uSectorSize, uWriteOffset);
        if ( iReadBytes != (ssize_t)uSectorSize )
        {
            iErr = (iReadBytes < 0) ? errno : EIO;
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: pread failed to read wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }

        // memcpy the data from the given buffer
        memcpy((uint8_t *)pvBuffer+uInSectorOffset, pvBuf, uBytesToCopy);

        // Write the data into the device
        ssize_t iWriteBytes = pwrite(iFD, pvBuffer, uSectorSize, uWriteOffset);
        if ( iWriteBytes != (ssize_t)uSectorSize )
        {
            iErr = (iWriteBytes < 0) ? errno : EIO;
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: pwrite failed to write wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }

        hfs_free(pvBuffer);
    }
    // If uBytesToWrite < uSectorSize, need to R/M/W 1 sector.
    else if ( uBytesToWrite < uSectorSize )
    {
        void* pvBuffer = hfs_malloc(uSectorSize);
        if (pvBuffer == NULL)
        {
            return ENOMEM;
        }

        uBytesToCopy = uBytesToWrite;

        // Read the content of the existing file
        ssize_t iReadBytes = pread(iFD, pvBuffer, uSectorSize, uWriteOffset);
        if ( iReadBytes != (ssize_t)uSectorSize )
        {
            iErr = (iReadBytes < 0) ? errno : EIO;
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: pread failed to read wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }

        // memcpy the last data
        memcpy(pvBuffer, (uint8_t *)pvBuf, uBytesToCopy);

        // Write the content to the file
        ssize_t iWriteBytes = pwrite(iFD, pvBuffer, uSectorSize, uWriteOffset);
        if ( iWriteBytes != (ssize_t)uSectorSize)
        {
            iErr = (iWriteBytes < 0) ? errno : EIO;
            LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: pwrite failed to write wanted length\n" );
            hfs_free(pvBuffer);
            return iErr;
        }

        hfs_free(pvBuffer);
    }
    // Can write buffer size chunk
    else
    {
        uint64_t uAvailSectors = uContigousClustersInBytes / uSectorSize;
        uint64_t uRemainingSectors = uBytesToWrite / uSectorSize;

        // Clamp to whole sectors within the contiguous extent and the
        // single-request ceiling
        uBytesToCopy = MIN(uAvailSectors, uRemainingSectors) * uSectorSize;
        uBytesToCopy = MIN( uBytesToCopy, MAX_READ_WRITE_LENGTH );

        assert( (uBytesToCopy % uSectorSize) == 0 );
        assert( (uWriteOffset % uSectorSize) == 0 );

        ssize_t iWriteBytes = pwrite(iFD, (uint8_t *)pvBuf, uBytesToCopy, uWriteOffset) ;
        if ( iWriteBytes != (ssize_t) uBytesToCopy)
        {
            iErr = (iWriteBytes < 0) ?
errno : EIO; + LFHFS_LOG( LEVEL_ERROR, "raw_readwrite_write: pwrite failed to write wanted length\n" ); + return iErr; + } + } + + // Update the amount of bytes alreay written + *piActuallyWritten = uBytesToCopy; + + return iErr; +} + +int +raw_readwrite_zero_fill_init() +{ + if ( gpvZeroBuf ) + { + return 0; + } + + gpvZeroBuf = hfs_malloc( ZERO_BUF_SIZE ); + if ( gpvZeroBuf == NULL ) + { + return ENOMEM; + } + + memset( gpvZeroBuf, 0, ZERO_BUF_SIZE ); + + return 0; +} + +void +raw_readwrite_zero_fill_de_init() +{ + if ( gpvZeroBuf ) + { + hfs_free( gpvZeroBuf ); + } + + gpvZeroBuf = NULL; +} + +int +raw_readwrite_zero_fill_fill( hfsmount_t* psMount, uint64_t uBlock, uint32_t uContigBlocks ) +{ + int iErr = 0; + int64_t lWriteSize = 0; + uint64_t uCurWriteOffset = 0; + uint64_t uCurWriteLen = 0; + uint64_t uDataWriten = 0; + + if ( gpvZeroBuf == NULL ) + { + iErr = EINVAL; + goto exit; + } + + uint64_t uLength = uContigBlocks*psMount->blockSize; + uint64_t uOffset = psMount->hfsPlusIOPosOffset + uBlock*psMount->blockSize; + + while ( uDataWriten < uLength ) + { + uCurWriteOffset = uOffset+uDataWriten; + uCurWriteLen = MIN( (uLength - uDataWriten), ZERO_BUF_SIZE ); + + lWriteSize = pwrite( psMount->hfs_devvp->psFSRecord->iFD, gpvZeroBuf, uCurWriteLen, uCurWriteOffset ); + if ( lWriteSize != (int64_t)uCurWriteLen ) + { + iErr = errno; + goto exit; + } + + uDataWriten += uCurWriteLen; + } + +exit: + return iErr; +} + +errno_t +raw_readwrite_zero_fill_last_block_suffix( vnode_t psVnode ) +{ + + int iErr = 0; + int iFD = (VPTOFSRECORD(psVnode))->iFD; + struct filefork *fp = VTOF(psVnode); + uint32_t uBlockSize = HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->blockSize; + uint32_t uBytesToKeep = fp->ff_size % uBlockSize; + uint64_t uBlockN = 0; + uint64_t uContigousClustersInBytes; + uint64_t uInClusterOffset; + + uint8_t* puClusterData = NULL; + + iErr = raw_readwrite_get_cluster_from_offset(psVnode, fp->ff_size - uBytesToKeep, &uBlockN, &uInClusterOffset, 
&uContigousClustersInBytes); + if ( iErr != 0 ) + { + goto exit; + } + + // Allocate buffer for cluster. + puClusterData = hfs_malloc( uBlockSize ); + if ( puClusterData == NULL ) + { + iErr = ENOMEM; + goto exit; + } + + // Read the last cluster. + size_t uBytesRead = pread( iFD, puClusterData, uBlockSize, FSOPS_GetOffsetFromClusterNum( psVnode, uBlockN ) ); + if ( uBytesRead != uBlockSize ) + { + iErr = errno; + goto exit; + } + + memset( puClusterData+uBytesToKeep, 0, uBlockSize-uBytesToKeep ); + + // Write the last cluster. + size_t uBytesWrite = pwrite( iFD, puClusterData, uBlockSize, FSOPS_GetOffsetFromClusterNum( psVnode, uBlockN ) ); + if ( uBytesWrite != uBlockSize ) + { + iErr = errno; + goto exit; + } + +exit: + if ( puClusterData ) + hfs_free( puClusterData ); + + return iErr; +} + diff --git a/livefiles_hfs_plugin/lf_hfs_raw_read_write.h b/livefiles_hfs_plugin/lf_hfs_raw_read_write.h new file mode 100644 index 0000000..ce32685 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_raw_read_write.h @@ -0,0 +1,32 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_raw_read_write.h + * livefiles_hfs + * + * Created by Or Haimovich on 19/3/18. 
 */

#ifndef lf_hfs_raw_read_write_h
#define lf_hfs_raw_read_write_h

#include "lf_hfs_vnode.h"
#include "lf_hfs.h"

errno_t raw_readwrite_read_mount( vnode_t psMountVnode, uint64_t uBlockN, uint64_t uClusterSize, void* pvBuf, uint64_t uBufLen, uint64_t *piActuallyRead, uint64_t* puReadStartCluster );
errno_t raw_readwrite_write_mount( vnode_t psMountVnode, uint64_t uBlockN, uint64_t uClusterSize, void* pvBuf, uint64_t uBufLen, uint64_t *piActuallyWritten, uint64_t* puWrittenStartCluster );

int raw_readwrite_get_cluster_from_offset( vnode_t psVnode, uint64_t uWantedOffset, uint64_t* puStartCluster, uint64_t* puInClusterOffset, uint64_t* puContigousClustersInBytes );
errno_t raw_readwrite_write( vnode_t psVnode, uint64_t uOffset, void* pvBuf, uint64_t uLength, uint64_t *piActuallyWritten );
errno_t raw_readwrite_write_internal( vnode_t psVnode, uint64_t uCluster, uint64_t uContigousClustersInBytes,
                                     uint64_t Offset, uint64_t uBytesToWrite, void* pvBuf, uint64_t *piActuallyWritten );
errno_t raw_readwrite_read( vnode_t psVnode, uint64_t uOffset, void* pvBuf, uint64_t uLength, size_t *piActuallyRead, uint64_t* puReadStartCluster );
errno_t raw_readwrite_read_internal( vnode_t psVnode, uint64_t uStartCluster, uint64_t uContigousClustersInBytes,
                                    uint64_t Offset, uint64_t uBytesToRead, void* pvBuf, uint64_t *piActuallyRead );

int raw_readwrite_zero_fill_init( void );
void raw_readwrite_zero_fill_de_init( void );
/* NOTE(review): parameter names here (uOffset, uLength) disagree with the
 * definition in lf_hfs_raw_read_write.c, which takes a block number
 * (uBlock) and a block count (uContigBlocks). The values passed are
 * blocks, not bytes. */
int raw_readwrite_zero_fill_fill( hfsmount_t* psMount, uint64_t uOffset, uint32_t uLength );
errno_t raw_readwrite_zero_fill_last_block_suffix( vnode_t psVnode );


#endif /* lf_hfs_raw_read_write_h */
diff --git a/livefiles_hfs_plugin/lf_hfs_readwrite_ops.c b/livefiles_hfs_plugin/lf_hfs_readwrite_ops.c
new file mode 100644
index 0000000..ea0b44c
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_readwrite_ops.c
@@ -0,0 +1,755 @@
//
//  lf_hfs_readwrite_ops.c
//  livefiles_hfs
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include "lf_hfs_readwrite_ops.h"
#include "lf_hfs_rangelist.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_cnode.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_vnops.h"
#include "lf_hfs_raw_read_write.h"

/* NOTE(review): the operand of the following #include was lost in
 * extraction -- recover the header name from the original source. */
#include

static int do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skip);

/*
 * Set the logical length of vp's data fork to 'length', growing
 * (allocating and zero-filling) or shrinking (releasing blocks) the fork
 * as needed. truncateflags may carry HFS_TRUNCATE_SKIPTIMES to suppress
 * modtime/gencount updates. Returns 0 or an errno-style error.
 */
static int
do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags)
{
    register struct cnode *cp = VTOC(vp);
    struct filefork *fp = VTOF(vp);
    int retval;
    off_t bytesToAdd;
    off_t actualBytesAdded;
    off_t filebytes;
    u_int32_t fileblocks;
    int blksize;
    struct hfsmount *hfsmp;
    int lockflags;
    int suppress_times = (truncateflags & HFS_TRUNCATE_SKIPTIMES);

    blksize = VTOVCB(vp)->blockSize;
    fileblocks = fp->ff_blocks;
    filebytes = (off_t)fileblocks * (off_t)blksize;

    if (length < 0)
        return (EINVAL);

    /* This should only happen with a corrupt filesystem */
    if ((off_t)fp->ff_size < 0)
        return (EINVAL);

    hfsmp = VTOHFS(vp);

    retval = E_NONE;
    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of ff_size is 0, length will be at least 1.
     */
    if (length > (off_t)fp->ff_size) {
        /*
         * If we don't have enough physical space then
         * we need to extend the physical size.
         */
        if (length > filebytes) {
            int eflags = kEFReserveMask;
            u_int32_t blockHint = 0;

            /* All or nothing and don't round up to clumpsize.
 */
            eflags |= kEFAllMask | kEFNoClumpMask;

            if (hfs_start_transaction(hfsmp) != 0) {
                retval = EINVAL;
                goto Err_Exit;
            }

            /* Protect extents b-tree and allocation bitmap */
            lockflags = SFL_BITMAP;
            if (overflow_extents(fp))
                lockflags |= SFL_EXTENTS;
            lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

            /*
             * Keep growing the file as long as the current EOF is
             * less than the desired value.
             */
            while ((length > filebytes) && (retval == E_NONE)) {
                bytesToAdd = length - filebytes;
                retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
                                                   (FCB*)fp,
                                                   bytesToAdd,
                                                   blockHint,
                                                   eflags,
                                                   &actualBytesAdded));

                filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
                /* No progress and no error: the volume cannot grow the
                 * fork any further -- clamp the target and stop. */
                if (actualBytesAdded == 0 && retval == E_NONE) {
                    if (length > filebytes)
                        length = filebytes;
                    break;
                }
            } /* endwhile */

            hfs_systemfile_unlock(hfsmp, lockflags);

            if (hfsmp->jnl) {
                hfs_update(vp, 0);
                hfs_volupdate(hfsmp, VOL_UPDATE, 0);
            }

            hfs_end_transaction(hfsmp);

            if (retval)
                goto Err_Exit;
        }

        if (ISSET(flags, IO_NOZEROFILL))
        {
            /* Caller takes responsibility for zeroing: nothing to do. */
        }
        else
        {
            if (!vnode_issystem(vp) && retval == E_NONE) {
                if (length > (off_t)fp->ff_size) {
                    struct timeval tv;

                    /* Extending the file: time to fill out the current last page w. zeroes? */
                    retval = raw_readwrite_zero_fill_last_block_suffix(vp);
                    if (retval) goto Err_Exit;

                    microuptime(&tv);
// Currently disabling the rl_add, since the
// data is being filled with 0's and that's a valid content for us
//                    rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
                    cp->c_zftimeout = (uint32_t)tv.tv_sec + ZFTIMELIMIT;
                }
            }else{
                LFHFS_LOG(LEVEL_ERROR, "hfs_truncate: invoked on non-UBC object?!");
                hfs_assert(0);
            }
        }
        if (suppress_times == 0) {
            cp->c_touch_modtime = TRUE;
        }
        fp->ff_size = length;

    } else { /* Shorten the size of the file */

        if ((off_t)fp->ff_size > length) {
            /* Any space previously marked as invalid is now irrelevant: */
            rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);
        }

        /*
         * Account for any unmapped blocks. Note that the new
         * file length can still end up with unmapped blocks.
         */
        if (fp->ff_unallocblocks > 0) {
            u_int32_t finalblks;
            u_int32_t loanedBlocks;

            /* Return all loaned (delayed-allocation) blocks, then re-loan
             * only what the new length still requires. */
            hfs_lock_mount(hfsmp);
            loanedBlocks = fp->ff_unallocblocks;
            cp->c_blocks -= loanedBlocks;
            fp->ff_blocks -= loanedBlocks;
            fp->ff_unallocblocks = 0;

            hfsmp->loanedBlocks -= loanedBlocks;

            finalblks = (uint32_t)((length + blksize - 1) / blksize);
            if (finalblks > fp->ff_blocks) {
                /* calculate required unmapped blocks */
                loanedBlocks = finalblks - fp->ff_blocks;
                hfsmp->loanedBlocks += loanedBlocks;

                fp->ff_unallocblocks = loanedBlocks;
                cp->c_blocks += loanedBlocks;
                fp->ff_blocks += loanedBlocks;
            }
            hfs_unlock_mount (hfsmp);
        }
        if (hfs_start_transaction(hfsmp) != 0) {
            retval = EINVAL;
            goto Err_Exit;
        }

        if (fp->ff_unallocblocks == 0) {
            /* Protect extents b-tree and allocation bitmap */
            lockflags = SFL_BITMAP;
            if (overflow_extents(fp))
                lockflags |= SFL_EXTENTS;
            lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

            retval = MacToVFSError(TruncateFileC(VTOVCB(vp), (FCB*)fp, length, 0,
                                                 FORK_IS_RSRC (fp), FTOC(fp)->c_fileid, false));

hfs_systemfile_unlock(hfsmp, lockflags); + } + if (hfsmp->jnl) { + if (retval == 0) { + fp->ff_size = length; + } + hfs_update(vp, 0); + hfs_volupdate(hfsmp, VOL_UPDATE, 0); + } + + hfs_end_transaction(hfsmp); + + if (retval) goto Err_Exit; + + /* + * Only set update flag if the logical length changes & we aren't + * suppressing modtime updates. + */ + if (((off_t)fp->ff_size != length) && (suppress_times == 0)) { + cp->c_touch_modtime = TRUE; + } + fp->ff_size = length; + } + + cp->c_flag |= C_MODIFIED; + cp->c_touch_chgtime = TRUE; /* status changed */ + if (suppress_times == 0) { + cp->c_touch_modtime = TRUE; /* file data was modified */ + + /* + * If we are not suppressing the modtime update, then + * update the gen count as well. + */ + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK (cp->c_attr.ca_mode)) { + hfs_incr_gencount(cp); + } + } + + retval = hfs_update(vp, 0); + +Err_Exit: + + return (retval); +} + +int +hfs_vnop_blockmap(struct vnop_blockmap_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct cnode *cp; + struct filefork *fp; + struct hfsmount *hfsmp; + size_t bytesContAvail = ap->a_size; + int retval = E_NONE; + int syslocks = 0; + int lockflags = 0; + struct rl_entry *invalid_range; + enum rl_overlaptype overlaptype; + int started_tr = 0; + int tooklock = 0; + + /* Do not allow blockmap operation on a directory */ + if (vnode_isdir(vp)) { + return (ENOTSUP); + } + + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. 
+ */ + if (ap->a_bpn == NULL) + return (0); + + hfsmp = VTOHFS(vp); + cp = VTOC(vp); + fp = VTOF(vp); + + if ( !vnode_issystem(vp) && !vnode_islnk(vp) ) { + if (cp->c_lockowner != pthread_self()) { + hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS); + tooklock = 1; + } + + // For reads, check the invalid ranges + if (ISSET(ap->a_flags, VNODE_READ)) { + if (ap->a_foffset >= fp->ff_size) { + retval = ERANGE; + goto exit; + } + + overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset, + ap->a_foffset + (off_t)bytesContAvail - 1, + &invalid_range); + switch(overlaptype) { + case RL_MATCHINGOVERLAP: + case RL_OVERLAPCONTAINSRANGE: + case RL_OVERLAPSTARTSBEFORE: + /* There's no valid block for this byte offset */ + *ap->a_bpn = (daddr64_t)-1; + /* There's no point limiting the amount to be returned + * if the invalid range that was hit extends all the way + * to the EOF (i.e. there's no valid bytes between the + * end of this range and the file's EOF): + */ + if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) && + ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) { + bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset; + } + + retval = 0; + goto exit; + + case RL_OVERLAPISCONTAINED: + case RL_OVERLAPENDSAFTER: + /* The range of interest hits an invalid block before the end: */ + if (invalid_range->rl_start == ap->a_foffset) { + /* There's actually no valid information to be had starting here: */ + *ap->a_bpn = (daddr64_t)-1; + if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) && + ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) { + bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset; + } + + retval = 0; + goto exit; + } else { + /* + * Sadly, the lower layers don't like us to + * return unaligned ranges, so we skip over + * any invalid ranges here that are less than + * a page: zeroing of those bits is not our + * responsibility (it's dealt with elsewhere). 
+ */ + do { + off_t rounded_start = (((uint64_t)(invalid_range->rl_start) + (off_t)PAGE_MASK) & ~((off_t)PAGE_MASK)); + if ((off_t)bytesContAvail < rounded_start - ap->a_foffset) + break; + if (rounded_start < invalid_range->rl_end + 1) { + bytesContAvail = rounded_start - ap->a_foffset; + break; + } + } while ((invalid_range = TAILQ_NEXT(invalid_range, + rl_link))); + } + break; + + case RL_NOOVERLAP: + break; + } // switch + } + } + +retry: + + /* Check virtual blocks only when performing write operation */ + if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) { + if (hfs_start_transaction(hfsmp) != 0) { + retval = EINVAL; + goto exit; + } else { + started_tr = 1; + } + syslocks = SFL_EXTENTS | SFL_BITMAP; + + } else if (overflow_extents(fp)) { + syslocks = SFL_EXTENTS; + } + + if (syslocks) + lockflags = hfs_systemfile_lock(hfsmp, syslocks, HFS_EXCLUSIVE_LOCK); + + /* + * Check for any delayed allocations. + */ + if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) { + int64_t actbytes; + u_int32_t loanedBlocks; + + // + // Make sure we have a transaction. It's possible + // that we came in and fp->ff_unallocblocks was zero + // but during the time we blocked acquiring the extents + // btree, ff_unallocblocks became non-zero and so we + // will need to start a transaction. + // + if (started_tr == 0) { + if (syslocks) { + hfs_systemfile_unlock(hfsmp, lockflags); + syslocks = 0; + } + goto retry; + } + + /* + * Note: ExtendFileC will Release any blocks on loan and + * aquire real blocks. So we ask to extend by zero bytes + * since ExtendFileC will account for the virtual blocks. 
+ */ + + loanedBlocks = fp->ff_unallocblocks; + retval = ExtendFileC(hfsmp, (FCB*)fp, 0, 0, + kEFAllMask | kEFNoClumpMask, &actbytes); + + if (retval) { + fp->ff_unallocblocks = loanedBlocks; + cp->c_blocks += loanedBlocks; + fp->ff_blocks += loanedBlocks; + + hfs_lock_mount (hfsmp); + hfsmp->loanedBlocks += loanedBlocks; + hfs_unlock_mount (hfsmp); + + hfs_systemfile_unlock(hfsmp, lockflags); + cp->c_flag |= C_MODIFIED; + if (started_tr) { + (void) hfs_update(vp, 0); + (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0); + + hfs_end_transaction(hfsmp); + started_tr = 0; + } + goto exit; + } + } + + retval = MapFileBlockC(hfsmp, (FCB *)fp, bytesContAvail, ap->a_foffset, + ap->a_bpn, &bytesContAvail); + if (syslocks) { + hfs_systemfile_unlock(hfsmp, lockflags); + } + + if (retval) { + /* On write, always return error because virtual blocks, if any, + * should have been allocated in ExtendFileC(). We do not + * allocate virtual blocks on read, therefore return error + * only if no virtual blocks are allocated. Otherwise we search + * rangelist for zero-fills + */ + if ((MacToVFSError(retval) != ERANGE) || + (ap->a_flags & VNODE_WRITE) || + ((ap->a_flags & VNODE_READ) && (fp->ff_unallocblocks == 0))) { + goto exit; + } + + /* Validate if the start offset is within logical file size */ + if (ap->a_foffset >= fp->ff_size) { + goto exit; + } + + /* + * At this point, we have encountered a failure during + * MapFileBlockC that resulted in ERANGE, and we are not + * servicing a write, and there are borrowed blocks. + * + * However, the cluster layer will not call blockmap for + * blocks that are borrowed and in-cache. We have to assume + * that because we observed ERANGE being emitted from + * MapFileBlockC, this extent range is not valid on-disk. So + * we treat this as a mapping that needs to be zero-filled + * prior to reading. 
+ */ + + if (fp->ff_size - ap->a_foffset < (off_t)bytesContAvail) + bytesContAvail = fp->ff_size - ap->a_foffset; + + *ap->a_bpn = (daddr64_t) -1; + retval = 0; + + goto exit; + } + +exit: + if (retval == 0) { + if (ISSET(ap->a_flags, VNODE_WRITE)) { + struct rl_entry *r = TAILQ_FIRST(&fp->ff_invalidranges); + + // See if we might be overlapping invalid ranges... + if (r && (ap->a_foffset + (off_t)bytesContAvail) > r->rl_start) { + /* + * Mark the file as needing an update if we think the + * on-disk EOF has changed. + */ + if (ap->a_foffset <= r->rl_start) + SET(cp->c_flag, C_MODIFIED); + + /* + * This isn't the ideal place to put this. Ideally, we + * should do something *after* we have successfully + * written to the range, but that's difficult to do + * because we cannot take locks in the callback. At + * present, the cluster code will call us with VNODE_WRITE + * set just before it's about to write the data so we know + * that data is about to be written. If we get an I/O + * error at this point then chances are the metadata + * update to follow will also have an I/O error so the + * risk here is small. + */ + rl_remove(ap->a_foffset, ap->a_foffset + bytesContAvail - 1, + &fp->ff_invalidranges); + + if (!TAILQ_FIRST(&fp->ff_invalidranges)) { + cp->c_flag &= ~C_ZFWANTSYNC; + cp->c_zftimeout = 0; + } + } + } + + if (ap->a_run) + *ap->a_run = bytesContAvail; + + if (ap->a_poff) + *(int *)ap->a_poff = 0; + } + + if (started_tr) { + hfs_update(vp, TRUE); + hfs_volupdate(hfsmp, VOL_UPDATE, 0); + hfs_end_transaction(hfsmp); + } + + if (tooklock) + hfs_unlock(cp); + + return (MacToVFSError(retval)); +} + +int +hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp) { + + struct filefork *fp = VTOF(vp); + struct cnode *cp = VTOC(vp); + + /* Cannot truncate an HFS directory! 
*/ + if (IS_DIR(vp)) + { + return (EISDIR); + } + + /* This should only happen with a corrupt filesystem */ + if ((off_t)fp->ff_size < 0) + return (EINVAL); + + /* + * We cannot just check if fp->ff_size == length (as an optimization) + * since there may be extra physical blocks that also need truncation. + */ + + /* Wipe out any invalid ranges which have yet to be backed by disk */ + rl_remove(0, fp->ff_size - 1, &fp->ff_invalidranges); + + /* + * Account for any unmapped blocks. Since we're deleting the + * entire file, we don't have to worry about just shrinking + * to a smaller number of borrowed blocks. + */ + if (fp->ff_unallocblocks > 0) + { + u_int32_t loanedBlocks; + + hfs_lock_mount (hfsmp); + loanedBlocks = fp->ff_unallocblocks; + cp->c_blocks -= loanedBlocks; + fp->ff_blocks -= loanedBlocks; + fp->ff_unallocblocks = 0; + + hfsmp->loanedBlocks -= loanedBlocks; + + hfs_unlock_mount (hfsmp); + } + + return 0; +} + +int +hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork, struct filefork *rsrcfork, u_int32_t fileid) +{ + int error = 0; + int blksize = hfsmp->blockSize; + + /* Data Fork */ + if (datafork) + { + datafork->ff_size = 0; + + u_int32_t fileblocks = datafork->ff_blocks; + off_t filebytes = (off_t)fileblocks * (off_t)blksize; + + /* We killed invalid ranges and loaned blocks before we removed the catalog entry */ + + while (filebytes > 0) { + if (filebytes > HFS_BIGFILE_SIZE) { + filebytes -= HFS_BIGFILE_SIZE; + } else { + filebytes = 0; + } + + /* Start a transaction, and wipe out as many blocks as we can in this iteration */ + if (hfs_start_transaction(hfsmp) != 0) { + error = EINVAL; + break; + } + + if (datafork->ff_unallocblocks == 0) + { + /* Protect extents b-tree and allocation bitmap */ + int lockflags = SFL_BITMAP; + if (overflow_extents(datafork)) + lockflags |= SFL_EXTENTS; + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + + error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), datafork, 
filebytes, 1, 0, fileid, false)); + + hfs_systemfile_unlock(hfsmp, lockflags); + } + (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0); + + /* Finish the transaction and start over if necessary */ + hfs_end_transaction(hfsmp); + + if (error) { + break; + } + } + } + + /* Resource fork */ + if (error == 0 && rsrcfork) + { + rsrcfork->ff_size = 0; + + u_int32_t fileblocks = rsrcfork->ff_blocks; + off_t filebytes = (off_t)fileblocks * (off_t)blksize; + + /* We killed invalid ranges and loaned blocks before we removed the catalog entry */ + + while (filebytes > 0) + { + if (filebytes > HFS_BIGFILE_SIZE) + { + filebytes -= HFS_BIGFILE_SIZE; + } + else + { + filebytes = 0; + } + + /* Start a transaction, and wipe out as many blocks as we can in this iteration */ + if (hfs_start_transaction(hfsmp) != 0) + { + error = EINVAL; + break; + } + + if (rsrcfork->ff_unallocblocks == 0) + { + /* Protect extents b-tree and allocation bitmap */ + int lockflags = SFL_BITMAP; + if (overflow_extents(rsrcfork)) + lockflags |= SFL_EXTENTS; + lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK); + + error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), rsrcfork, filebytes, 1, 1, fileid, false)); + + hfs_systemfile_unlock(hfsmp, lockflags); + } + (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0); + + /* Finish the transaction and start over if necessary */ + hfs_end_transaction(hfsmp); + + if (error) + { + break; + } + } + } + + return error; +} + +/* + * Truncate a cnode to at most length size, freeing (or adding) the + * disk blocks. + */ +int +hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags) +{ + struct filefork *fp = VTOF(vp); + off_t filebytes; + u_int32_t fileblocks; + int blksize; + errno_t error = 0; + struct cnode *cp = VTOC(vp); + hfsmount_t *hfsmp = VTOHFS(vp); + + /* Cannot truncate an HFS directory! 
*/ + if (vnode_isdir(vp)) { + return (EISDIR); + } + + blksize = hfsmp->blockSize; + fileblocks = fp->ff_blocks; + filebytes = (off_t)fileblocks * (off_t)blksize; + + bool caller_has_cnode_lock = (cp->c_lockowner == pthread_self()); + + if (!caller_has_cnode_lock) { + error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + if (error) + return error; + } + + if (vnode_islnk(vp) && cp->c_datafork->ff_symlinkptr) { + hfs_free(cp->c_datafork->ff_symlinkptr); + cp->c_datafork->ff_symlinkptr = NULL; + } + + // have to loop truncating or growing files that are + // really big because otherwise transactions can get + // enormous and consume too many kernel resources. + + if (length < filebytes) { + while (filebytes > length) { + if ((filebytes - length) > HFS_BIGFILE_SIZE) { + filebytes -= HFS_BIGFILE_SIZE; + } else { + filebytes = length; + } + error = do_hfs_truncate(vp, filebytes, flags, truncateflags); + if (error) + break; + } + } else if (length > filebytes) { + const bool keep_reserve = false; //cred && suser(cred, NULL) != 0; + + if (hfs_freeblks(hfsmp, keep_reserve) < howmany(length - filebytes, blksize)) + { + error = ENOSPC; + } + else + { + while (filebytes < length) { + if ((length - filebytes) > HFS_BIGFILE_SIZE) { + filebytes += HFS_BIGFILE_SIZE; + } else { + filebytes = length; + } + error = do_hfs_truncate(vp, filebytes, flags, truncateflags); + if (error) + break; + } + } + } else /* Same logical size */ { + + error = do_hfs_truncate(vp, length, flags, truncateflags); + } + + if (!caller_has_cnode_lock) + hfs_unlock(cp); + + return error; +} diff --git a/livefiles_hfs_plugin/lf_hfs_readwrite_ops.h b/livefiles_hfs_plugin/lf_hfs_readwrite_ops.h new file mode 100644 index 0000000..491d31f --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_readwrite_ops.h @@ -0,0 +1,32 @@ +// +// lf_hfs_readwrite_ops.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_readwrite_ops_h +#define lf_hfs_readwrite_ops_h + +#include +#include "lf_hfs.h" + +struct vnop_blockmap_args { + struct vnodeop_desc *a_desc; + vnode_t a_vp; + off_t a_foffset; + size_t a_size; + daddr64_t *a_bpn; + size_t *a_run; + void *a_poff; + int a_flags; +}; + +#define HFS_TRUNCATE_SKIPTIMES 0x00000002 /* implied by skipupdate; it is a subset */ + +int hfs_vnop_blockmap(struct vnop_blockmap_args *ap); +int hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp); +int hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork, struct filefork *rsrcfork, u_int32_t fileid); +int hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags); + +#endif /* lf_hfs_readwrite_ops_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_sbunicode.c b/livefiles_hfs_plugin/lf_hfs_sbunicode.c new file mode 100644 index 0000000..80ce127 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_sbunicode.c @@ -0,0 +1,973 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_sbunicode.c + * livefiles_hfs + * + * Created by Oded Shoshani on 31/1/18. + */ + +/* + * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + Includes Unicode 3.2 decomposition code derived from Core Foundation + */ + +#pragma clang diagnostic ignored "-Wsign-conversion" +#pragma clang diagnostic ignored "-Wconversion" + +#include +#include +#include +#include +#include +#include "lf_hfs_sbunicode.h" + + +/* + * UTF-8 (Unicode Transformation Format) + * + * UTF-8 is the Unicode Transformation Format that serializes a Unicode + * character as a sequence of one to four bytes. Only the shortest form + * required to represent the significant Unicode bits is legal. + * + * UTF-8 Multibyte Codes + * + * Bytes Bits Unicode Min Unicode Max UTF-8 Byte Sequence (binary) + * ----------------------------------------------------------------------------- + * 1 7 0x0000 0x007F 0xxxxxxx + * 2 11 0x0080 0x07FF 110xxxxx 10xxxxxx + * 3 16 0x0800 0xFFFF 1110xxxx 10xxxxxx 10xxxxxx + * 4 21 0x10000 0x10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + * ----------------------------------------------------------------------------- + */ + + +#define UNICODE_TO_UTF8_LEN(c) \ +((c) < 0x0080 ? 1 : ((c) < 0x0800 ? 2 : (((c) & 0xf800) == 0xd800 ? 2 : 3))) + +#define UCS_ALT_NULL 0x2400 + + +/* Surrogate Pair Constants */ + +#define SP_HALF_SHIFT 10 +#define SP_HALF_BASE 0x0010000u +#define SP_HALF_MASK 0x3FFu +#define SP_HIGH_FIRST 0xD800u +#define SP_HIGH_LAST 0xDBFFu +#define SP_LOW_FIRST 0xDC00u +#define SP_LOW_LAST 0xDFFFu + + +#include "lf_hfs_utfconvdata.h" + + +/* + * Test for a combining character. 
+ * + * Similar to __CFUniCharIsNonBaseCharacter except that + * unicode_combinable also includes Hangul Jamo characters. + */ +static int +unicode_combinable(u_int16_t character) +{ + const u_int8_t *bitmap = __CFUniCharCombiningBitmap; + u_int8_t value; + + if (character < 0x0300) + return (0); + + value = bitmap[(character >> 8) & 0xFF]; + + if (value == 0xFF) { + return (1); + } else if (value) { + bitmap = bitmap + ((value - 1) * 32) + 256; + return (bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0); + } + return (0); +} + +/* + * Test for a precomposed character. + * + * Similar to __CFUniCharIsDecomposableCharacter. + */ +static int +unicode_decomposeable(u_int16_t character) { + const u_int8_t *bitmap = __CFUniCharDecomposableBitmap; + u_int8_t value; + + if (character < 0x00C0) + return (0); + + value = bitmap[(character >> 8) & 0xFF]; + + if (value == 0xFF) { + return (1); + } else if (value) { + bitmap = bitmap + ((value - 1) * 32) + 256; + return (bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0); + } + return (0); +} + + +/* + * Get the combing class. + * + * Similar to CFUniCharGetCombiningPropertyForCharacter. 
+ */ +static inline u_int8_t +get_combining_class(u_int16_t character) { + const u_int8_t *bitmap = __CFUniCharCombiningPropertyBitmap; + + u_int8_t value = bitmap[(character >> 8)]; + + if (value) { + bitmap = bitmap + (value * 256); + return bitmap[character % 256]; + } + return (0); +} + +static int unicode_decompose(u_int16_t character, u_int16_t *convertedChars); + +static u_int16_t unicode_combine(u_int16_t base, u_int16_t combining); + +static void priortysort(u_int16_t* characters, int count); + +static u_int16_t ucs_to_sfm(u_int16_t ucs_ch, int lastchar); + +static u_int16_t sfm_to_ucs(u_int16_t ucs_ch); + +char utf_extrabytes[32] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 2, 2, 3, -1 +}; + +const char hexdigits[16] = { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' +}; + +/* + * utf8_encodelen - Calculate the UTF-8 encoding length + * + * This function takes a Unicode input string, ucsp, of ucslen bytes + * and calculates the size of the UTF-8 output in bytes (not including + * a NULL termination byte). The string must reside in kernel memory. + * + * If '/' chars are possible in the Unicode input then an alternate + * (replacement) char should be provided in altslash. 
+ * + * FLAGS + * UTF_REVERSE_ENDIAN: Unicode byte order is opposite current runtime + * + * UTF_BIG_ENDIAN: Unicode byte order is always big endian + * + * UTF_LITTLE_ENDIAN: Unicode byte order is always little endian + * + * UTF_DECOMPOSED: generate fully decomposed output + * + * UTF_PRECOMPOSED is ignored since utf8_encodestr doesn't support it + * + * ERRORS + * None + */ +size_t +utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, int flags) +{ + u_int16_t ucs_ch; + u_int16_t * chp = NULL; + u_int16_t sequence[8]; + int extra = 0; + size_t charcnt; + int swapbytes = (flags & UTF_REVERSE_ENDIAN); + int decompose = (flags & UTF_DECOMPOSED); + size_t len; + + charcnt = ucslen / 2; + len = 0; + + while (charcnt-- > 0) { + if (extra > 0) { + --extra; + ucs_ch = *chp++; + } else { + ucs_ch = *ucsp++; + if (swapbytes) { + ucs_ch = OSSwapInt16(ucs_ch); + } + if (ucs_ch == '/') { + ucs_ch = altslash ? altslash : '_'; + } else if (ucs_ch == '\0') { + ucs_ch = UCS_ALT_NULL; + } else if (decompose && unicode_decomposeable(ucs_ch)) { + extra = unicode_decompose(ucs_ch, sequence) - 1; + charcnt += extra; + ucs_ch = sequence[0]; + chp = &sequence[1]; + } + } + len += UNICODE_TO_UTF8_LEN(ucs_ch); + } + + return (len); +} + + +/* + * utf8_encodestr - Encodes a Unicode string to UTF-8 + * + * NOTES: + * The resulting UTF-8 string is NULL terminated. + * + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. + * + * input flags: + * UTF_REVERSE_ENDIAN: Unicode byteorder is opposite current runtime + * + * UTF_BIG_ENDIAN: Unicode byte order is always big endian + * + * UTF_LITTLE_ENDIAN: Unicode byte order is always little endian + * + * UTF_DECOMPOSED: generate fully decomposed output + * + * UTF_ADD_NULL_TERM: add NULL termination to UTF-8 output + * + * result: + * ENAMETOOLONG: Name didn't fit; only buflen bytes were encoded + * + * EINVAL: Illegal char found; char was replaced by an '_'. 
 */
extern int
utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p,
               size_t * utf8len, size_t buflen, u_int16_t altslash, int flags)
{
	u_int8_t * bufstart;
	u_int8_t * bufend;
	u_int16_t ucs_ch;
	u_int16_t * chp = NULL;		/* cursor into a queued decomposition */
	u_int16_t sequence[8];
	int extra = 0;			/* UTF-16 units left in sequence[] */
	size_t charcnt;
	int swapbytes = (flags & UTF_REVERSE_ENDIAN);
	int nullterm = (flags & UTF_ADD_NULL_TERM);
	int decompose = (flags & UTF_DECOMPOSED);
	int sfmconv = (flags & UTF_SFM_CONVERSIONS);
	int result = 0;

	bufstart = utf8p;
	bufend = bufstart + buflen;
	/* Reserve the final output byte for the NUL terminator. */
	if (nullterm)
		--bufend;
	/* ucslen is a byte count; convert to a UTF-16 unit count. */
	charcnt = ucslen / 2;

	while (charcnt-- > 0) {
		if (extra > 0) {
			/* Drain a previously decomposed sequence first. */
			--extra;
			ucs_ch = *chp++;
		} else {
			ucs_ch = swapbytes ? OSSwapInt16(*ucsp++) : *ucsp++;

			if (decompose && unicode_decomposeable(ucs_ch)) {
				extra = unicode_decompose(ucs_ch, sequence) - 1;
				charcnt += extra;
				ucs_ch = sequence[0];
				chp = &sequence[1];
			}
		}

		/* Slash and NULL are not permitted */
		if (ucs_ch == '/') {
			if (altslash)
				ucs_ch = altslash;
			else {
				ucs_ch = '_';
				result = EINVAL;
			}
		} else if (ucs_ch == '\0') {
			ucs_ch = UCS_ALT_NULL;
		}

		if (ucs_ch < 0x0080) {
			/* 1-byte UTF-8 form */
			if (utf8p >= bufend) {
				result = ENAMETOOLONG;
				break;
			}
			*utf8p++ = ucs_ch;

		} else if (ucs_ch < 0x800) {
			/* 2-byte UTF-8 form */
			if ((utf8p + 1) >= bufend) {
				result = ENAMETOOLONG;
				break;
			}
			*utf8p++ = 0xc0 | (ucs_ch >> 6);
			*utf8p++ = 0x80 | (0x3f & ucs_ch);

		} else {
			/* These chars never valid Unicode. */
			if (ucs_ch == 0xFFFE || ucs_ch == 0xFFFF) {
				result = EINVAL;
				break;
			}

			/* Combine valid surrogate pairs */
			if (ucs_ch >= SP_HIGH_FIRST && ucs_ch <= SP_HIGH_LAST
			    && charcnt > 0) {
				u_int16_t ch2;
				u_int32_t pair;

				/* Peek at the next unit without consuming it yet. */
				ch2 = swapbytes ? OSSwapInt16(*ucsp) : *ucsp;
				if (ch2 >= SP_LOW_FIRST && ch2 <= SP_LOW_LAST) {
					pair = (u_int32_t)((ucs_ch - SP_HIGH_FIRST) << SP_HALF_SHIFT)
					       + (ch2 - SP_LOW_FIRST) + SP_HALF_BASE;
					/* 4-byte UTF-8 form */
					if ((utf8p + 3) >= bufend) {
						result = ENAMETOOLONG;
						break;
					}
					--charcnt;
					++ucsp;
					*utf8p++ = 0xf0 | (pair >> 18);
					*utf8p++ = 0x80 | (0x3f & (pair >> 12));
					*utf8p++ = 0x80 | (0x3f & (pair >> 6));
					*utf8p++ = 0x80 | (0x3f & pair);
					continue;
				}
			} else if (sfmconv) {
				/* Map SFM private-use code points back to ASCII. */
				ucs_ch = sfm_to_ucs(ucs_ch);
				if (ucs_ch < 0x0080) {
					if (utf8p >= bufend) {
						result = ENAMETOOLONG;
						break;
					}
					*utf8p++ = ucs_ch;
					continue;
				}
			}
			/* 3-byte UTF-8 form (also an unpaired high surrogate). */
			if ((utf8p + 2) >= bufend) {
				result = ENAMETOOLONG;
				break;
			}
			*utf8p++ = 0xe0 | (ucs_ch >> 12);
			*utf8p++ = 0x80 | (0x3f & (ucs_ch >> 6));
			*utf8p++ = 0x80 | (0x3f & ucs_ch);
		}
	}

	*utf8len = utf8p - bufstart;
	/* Space for the NUL was reserved above. */
	if (nullterm)
		*utf8p++ = '\0';

	return (result);
}


/*
 * utf8_decodestr - Decodes a UTF-8 string back to Unicode
 *
 * NOTES:
 *    The input UTF-8 string does not need to be null terminated
 *    if utf8len is set.
 *
 *    If '/' chars are allowed on disk then an alternate
 *    (replacement) char must be provided in altslash.
 *
 * input flags:
 *    UTF_REV_ENDIAN:   Unicode byte order is opposite current runtime
 *
 *    UTF_BIG_ENDIAN:   Unicode byte order is always big endian
 *
 *    UTF_LITTLE_ENDIAN: Unicode byte order is always little endian
 *
 *    UTF_DECOMPOSED:   generate fully decomposed output (NFD)
 *
 *    UTF_PRECOMPOSED:  generate precomposed output (NFC)
 *
 *    UTF_ESCAPE_ILLEGAL: percent escape any illegal UTF-8 input
 *
 * result:
 *    ENAMETOOLONG: Name didn't fit; only ucslen chars were decoded.
 *
 *    EINVAL: Illegal UTF-8 sequence found.
 */
int
utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp,
               size_t *ucslen, size_t buflen, u_int16_t altslash, int flags)
{
	u_int16_t* bufstart;
	u_int16_t* bufend;
	unsigned int ucs_ch;
	unsigned int byte;
	int combcharcnt = 0;	/* length of the current run of combining marks */
	int result = 0;
	int decompose, precompose, swapbytes, escaping;
	int sfmconv;
	int extrabytes;

	decompose = (flags & UTF_DECOMPOSED);
	precompose = (flags & UTF_PRECOMPOSED);
	swapbytes = (flags & UTF_REVERSE_ENDIAN);
	escaping = (flags & UTF_ESCAPE_ILLEGAL);
	sfmconv = (flags & UTF_SFM_CONVERSIONS);

	bufstart = ucsp;
	/* buflen is in bytes; compute the end as a u_int16_t pointer. */
	bufend = (u_int16_t *)((u_int8_t *)ucsp + buflen);

	while (utf8len-- > 0 && (byte = *utf8p++) != '\0') {
		if (ucsp >= bufend)
			goto toolong;

		/* check for ascii */
		if (byte < 0x80) {
			/* utf8len == 0 marks the final character for SFM rules. */
			ucs_ch = sfmconv ? ucs_to_sfm(byte, utf8len == 0) : byte;
		} else {
			u_int32_t ch;

			extrabytes = utf_extrabytes[byte >> 3];
			if ((extrabytes < 0) || ((int)utf8len < extrabytes)) {
				goto escape;
			}
			utf8len -= extrabytes;

			switch (extrabytes) {
			case 1:
				/* 2-byte sequence */
				ch = byte; ch <<= 6;   /* 1st byte */
				byte = *utf8p++;       /* 2nd byte */
				if ((byte >> 6) != 2)
					goto escape2;
				ch += byte;
				/* Strip the lead/continuation tag bits in one subtraction. */
				ch -= 0x00003080UL;
				/* Reject overlong encodings. */
				if (ch < 0x0080)
					goto escape2;
				ucs_ch = ch;
				break;
			case 2:
				/* 3-byte sequence */
				ch = byte; ch <<= 6;   /* 1st byte */
				byte = *utf8p++;       /* 2nd byte */
				if ((byte >> 6) != 2)
					goto escape2;
				ch += byte; ch <<= 6;
				byte = *utf8p++;       /* 3rd byte */
				if ((byte >> 6) != 2)
					goto escape3;
				ch += byte;
				ch -= 0x000E2080UL;
				/* Reject overlong encodings. */
				if (ch < 0x0800)
					goto escape3;
				/* Reject raw surrogates and non-characters. */
				if (ch >= 0xD800) {
					if (ch <= 0xDFFF)
						goto escape3;
					if (ch == 0xFFFE || ch == 0xFFFF)
						goto escape3;
				}
				ucs_ch = ch;
				break;
			case 3:
				/* 4-byte sequence; emitted as a UTF-16 surrogate pair. */
				ch = byte; ch <<= 6;   /* 1st byte */
				byte = *utf8p++;       /* 2nd byte */
				if ((byte >> 6) != 2)
					goto escape2;
				ch += byte; ch <<= 6;
				byte = *utf8p++;       /* 3rd byte */
				if ((byte >> 6) != 2)
					goto escape3;
				ch += byte; ch <<= 6;
				byte = *utf8p++;       /* 4th byte */
				if ((byte >> 6) != 2)
					goto escape4;
				ch += byte;
				ch -= 0x03C82080UL + SP_HALF_BASE;
				ucs_ch = (ch >> SP_HALF_SHIFT) + SP_HIGH_FIRST;
				if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST)
					goto escape4;
				*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
				if (ucsp >= bufend)
					goto toolong;
				ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST;
				if (ucs_ch < SP_LOW_FIRST || ucs_ch > SP_LOW_LAST) {
					/* Back out the high surrogate already stored. */
					--ucsp;
					goto escape4;
				}
				*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
				continue;
			default:
				result = EINVAL;
				goto exit;
			}
			if (decompose) {
				if (unicode_decomposeable(ucs_ch)) {
					u_int16_t sequence[8] = {0};
					int count, i;

					/* Before decomposing a new unicode character, sort
					 * previous combining characters, if any, and reset
					 * the counter.
					 */
					if (combcharcnt > 1) {
						priortysort(ucsp - combcharcnt, combcharcnt);
					}
					combcharcnt = 0;

					count = unicode_decompose(ucs_ch, sequence);
					for (i = 0; i < count; ++i) {
						ucs_ch = sequence[i];
						*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
						if (ucsp >= bufend)
							goto toolong;
					}
					combcharcnt += count - 1;
					continue;
				}
			} else if (precompose && (ucsp != bufstart)) {
				u_int16_t composite, base;

				if (unicode_combinable(ucs_ch)) {
					/* Try to fold this mark into the previous output unit. */
					base = swapbytes ? OSSwapInt16(*(ucsp - 1)) : *(ucsp - 1);
					composite = unicode_combine(base, ucs_ch);
					if (composite) {
						--ucsp;
						ucs_ch = composite;
					}
				}
			}
			if (ucs_ch == UCS_ALT_NULL)
				ucs_ch = '\0';
		}
		if (ucs_ch == altslash)
			ucs_ch = '/';

		/*
		 * Make multiple combining character sequences canonical
		 */
		if (unicode_combinable(ucs_ch)) {
			++combcharcnt;	/* start tracking a run */
		} else if (combcharcnt) {
			if (combcharcnt > 1) {
				priortysort(ucsp - combcharcnt, combcharcnt);
			}
			combcharcnt = 0;	/* start over */
		}

		*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
		continue;

		/*
		 * Escape illegal UTF-8 into something legal.
		 */
escape4:
		utf8p -= 3;
		goto escape;
escape3:
		utf8p -= 2;
		goto escape;
escape2:
		utf8p -= 1;
escape:
		if (!escaping) {
			result = EINVAL;
			goto exit;
		}
		/* Give back the continuation bytes we claimed above. */
		if (extrabytes > 0)
			utf8len += extrabytes;
		byte = *(utf8p - 1);

		/* Escaping emits three output units: '%' plus two hex digits. */
		if ((ucsp + 2) >= bufend)
			goto toolong;

		/* Make a previous combining sequence canonical. */
		if (combcharcnt > 1) {
			priortysort(ucsp - combcharcnt, combcharcnt);
		}
		combcharcnt = 0;

		ucs_ch = '%';
		*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
		ucs_ch = hexdigits[byte >> 4];
		*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
		ucs_ch = hexdigits[byte & 0x0F];
		*ucsp++ = swapbytes ? OSSwapInt16(ucs_ch) : (u_int16_t)ucs_ch;
	}
	/*
	 * Make a previous combining sequence canonical
	 */
	if (combcharcnt > 1) {
		priortysort(ucsp - combcharcnt, combcharcnt);
	}
exit:
	/* Report the output length in bytes, not UTF-16 units. */
	*ucslen = (u_int8_t*)ucsp - (u_int8_t*)bufstart;

	return (result);

toolong:
	result = ENAMETOOLONG;
	goto exit;
}

/*
 * Unicode 3.2 decomposition code (derived from Core Foundation)
 */

#define HANGUL_SBASE 0xAC00
#define HANGUL_LBASE 0x1100
#define HANGUL_VBASE 0x1161
#define HANGUL_TBASE 0x11A7

#define HANGUL_SCOUNT 11172
#define HANGUL_LCOUNT 19
#define HANGUL_VCOUNT 21
#define HANGUL_TCOUNT 28
#define HANGUL_NCOUNT (HANGUL_VCOUNT * HANGUL_TCOUNT)


typedef struct {
	u_int32_t _key;
	u_int32_t _value;
} unicode_mappings32;

#define RECURSIVE_DECOMPOSITION (1 << 15)
#define EXTRACT_COUNT(value) (((value) >> 12) & 0x0007)

typedef struct {
	u_int16_t _key;
	u_int16_t _value;
} unicode_mappings16;

/*
 * Binary search of a sorted 32-bit key/value table; returns the value
 * for `character`, or 0 when the key is absent.
 */
static inline u_int32_t
getmappedvalue32(const unicode_mappings32 *theTable, u_int32_t numElem,
                 u_int16_t character)
{
	const unicode_mappings32 *p, *q, *divider;

	if ((character < theTable[0]._key) || (character > theTable[numElem-1]._key))
		return (0);

	p = theTable;
	q = p + (numElem-1);
	while (p <= q) {
		divider = p + ((q - p) >> 1);	/* divide by 2 */
		if (character < divider->_key) { q = divider - 1; }
		else if (character > divider->_key) { p = divider + 1; }
		else { return (divider->_value); }
	}
	return (0);
}

/*
 * Binary search of a sorted 16-bit key/value table; returns the value
 * for `character`, or 0 when the key is absent.
 */
static inline u_int16_t
getmappedvalue16(const unicode_mappings16 *theTable, u_int32_t numElem,
                 u_int16_t character)
{
	const unicode_mappings16 *p, *q, *divider;

	if ((character < theTable[0]._key) || (character > theTable[numElem-1]._key))
		return (0);

	p = theTable;
	q = p + (numElem-1);
	while (p <= q) {
		divider = p + ((q - p) >> 1);	/* divide by 2 */
		if (character < divider->_key)
			q = divider - 1;
		else if (character > divider->_key)
			p = divider + 1;
		else
			return (divider->_value);
	}
	return (0);
}

/*
 * Expand one character via the CF decomposition table, recursing when the
 * entry is itself marked decomposable.  Returns the number of UTF-16
 * units written to convertedChars (0 on recursive failure).
 */
static u_int32_t
unicode_recursive_decompose(u_int16_t character, u_int16_t *convertedChars)
{
	u_int16_t value;
	u_int32_t length;
	u_int16_t firstChar;
	u_int16_t theChar;
	const u_int16_t *bmpMappings;
	u_int32_t usedLength;

	value = getmappedvalue16(
		(const unicode_mappings16 *)__CFUniCharDecompositionTable,
		__UniCharDecompositionTableLength, character);
	length = EXTRACT_COUNT(value);
	firstChar = value & 0x0FFF;
	theChar = firstChar;
	/* Single-char expansions are stored inline; longer ones index the
	 * multiple-decomposition table. */
	bmpMappings = (length == 1 ? &theChar : __CFUniCharMultipleDecompositionTable + firstChar);
	usedLength = 0;

	if (value & RECURSIVE_DECOMPOSITION) {
		usedLength = unicode_recursive_decompose((u_int16_t)*bmpMappings, convertedChars);

		--length;	/* Decrement for the first char */
		if (!usedLength)
			return 0;
		++bmpMappings;
		convertedChars += usedLength;
	}

	usedLength += length;

	while (length--)
		*(convertedChars++) = *(bmpMappings++);

	return (usedLength);
}

/*
 * unicode_decompose - decompose a composed Unicode char
 *
 * Composed Unicode characters are forbidden on
 * HFS Plus volumes. ucs_decompose will convert a
 * composed character into its correct decomposed
 * sequence.
+ * + * Similar to CFUniCharDecomposeCharacter + */ +static int +unicode_decompose(u_int16_t character, u_int16_t *convertedChars) +{ + if ((character >= HANGUL_SBASE) && + (character <= (HANGUL_SBASE + HANGUL_SCOUNT))) { + u_int32_t length; + + character -= HANGUL_SBASE; + length = (character % HANGUL_TCOUNT ? 3 : 2); + + *(convertedChars++) = + character / HANGUL_NCOUNT + HANGUL_LBASE; + *(convertedChars++) = + (character % HANGUL_NCOUNT) / HANGUL_TCOUNT + HANGUL_VBASE; + if (length > 2) + *convertedChars = (character % HANGUL_TCOUNT) + HANGUL_TBASE; + return (length); + } else { + return (unicode_recursive_decompose(character, convertedChars)); + } +} + +/* + * unicode_combine - generate a precomposed Unicode char + * + * Precomposed Unicode characters are required for some volume + * formats and network protocols. unicode_combine will combine + * a decomposed character sequence into a single precomposed + * (composite) character. + * + * Similar toCFUniCharPrecomposeCharacter but unicode_combine + * also handles Hangul Jamo characters. 
+ */ +static u_int16_t +unicode_combine(u_int16_t base, u_int16_t combining) +{ + u_int32_t value; + + /* Check HANGUL */ + if ((combining >= HANGUL_VBASE) && (combining < (HANGUL_TBASE + HANGUL_TCOUNT))) { + /* 2 char Hangul sequences */ + if ((combining < (HANGUL_VBASE + HANGUL_VCOUNT)) && + (base >= HANGUL_LBASE && base < (HANGUL_LBASE + HANGUL_LCOUNT))) { + return (HANGUL_SBASE + + ((base - HANGUL_LBASE)*(HANGUL_VCOUNT*HANGUL_TCOUNT)) + + ((combining - HANGUL_VBASE)*HANGUL_TCOUNT)); + } + + /* 3 char Hangul sequences */ + if ((combining > HANGUL_TBASE) && + (base >= HANGUL_SBASE && base < (HANGUL_SBASE + HANGUL_SCOUNT))) { + if ((base - HANGUL_SBASE) % HANGUL_TCOUNT) + return (0); + else + return (base + (combining - HANGUL_TBASE)); + } + } + + value = getmappedvalue32( + (const unicode_mappings32 *)__CFUniCharPrecompSourceTable, + __CFUniCharPrecompositionTableLength, combining); + + if (value) { + value = getmappedvalue16( + (const unicode_mappings16 *) + ((const u_int32_t *)__CFUniCharBMPPrecompDestinationTable + (value & 0xFFFF)), + (value >> 16), base); + } + return (value); +} + + +/* + * priortysort - order combining chars into canonical order + * + * Similar to CFUniCharPrioritySort + */ +static void +priortysort(u_int16_t* characters, int count) +{ + u_int32_t p1, p2; + u_int16_t *ch1, *ch2; + u_int16_t *end; + int changes = 0; + + end = characters + count; + do { + changes = 0; + ch1 = characters; + ch2 = characters + 1; + p2 = get_combining_class(*ch1); + while (ch2 < end) { + p1 = p2; + p2 = get_combining_class(*ch2); + if (p1 > p2 && p2 != 0) { + u_int32_t tmp; + + tmp = *ch1; + *ch1 = *ch2; + *ch2 = tmp; + changes = 1; + + /* + * Make sure that p2 contains the combining class for the + * character now stored at *ch2. This isn't required for + * correctness, but it will be more efficient if a character + * with a large combining class has to "bubble past" several + * characters with lower combining classes. 
+ */ + p2 = p1; + } + ++ch1; + ++ch2; + } + } while (changes); +} + + +/* + * Invalid NTFS filename characters are encoded using the + * SFM (Services for Macintosh) private use Unicode characters. + * + * These should only be used for SMB, MSDOS or NTFS. + * + * Illegal NTFS Char SFM Unicode Char + * ---------------------------------------- + * 0x01-0x1f 0xf001-0xf01f + * '"' 0xf020 + * '*' 0xf021 + * '/' 0xf022 + * '<' 0xf023 + * '>' 0xf024 + * '?' 0xf025 + * '\' 0xf026 + * '|' 0xf027 + * ' ' 0xf028 (Only if last char of the name) + * '.' 0xf029 (Only if last char of the name) + * ---------------------------------------- + * + * Reference: http://support.microsoft.com/kb/q117258/ + */ + +#define MAX_SFM2MAC 0x29 +#define SFMCODE_PREFIX_MASK 0xf000 + +/* + * In the Mac OS 9 days the colon was illegal in a file name. For that reason + * SFM had no conversion for the colon. There is a conversion for the + * slash. In Mac OS X the slash is illegal in a file name. So for us the colon + * is a slash and a slash is a colon. So we can just replace the slash with the + * colon in our tables and everything will just work. 
+ */ +static u_int8_t +sfm2mac[42] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 00 - 07 */ + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 08 - 0F */ + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 10 - 17 */ + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 18 - 1F */ + 0x22, 0x2a, 0x3a, 0x3c, 0x3e, 0x3f, 0x5c, 0x7c, /* 20 - 27 */ + 0x20, 0x2e /* 28 - 29 */ +}; + +static u_int8_t +mac2sfm[112] = { + 0x20, 0x21, 0x20, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20 - 27 */ + 0x28, 0x29, 0x21, 0x2b, 0x2c, 0x2d, 0x2e, 0x22, /* 28 - 2f */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30 - 37 */ + 0x38, 0x39, 0x22, 0x3b, 0x23, 0x3d, 0x24, 0x25, /* 38 - 3f */ + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 40 - 47 */ + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 48 - 4f */ + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 50 - 57 */ + 0x58, 0x59, 0x5a, 0x5b, 0x26, 0x5d, 0x5e, 0x5f, /* 58 - 5f */ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60 - 67 */ + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68 - 6f */ + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70 - 77 */ + 0x78, 0x79, 0x7a, 0x7b, 0x27, 0x7d, 0x7e, 0x7f /* 78 - 7f */ +}; + + +/* + * Encode illegal NTFS filename characters into SFM Private Unicode characters + * + * Assumes non-zero ASCII input. + */ +static u_int16_t +ucs_to_sfm(u_int16_t ucs_ch, int lastchar) +{ + /* The last character of filename cannot be a space or period. */ + if (lastchar) { + if (ucs_ch == 0x20) + return (0xf028); + else if (ucs_ch == 0x2e) + return (0xf029); + } + /* 0x01 - 0x1f is simple transformation. 
*/ + if (ucs_ch <= 0x1f) { + return (ucs_ch | 0xf000); + } else /* 0x20 - 0x7f */ { + u_int16_t lsb; + + lsb = mac2sfm[ucs_ch - 0x0020]; + if (lsb != ucs_ch) + return(0xf000 | lsb); + } + return (ucs_ch); +} + +/* + * Decode any SFM Private Unicode characters + */ +static u_int16_t +sfm_to_ucs(u_int16_t ucs_ch) +{ + if (((ucs_ch & 0xffC0) == SFMCODE_PREFIX_MASK) && + ((ucs_ch & 0x003f) <= MAX_SFM2MAC)) { + ucs_ch = sfm2mac[ucs_ch & 0x003f]; + } + return (ucs_ch); +} + diff --git a/livefiles_hfs_plugin/lf_hfs_sbunicode.h b/livefiles_hfs_plugin/lf_hfs_sbunicode.h new file mode 100644 index 0000000..346cbb0 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_sbunicode.h @@ -0,0 +1,135 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_sbunicode.h + * livefiles_hfs + * + * Created by Oded Shoshani on 31/1/18. + */ + +#ifndef lf_hfs_sbunicode_h +#define lf_hfs_sbunicode_h + +/* + Includes Unicode 3.2 decomposition code derived from Core Foundation + */ + +/* + * UTF-8 (Unicode Transformation Format) + * + * UTF-8 is the Unicode Transformation Format that serializes a Unicode + * character as a sequence of one to four bytes. Only the shortest form + * required to represent the significant Unicode bits is legal. 
+ * + * UTF-8 Multibyte Codes + * + * Bytes Bits Unicode Min Unicode Max UTF-8 Byte Sequence (binary) + * ----------------------------------------------------------------------------- + * 1 7 0x0000 0x007F 0xxxxxxx + * 2 11 0x0080 0x07FF 110xxxxx 10xxxxxx + * 3 16 0x0800 0xFFFF 1110xxxx 10xxxxxx 10xxxxxx + * 4 21 0x10000 0x10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + * ----------------------------------------------------------------------------- + */ + +/* + * UTF-8 encode/decode flags + */ +#define UTF_REVERSE_ENDIAN 0x0001 /* reverse UCS-2 byte order */ +#define UTF_ADD_NULL_TERM 0x0002 /* add null termination */ +#define UTF_DECOMPOSED 0x0004 /* generate fully decomposed UCS-2 */ +#define UTF_PRECOMPOSED 0x0008 /* generate precomposed UCS-2 */ +#define UTF_ESCAPE_ILLEGAL 0x0010 /* escape illegal UTF-8 */ +#define UTF_SFM_CONVERSIONS 0x0020 /* Use SFM mappings for illegal NTFS chars */ + +#define UTF_BIG_ENDIAN \ +((BYTE_ORDER == BIG_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) +#define UTF_LITTLE_ENDIAN \ +((BYTE_ORDER == LITTLE_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) + + + +/* + * utf8_encodelen - Calculate the UTF-8 encoding length + * + * This function takes a Unicode input string, ucsp, of ucslen bytes + * and calculates the size of the UTF-8 output in bytes (not including + * a NULL termination byte). The string must reside in kernel memory. + * + * If '/' chars are possible in the Unicode input then an alternate + * (replacement) char should be provided in altslash. 
+ * + * FLAGS + * UTF_REVERSE_ENDIAN: Unicode byte order is opposite current runtime + * + * UTF_BIG_ENDIAN: Unicode byte order is always big endian + * + * UTF_LITTLE_ENDIAN: Unicode byte order is always little endian + * + * UTF_DECOMPOSED: generate fully decomposed output + * + * UTF_PRECOMPOSED is ignored since utf8_encodestr doesn't support it + * + * ERRORS + * None + */ +size_t utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, int flags); + +/* + * utf8_encodestr - Encodes a Unicode string to UTF-8 + * + * NOTES: + * The resulting UTF-8 string is NULL terminated. + * + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. + * + * input flags: + * UTF_REVERSE_ENDIAN: Unicode byteorder is opposite current runtime + * + * UTF_BIG_ENDIAN: Unicode byte order is always big endian + * + * UTF_LITTLE_ENDIAN: Unicode byte order is always little endian + * + * UTF_DECOMPOSED: generate fully decomposed output + * + * UTF_ADD_NULL_TERM: add NULL termination to UTF-8 output + * + * result: + * ENAMETOOLONG: Name didn't fit; only buflen bytes were encoded + * + * EINVAL: Illegal char found; char was replaced by an '_'. + */ +extern int utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, size_t * utf8len, size_t buflen, u_int16_t altslash, int flags); + +/* + * utf8_decodestr - Decodes a UTF-8 string back to Unicode + * + * NOTES: + * The input UTF-8 string does not need to be null terminated + * if utf8len is set. + * + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. 
+ * + * input flags: + * UTF_REVERSE_ENDIAN: Unicode byte order is opposite current runtime + * + * UTF_BIG_ENDIAN: Unicode byte order is always big endian + * + * UTF_LITTLE_ENDIAN: Unicode byte order is always little endian + * + * UTF_DECOMPOSED: generate fully decomposed output (NFD) + * + * UTF_PRECOMPOSED: generate precomposed output (NFC) + * + * UTF_ESCAPE_ILLEGAL: percent escape any illegal UTF-8 input + * + * result: + * ENAMETOOLONG: Name didn't fit; only ucslen chars were decoded. + * + * EINVAL: Illegal UTF-8 sequence found. + */ +int utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, size_t *ucslen, size_t buflen, u_int16_t altslash, int flags); + +#endif /* lf_hfs_sbunicode_h */ + diff --git a/livefiles_hfs_plugin/lf_hfs_ucs_string_cmp_data.h b/livefiles_hfs_plugin/lf_hfs_ucs_string_cmp_data.h new file mode 100644 index 0000000..9397cd8 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_ucs_string_cmp_data.h @@ -0,0 +1,268 @@ +// +// lf_hfs_ucs_string_cmp_data.h +// hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#ifndef lf_hfs_ucs_string_cmp_data_h +#define lf_hfs_ucs_string_cmp_data_h + +/* + * For better performance, the case folding table for basic latin + * is separate from the others. This eliminates the extra lookup + * to get the offset to this table. 
+ * + * Note: 0x0000 now maps to 0 so that it will be ignored + */ +u_int16_t gLatinCaseFold[] = { + /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, + /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, + /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, + /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, + /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, + /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, + /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F, + /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F, + /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, + /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, + /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7, 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF, + /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7, 0x00F8, 0x00D9, 0x00DA, 0x00DB, 
0x00DC, 0x00DD, 0x00FE, 0x00DF, + /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, + /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF, +}; + +/* The lower case table consists of a 256-entry high-byte table followed by some number of + 256-entry subtables. The high-byte table contains either an offset to the subtable for + characters with that high byte or zero, which means that there are no case mappings or + ignored characters in that block. Ignored characters are mapped to zero. + */ + +u_int16_t gLowerCaseTable[] = { + + /* High-byte indices ( == 0 iff no case mapping and no ignorables ) */ + + /* 0 */ 0x0000, 0x0100, 0x0000, 0x0200, 0x0300, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x0500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 2 */ 0x0600, 0x0700, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, + /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0800, 0x0900, + + /* Table 1 (for high byte 0x01) */ + + /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, 0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F, + /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117, 0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F, + /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127, 0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F, + /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137, 0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140, + /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147, 0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F, + /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157, 0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F, + /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167, 0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F, + 
/* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177, 0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F, + /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188, 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259, + /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268, 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275, + /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8, 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF, + /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292, 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF, + /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9, 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF, + /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7, 0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF, + /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7, 0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF, + /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7, 0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF, + + /* Table 2 (for high byte 0x03) */ + + /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F, + /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F, + /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F, + /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F, + /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347, 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F, + /* 5 */ 0x0350, 0x0351, 
0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357, 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F, + /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F, + /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377, 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F, + /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387, 0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F, + /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF, + /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF, + /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7, 0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF, + /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7, 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF, + /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7, 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF, + + /* Table 3 (for high byte 0x04) */ + + /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407, 0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F, + /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 
0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, + /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467, 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F, + /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477, 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F, + /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F, + /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497, 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F, + /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7, 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF, + /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7, 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF, + /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8, 0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF, + /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7, 0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF, + /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7, 0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF, + /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7, 0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF, + + /* Table 4 (for high byte 0x05) */ + + /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507, 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F, + /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517, 
0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F, + /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527, 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F, + /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557, 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F, + /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587, 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F, + /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597, 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F, + /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7, 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF, + /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF, + /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7, 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF, + /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF, + /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF, + /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7, 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF, + + 
/* Table 5 (for high byte 0x10) */ + + /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F, + /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017, 0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F, + /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027, 0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F, + /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037, 0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F, + /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047, 0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F, + /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057, 0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F, + /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067, 0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F, + /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077, 0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F, + /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087, 0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F, + /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097, 0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F, + /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7, 0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF, + /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* E */ 0x10E0, 0x10E1, 
0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7, 0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF, + + /* Table 6 (for high byte 0x20) */ + + /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017, 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F, + /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027, 0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F, + /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, 0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F, + /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047, 0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F, + /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057, 0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F, + /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067, 0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F, + /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, 0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F, + /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F, + /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7, 0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF, + /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7, 0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF, + /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 
0x20C5, 0x20C6, 0x20C7, 0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF, + /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7, 0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF, + /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7, 0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF, + /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7, 0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF, + + /* Table 7 (for high byte 0x21) */ + + /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F, + /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117, 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F, + /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127, 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F, + /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F, + /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F, + /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F, + /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187, 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F, + /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197, 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F, + /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7, 
0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF, + /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7, 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF, + /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7, 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF, + /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7, 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF, + /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7, 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF, + /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7, 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF, + + /* Table 8 (for high byte 0xFE) */ + + /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07, 0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F, + /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17, 0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F, + /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27, 0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F, + /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37, 0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F, + /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47, 0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F, + /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57, 0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F, + /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67, 0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F, + /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77, 0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F, + /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87, 0xFE88, 0xFE89, 0xFE8A, 
0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F, + /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97, 0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F, + /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7, 0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF, + /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7, 0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF, + /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7, 0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF, + /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7, 0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF, + /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7, 0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF, + /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7, 0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000, + + /* Table 9 (for high byte 0xFF) */ + + /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07, 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F, + /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17, 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F, + /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F, + /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F, + /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67, 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 
0xFF6E, 0xFF6F, + /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77, 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F, + /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87, 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F, + /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97, 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F, + /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7, 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF, + /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7, 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF, + /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7, 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF, + /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7, 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF, + /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7, 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF, + /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7, 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF, +}; + + +/* RelString case folding table */ + +unsigned short gCompareTable[] = { + + /* 0 */ 0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, 0x0800, 0x0900, 0x0A00, 0x0B00, 0x0C00, 0x0D00, 0x0E00, 0x0F00, + /* 1 */ 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700, 0x1800, 0x1900, 0x1A00, 0x1B00, 0x1C00, 0x1D00, 0x1E00, 0x1F00, + /* 2 */ 0x2000, 0x2100, 0x2200, 0x2300, 0x2400, 0x2500, 0x2600, 0x2700, 0x2800, 0x2900, 0x2A00, 0x2B00, 0x2C00, 0x2D00, 0x2E00, 0x2F00, + /* 3 */ 0x3000, 0x3100, 0x3200, 0x3300, 0x3400, 0x3500, 0x3600, 0x3700, 0x3800, 0x3900, 0x3A00, 0x3B00, 0x3C00, 0x3D00, 0x3E00, 0x3F00, + /* 4 */ 0x4000, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, 0x4800, 0x4900, 0x4A00, 0x4B00, 
0x4C00, 0x4D00, 0x4E00, 0x4F00, + /* 5 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5A00, 0x5B00, 0x5C00, 0x5D00, 0x5E00, 0x5F00, + + // 0x60 maps to 'a' + // range 0x61 to 0x7a ('a' to 'z') map to upper case + + /* 6 */ 0x4180, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, 0x4800, 0x4900, 0x4A00, 0x4B00, 0x4C00, 0x4D00, 0x4E00, 0x4F00, + /* 7 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5A00, 0x7B00, 0x7C00, 0x7D00, 0x7E00, 0x7F00, + + // range 0x80 to 0xd8 gets mapped... + + /* 8 */ 0x4108, 0x410C, 0x4310, 0x4502, 0x4E0A, 0x4F08, 0x5508, 0x4182, 0x4104, 0x4186, 0x4108, 0x410A, 0x410C, 0x4310, 0x4502, 0x4584, + /* 9 */ 0x4586, 0x4588, 0x4982, 0x4984, 0x4986, 0x4988, 0x4E0A, 0x4F82, 0x4F84, 0x4F86, 0x4F08, 0x4F0A, 0x5582, 0x5584, 0x5586, 0x5508, + /* A */ 0xA000, 0xA100, 0xA200, 0xA300, 0xA400, 0xA500, 0xA600, 0x5382, 0xA800, 0xA900, 0xAA00, 0xAB00, 0xAC00, 0xAD00, 0x4114, 0x4F0E, + /* B */ 0xB000, 0xB100, 0xB200, 0xB300, 0xB400, 0xB500, 0xB600, 0xB700, 0xB800, 0xB900, 0xBA00, 0x4192, 0x4F92, 0xBD00, 0x4114, 0x4F0E, + /* C */ 0xC000, 0xC100, 0xC200, 0xC300, 0xC400, 0xC500, 0xC600, 0x2206, 0x2208, 0xC900, 0x2000, 0x4104, 0x410A, 0x4F0A, 0x4F14, 0x4F14, + /* D */ 0xD000, 0xD100, 0x2202, 0x2204, 0x2702, 0x2704, 0xD600, 0xD700, 0x5988, 0xD900, 0xDA00, 0xDB00, 0xDC00, 0xDD00, 0xDE00, 0xDF00, + + /* E */ 0xE000, 0xE100, 0xE200, 0xE300, 0xE400, 0xE500, 0xE600, 0xE700, 0xE800, 0xE900, 0xEA00, 0xEB00, 0xEC00, 0xED00, 0xEE00, 0xEF00, + /* F */ 0xF000, 0xF100, 0xF200, 0xF300, 0xF400, 0xF500, 0xF600, 0xF700, 0xF800, 0xF900, 0xFA00, 0xFB00, 0xFC00, 0xFD00, 0xFE00, 0xFF00, + +}; + +#endif /* lf_hfs_ucs_string_cmp_data_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.c b/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.c new file mode 100644 index 0000000..652db6e --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.c @@ -0,0 +1,401 @@ +// +// lf_hfs_unicode_wrappers.c 
+// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. +// + +#include "lf_hfs_unicode_wrappers.h" +#include "lf_hfs_ucs_string_cmp_data.h" +#include "lf_hfs_sbunicode.h" + + + +enum { + kMinFileExtensionChars = 1, /* does not include dot */ + kMaxFileExtensionChars = 5 /* does not include dot */ +}; + + +#define EXTENSIONCHAR(c) (((c) >= 0x61 && (c) <= 0x7A) || \ + ((c) >= 0x41 && (c) <= 0x5A) || \ + ((c) >= 0x30 && (c) <= 0x39)) + + +#define IsHexDigit(c) (((c) >= (u_int8_t) '0' && (c) <= (u_int8_t) '9') || \ + ((c) >= (u_int8_t) 'A' && (c) <= (u_int8_t) 'F')) + + +static void +GetFilenameExtension( ItemCount length, ConstUniCharArrayPtr unicodeStr, char* extStr ); + + +static u_int32_t +HexStringToInteger( u_int32_t length, const u_int8_t *hexStr ); + + +/* + * Get filename extension (if any) as a C string + */ +static void +GetFilenameExtension(ItemCount length, ConstUniCharArrayPtr unicodeStr, char * extStr) +{ + u_int64_t i; + UniChar c; + u_int16_t extChars; /* number of extension chars (excluding dot) */ + u_int16_t maxExtChars; + Boolean foundExtension; + + extStr[0] = '\0'; /* assume there's no extension */ + + if ( length < 3 ) + return; /* "x.y" is smallest possible extension */ + + if ( length < (kMaxFileExtensionChars + 2) ) + maxExtChars = length - 2; /* save room for prefix + dot */ + else + maxExtChars = kMaxFileExtensionChars; + + i = length; + extChars = 0; + foundExtension = false; + + while ( extChars <= maxExtChars ) { + c = unicodeStr[--i]; + + /* look for leading dot */ + if ( c == (UniChar) '.' 
) { + if ( extChars > 0 ) /* cannot end with a dot */ + foundExtension = true; + break; + } + + if ( EXTENSIONCHAR(c) ) + ++extChars; + else + break; + } + + /* if we found one then copy it */ + if ( foundExtension ) { + u_int8_t *extStrPtr = (u_int8_t *)extStr; + const UniChar *unicodeStrPtr = &unicodeStr[i]; + + for ( i = 0; i <= extChars; ++i ) + *(extStrPtr++) = (u_int8_t) *(unicodeStrPtr++); + extStr[extChars + 1] = '\0'; /* terminate extension + dot */ + } +} + +// +// FastUnicodeCompare - Compare two Unicode strings; produce a relative ordering +// +// IF RESULT +// -------------------------- +// str1 < str2 => -1 +// str1 = str2 => 0 +// str1 > str2 => +1 +// +// The lower case table starts with 256 entries (one for each of the upper bytes +// of the original Unicode char). If that entry is zero, then all characters with +// that upper byte are already case folded. If the entry is non-zero, then it is +// the _index_ (not byte offset) of the start of the sub-table for the characters +// with that upper byte. All ignorable characters are folded to the value zero. +// +// In pseudocode: +// +// Let c = source Unicode character +// Let table[] = lower case table +// +// lower = table[highbyte(c)] +// if (lower == 0) +// lower = c +// else +// lower = table[lower+lowbyte(c)] +// +// if (lower == 0) +// ignore this character +// +// To handle ignorable characters, we now need a loop to find the next valid character. +// Also, we can't pre-compute the number of characters to compare; the string length might +// be larger than the number of non-ignorable characters. Further, we must be able to handle +// ignorable characters at any point in the string, including as the first or last characters. +// We use a zero value as a sentinel to detect both end-of-string and ignorable characters. 
+// Since the File Manager doesn't prevent the NUL character (value zero) as part of a filename, +// the case mapping table is assumed to map u+0000 to some non-zero value (like 0xFFFF, which is +// an invalid Unicode character). +// +// Pseudocode: +// +// while (1) { +// c1 = GetNextValidChar(str1) // returns zero if at end of string +// c2 = GetNextValidChar(str2) +// +// if (c1 != c2) break // found a difference +// +// if (c1 == 0) // reached end of string on both strings at once? +// return 0; // yes, so strings are equal +// } +// +// // When we get here, c1 != c2. So, we just need to determine which one is less. +// if (c1 < c2) +// return -1; +// else +// return 1; +// + +int32_t FastUnicodeCompare ( register ConstUniCharArrayPtr str1, register ItemCount length1, + register ConstUniCharArrayPtr str2, register ItemCount length2) +{ + register u_int16_t c1,c2; + register u_int16_t temp; + register u_int16_t* lowerCaseTable; + + lowerCaseTable = (u_int16_t*) gLowerCaseTable; + + while (1) { + /* Set default values for c1, c2 in case there are no more valid chars */ + c1 = 0; + c2 = 0; + + /* Find next non-ignorable char from str1, or zero if no more */ + while (length1 && c1 == 0) { + c1 = *(str1++); + --length1; + /* check for basic latin first */ + if (c1 < 0x0100) { + c1 = gLatinCaseFold[c1]; + break; + } + /* case fold if neccessary */ + if ((temp = lowerCaseTable[c1>>8]) != 0) + c1 = lowerCaseTable[temp + (c1 & 0x00FF)]; + } + + + /* Find next non-ignorable char from str2, or zero if no more */ + while (length2 && c2 == 0) { + c2 = *(str2++); + --length2; + /* check for basic latin first */ + if (c2 < 0x0100) { + c2 = gLatinCaseFold[c2]; + break; + } + /* case fold if neccessary */ + if ((temp = lowerCaseTable[c2>>8]) != 0) + c2 = lowerCaseTable[temp + (c2 & 0x00FF)]; + } + + if (c1 != c2) // found a difference, so stop looping + break; + + if (c1 == 0) // did we reach the end of both strings at the same time? 
+ return 0; // yes, so strings are equal + } + + if (c1 < c2) + return -1; + else + return 1; +} + + +/* + * UnicodeBinaryCompare + * Compare two UTF-16 strings and perform case-sensitive (binary) matching against them. + * + * Results are emitted like FastUnicodeCompare: + * + * + * IF RESULT + * -------------------------- + * str1 < str2 => -1 + * str1 = str2 => 0 + * str1 > str2 => +1 + * + * The case matching source code is greatly simplified due to the lack of case-folding + * in this comparison routine. We compare, in order: the lengths, then do character-by- + * character comparisons. + * + */ +int32_t UnicodeBinaryCompare (register ConstUniCharArrayPtr str1, register ItemCount len1, + register ConstUniCharArrayPtr str2, register ItemCount len2) { + uint16_t c1 =0; + uint16_t c2 =0; + ItemCount string_length; + int32_t result = 0; + + /* First generate the string length (for comparison purposes) */ + if (len1 < len2) { + string_length = len1; + --result; + } + else if (len1 > len2) { + string_length = len2; + ++result; + } + else { + string_length = len1; + } + + /* now compare the two string pointers */ + while (string_length--) { + c1 = *(str1++); + c2 = *(str2++); + + if (c1 > c2) { + result = 1; + break; + } + + if (c1 < c2) { + result = -1; + break; + } + /* If equal, iterate to the next two respective chars */ + } + + return result; +} + +/* + * extract the file id from a mangled name + */ +HFSCatalogNodeID +GetEmbeddedFileID(const unsigned char * filename, u_int32_t length, u_int32_t *prefixLength) +{ + short extChars; + short i; + u_int8_t c; + + *prefixLength = 0; + + if ( filename == NULL ) + return 0; + + if ( length < 28 ) + return 0; /* too small to have been mangled */ + + /* big enough for a file ID (#10) and an extension (.x) ? 
*/ + if ( length > 5 ) + extChars = CountFilenameExtensionChars(filename, length); + else + extChars = 0; + + /* skip over dot plus extension characters */ + if ( extChars > 0 ) + length -= (extChars + 1); + + /* scan for file id digits */ + for ( i = length - 1; i >= 0; --i) { + c = filename[i]; + + /* look for file ID marker */ + if ( c == '#' ) { + if ( (length - i) < 3 ) + break; /* too small to be a file ID */ + + *prefixLength = i; + return HexStringToInteger(length - i - 1, &filename[i+1]); + } + + if ( !IsHexDigit(c) ) + break; /* file ID string must have hex digits */ + } + + return 0; +} + +/* + * Count filename extension characters (if any) + */ +u_int32_t +CountFilenameExtensionChars( const unsigned char * filename, u_int32_t length ) +{ + UniChar c; + u_int16_t maxExtChars; + + if ( length < 3 ) + return 0; /* "x.y" is smallest possible extension */ + + if ( length < (kMaxFileExtensionChars + 2) ) + maxExtChars = length - 2; /* save room for prefix + dot */ + else + maxExtChars = kMaxFileExtensionChars; + + u_int32_t extChars = 0; /* number of extension chars (excluding dot) - assume there's no extension */ + u_int32_t i = length - 1; /* index to last ascii character */ + + while ( extChars <= maxExtChars ) { + c = filename[i--]; + + /* look for leading dot */ + if ( c == (u_int8_t) '.' 
) { + if ( extChars > 0 ) /* cannot end with a dot */ + return (extChars); + + break; + } + + if ( EXTENSIONCHAR(c) ) + ++extChars; + else + break; + } + + return 0; +} + +static u_int32_t +HexStringToInteger(u_int32_t length, const u_int8_t *hexStr) +{ + u_int32_t value; + u_int32_t i; + u_int8_t c; + const u_int8_t *p; + + value = 0; + p = hexStr; + + for ( i = 0; i < length; ++i ) { + c = *p++; + + if (c >= '0' && c <= '9') { + value = value << 4; + value += (u_int32_t) c - (u_int32_t) '0'; + } else if (c >= 'A' && c <= 'F') { + value = value << 4; + value += 10 + ((unsigned int) c - (unsigned int) 'A'); + } else { + return 0; /* bad character */ + } + } + + return value; +} + +OSErr +ConvertUnicodeToUTF8Mangled(ByteCount srcLen, ConstUniCharArrayPtr srcStr, ByteCount maxDstLen, + ByteCount *actualDstLen, unsigned char* dstStr, HFSCatalogNodeID cnid) +{ + ByteCount subMaxLen; + size_t utf8len; + char fileIDStr[15]; + char extStr[15]; + + snprintf(fileIDStr, sizeof(fileIDStr), "#%X", cnid); + GetFilenameExtension(srcLen/sizeof(UniChar), srcStr, extStr); + + /* remove extension chars from source */ + srcLen -= strlen(extStr) * sizeof(UniChar); + subMaxLen = maxDstLen - (strlen(extStr) + strlen(fileIDStr)); + + (void) utf8_encodestr(srcStr, srcLen, dstStr, &utf8len, subMaxLen, ':', UTF_ADD_NULL_TERM); + + strlcat((char *)dstStr, fileIDStr, maxDstLen); + strlcat((char *)dstStr, extStr, maxDstLen); + *actualDstLen = utf8len + (strlen(extStr) + strlen(fileIDStr)); + + return noErr; +} diff --git a/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.h b/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.h new file mode 100644 index 0000000..3833e57 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_unicode_wrappers.h @@ -0,0 +1,28 @@ +// +// lf_hfs_unicode_wrappers.h +// livefiles_hfs +// +// Created by Yakov Ben Zaken on 22/03/2018. 
+// + +#ifndef lf_hfs_unicode_wrappers_h +#define lf_hfs_unicode_wrappers_h + +#include +#include "lf_hfs_defs.h" +#include "lf_hfs_file_mgr_internal.h" + +int32_t FastUnicodeCompare ( register ConstUniCharArrayPtr str1, register ItemCount len1, register ConstUniCharArrayPtr str2, register ItemCount len2); + +int32_t UnicodeBinaryCompare ( register ConstUniCharArrayPtr str1, register ItemCount len1, register ConstUniCharArrayPtr str2, register ItemCount len2 ); + +HFSCatalogNodeID GetEmbeddedFileID( ConstStr31Param filename, u_int32_t length, u_int32_t *prefixLength ); + +OSErr ConvertUnicodeToUTF8Mangled(ByteCount srcLen, ConstUniCharArrayPtr srcStr, ByteCount maxDstLen, + ByteCount *actualDstLen, unsigned char* dstStr, HFSCatalogNodeID cnid); + +u_int32_t +CountFilenameExtensionChars( const unsigned char * filename, u_int32_t length ); + + +#endif /* lf_hfs_unicode_wrappers_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_utfconvdata.h b/livefiles_hfs_plugin/lf_hfs_utfconvdata.h new file mode 100644 index 0000000..2f6c9dd --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_utfconvdata.h @@ -0,0 +1,1713 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_utfconvdata.h + * livefiles_hfs + * + * Created by Oded Shoshani on 31/1/18. + */ + +#ifndef lf_hfs_utfconvdata_h +#define lf_hfs_utfconvdata_h +/* + * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + Derived from Core Foundation headers: + + CFUniCharDecompData.h + CFUniCharPrecompData.h + CFUniCharNonBaseData.h + */ + +static const u_int16_t +__CFUniCharDecompositionTable[] = { + 0x00C0, 0x2000, 0x00C1, 0x2002, 0x00C2, 0x2004, 0x00C3, 0x2006, + 0x00C4, 0x2008, 0x00C5, 0x200A, 0x00C7, 0x200C, 0x00C8, 0x200E, + 0x00C9, 0x2010, 0x00CA, 0x2012, 0x00CB, 0x2014, 0x00CC, 0x2016, + 0x00CD, 0x2018, 0x00CE, 0x201A, 0x00CF, 0x201C, 0x00D1, 0x201E, + 0x00D2, 0x2020, 0x00D3, 0x2022, 0x00D4, 0x2024, 0x00D5, 0x2026, + 0x00D6, 0x2028, 0x00D9, 0x202A, 0x00DA, 0x202C, 0x00DB, 0x202E, + 0x00DC, 0x2030, 0x00DD, 0x2032, 0x00E0, 0x2034, 0x00E1, 0x2036, + 0x00E2, 0x2038, 0x00E3, 0x203A, 0x00E4, 0x203C, 0x00E5, 0x203E, + 0x00E7, 0x2040, 0x00E8, 0x2042, 0x00E9, 0x2044, 0x00EA, 0x2046, + 0x00EB, 0x2048, 0x00EC, 0x204A, 0x00ED, 0x204C, 0x00EE, 0x204E, + 0x00EF, 0x2050, 0x00F1, 0x2052, 0x00F2, 0x2054, 0x00F3, 0x2056, + 0x00F4, 0x2058, 0x00F5, 0x205A, 0x00F6, 0x205C, 0x00F9, 0x205E, + 0x00FA, 0x2060, 0x00FB, 0x2062, 
0x00FC, 0x2064, 0x00FD, 0x2066, + 0x00FF, 0x2068, 0x0100, 0x206A, 0x0101, 0x206C, 0x0102, 0x206E, + 0x0103, 0x2070, 0x0104, 0x2072, 0x0105, 0x2074, 0x0106, 0x2076, + 0x0107, 0x2078, 0x0108, 0x207A, 0x0109, 0x207C, 0x010A, 0x207E, + 0x010B, 0x2080, 0x010C, 0x2082, 0x010D, 0x2084, 0x010E, 0x2086, + 0x010F, 0x2088, 0x0112, 0x208A, 0x0113, 0x208C, 0x0114, 0x208E, + 0x0115, 0x2090, 0x0116, 0x2092, 0x0117, 0x2094, 0x0118, 0x2096, + 0x0119, 0x2098, 0x011A, 0x209A, 0x011B, 0x209C, 0x011C, 0x209E, + 0x011D, 0x20A0, 0x011E, 0x20A2, 0x011F, 0x20A4, 0x0120, 0x20A6, + 0x0121, 0x20A8, 0x0122, 0x20AA, 0x0123, 0x20AC, 0x0124, 0x20AE, + 0x0125, 0x20B0, 0x0128, 0x20B2, 0x0129, 0x20B4, 0x012A, 0x20B6, + 0x012B, 0x20B8, 0x012C, 0x20BA, 0x012D, 0x20BC, 0x012E, 0x20BE, + 0x012F, 0x20C0, 0x0130, 0x20C2, 0x0134, 0x20C4, 0x0135, 0x20C6, + 0x0136, 0x20C8, 0x0137, 0x20CA, 0x0139, 0x20CC, 0x013A, 0x20CE, + 0x013B, 0x20D0, 0x013C, 0x20D2, 0x013D, 0x20D4, 0x013E, 0x20D6, + 0x0143, 0x20D8, 0x0144, 0x20DA, 0x0145, 0x20DC, 0x0146, 0x20DE, + 0x0147, 0x20E0, 0x0148, 0x20E2, 0x014C, 0x20E4, 0x014D, 0x20E6, + 0x014E, 0x20E8, 0x014F, 0x20EA, 0x0150, 0x20EC, 0x0151, 0x20EE, + 0x0154, 0x20F0, 0x0155, 0x20F2, 0x0156, 0x20F4, 0x0157, 0x20F6, + 0x0158, 0x20F8, 0x0159, 0x20FA, 0x015A, 0x20FC, 0x015B, 0x20FE, + 0x015C, 0x2100, 0x015D, 0x2102, 0x015E, 0x2104, 0x015F, 0x2106, + 0x0160, 0x2108, 0x0161, 0x210A, 0x0162, 0x210C, 0x0163, 0x210E, + 0x0164, 0x2110, 0x0165, 0x2112, 0x0168, 0x2114, 0x0169, 0x2116, + 0x016A, 0x2118, 0x016B, 0x211A, 0x016C, 0x211C, 0x016D, 0x211E, + 0x016E, 0x2120, 0x016F, 0x2122, 0x0170, 0x2124, 0x0171, 0x2126, + 0x0172, 0x2128, 0x0173, 0x212A, 0x0174, 0x212C, 0x0175, 0x212E, + 0x0176, 0x2130, 0x0177, 0x2132, 0x0178, 0x2134, 0x0179, 0x2136, + 0x017A, 0x2138, 0x017B, 0x213A, 0x017C, 0x213C, 0x017D, 0x213E, + 0x017E, 0x2140, 0x01A0, 0x2142, 0x01A1, 0x2144, 0x01AF, 0x2146, + 0x01B0, 0x2148, 0x01CD, 0x214A, 0x01CE, 0x214C, 0x01CF, 0x214E, + 0x01D0, 0x2150, 0x01D1, 0x2152, 0x01D2, 0x2154, 
0x01D3, 0x2156, + 0x01D4, 0x2158, 0x01D5, 0xA15A, 0x01D6, 0xA15C, 0x01D7, 0xA15E, + 0x01D8, 0xA160, 0x01D9, 0xA162, 0x01DA, 0xA164, 0x01DB, 0xA166, + 0x01DC, 0xA168, 0x01DE, 0xA16A, 0x01DF, 0xA16C, 0x01E0, 0xA16E, + 0x01E1, 0xA170, 0x01E2, 0x2172, 0x01E3, 0x2174, 0x01E6, 0x2176, + 0x01E7, 0x2178, 0x01E8, 0x217A, 0x01E9, 0x217C, 0x01EA, 0x217E, + 0x01EB, 0x2180, 0x01EC, 0xA182, 0x01ED, 0xA184, 0x01EE, 0x2186, + 0x01EF, 0x2188, 0x01F0, 0x218A, 0x01F4, 0x218C, 0x01F5, 0x218E, + 0x01F8, 0x2190, 0x01F9, 0x2192, 0x01FA, 0xA194, 0x01FB, 0xA196, + 0x01FC, 0x2198, 0x01FD, 0x219A, 0x01FE, 0x219C, 0x01FF, 0x219E, + 0x0200, 0x21A0, 0x0201, 0x21A2, 0x0202, 0x21A4, 0x0203, 0x21A6, + 0x0204, 0x21A8, 0x0205, 0x21AA, 0x0206, 0x21AC, 0x0207, 0x21AE, + 0x0208, 0x21B0, 0x0209, 0x21B2, 0x020A, 0x21B4, 0x020B, 0x21B6, + 0x020C, 0x21B8, 0x020D, 0x21BA, 0x020E, 0x21BC, 0x020F, 0x21BE, + 0x0210, 0x21C0, 0x0211, 0x21C2, 0x0212, 0x21C4, 0x0213, 0x21C6, + 0x0214, 0x21C8, 0x0215, 0x21CA, 0x0216, 0x21CC, 0x0217, 0x21CE, + 0x0218, 0x21D0, 0x0219, 0x21D2, 0x021A, 0x21D4, 0x021B, 0x21D6, + 0x021E, 0x21D8, 0x021F, 0x21DA, 0x0226, 0x21DC, 0x0227, 0x21DE, + 0x0228, 0x21E0, 0x0229, 0x21E2, 0x022A, 0xA1E4, 0x022B, 0xA1E6, + 0x022C, 0xA1E8, 0x022D, 0xA1EA, 0x022E, 0x21EC, 0x022F, 0x21EE, + 0x0230, 0xA1F0, 0x0231, 0xA1F2, 0x0232, 0x21F4, 0x0233, 0x21F6, + 0x0340, 0x1300, 0x0341, 0x1301, 0x0343, 0x1313, 0x0344, 0x21F8, + 0x0374, 0x12B9, 0x037E, 0x103B, 0x0385, 0x21FA, 0x0386, 0x21FC, + 0x0387, 0x10B7, 0x0388, 0x21FE, 0x0389, 0x2200, 0x038A, 0x2202, + 0x038C, 0x2204, 0x038E, 0x2206, 0x038F, 0x2208, 0x0390, 0xA20A, + 0x03AA, 0x220C, 0x03AB, 0x220E, 0x03AC, 0x2210, 0x03AD, 0x2212, + 0x03AE, 0x2214, 0x03AF, 0x2216, 0x03B0, 0xA218, 0x03CA, 0x221A, + 0x03CB, 0x221C, 0x03CC, 0x221E, 0x03CD, 0x2220, 0x03CE, 0x2222, + 0x03D3, 0x2224, 0x03D4, 0x2226, 0x0400, 0x2228, 0x0401, 0x222A, + 0x0403, 0x222C, 0x0407, 0x222E, 0x040C, 0x2230, 0x040D, 0x2232, + 0x040E, 0x2234, 0x0419, 0x2236, 0x0439, 0x2238, 0x0450, 0x223A, + 
0x0451, 0x223C, 0x0453, 0x223E, 0x0457, 0x2240, 0x045C, 0x2242, + 0x045D, 0x2244, 0x045E, 0x2246, 0x0476, 0x2248, 0x0477, 0x224A, + 0x04C1, 0x224C, 0x04C2, 0x224E, 0x04D0, 0x2250, 0x04D1, 0x2252, + 0x04D2, 0x2254, 0x04D3, 0x2256, 0x04D6, 0x2258, 0x04D7, 0x225A, + 0x04DA, 0x225C, 0x04DB, 0x225E, 0x04DC, 0x2260, 0x04DD, 0x2262, + 0x04DE, 0x2264, 0x04DF, 0x2266, 0x04E2, 0x2268, 0x04E3, 0x226A, + 0x04E4, 0x226C, 0x04E5, 0x226E, 0x04E6, 0x2270, 0x04E7, 0x2272, + 0x04EA, 0x2274, 0x04EB, 0x2276, 0x04EC, 0x2278, 0x04ED, 0x227A, + 0x04EE, 0x227C, 0x04EF, 0x227E, 0x04F0, 0x2280, 0x04F1, 0x2282, + 0x04F2, 0x2284, 0x04F3, 0x2286, 0x04F4, 0x2288, 0x04F5, 0x228A, + 0x04F8, 0x228C, 0x04F9, 0x228E, 0x0622, 0x2290, 0x0623, 0x2292, + 0x0624, 0x2294, 0x0625, 0x2296, 0x0626, 0x2298, 0x06C0, 0x229A, + 0x06C2, 0x229C, 0x06D3, 0x229E, 0x0929, 0x22A0, 0x0931, 0x22A2, + 0x0934, 0x22A4, 0x0958, 0x22A6, 0x0959, 0x22A8, 0x095A, 0x22AA, + 0x095B, 0x22AC, 0x095C, 0x22AE, 0x095D, 0x22B0, 0x095E, 0x22B2, + 0x095F, 0x22B4, 0x09CB, 0x22B6, 0x09CC, 0x22B8, 0x09DC, 0x22BA, + 0x09DD, 0x22BC, 0x09DF, 0x22BE, 0x0A33, 0x22C0, 0x0A36, 0x22C2, + 0x0A59, 0x22C4, 0x0A5A, 0x22C6, 0x0A5B, 0x22C8, 0x0A5E, 0x22CA, + 0x0B48, 0x22CC, 0x0B4B, 0x22CE, 0x0B4C, 0x22D0, 0x0B5C, 0x22D2, + 0x0B5D, 0x22D4, 0x0B94, 0x22D6, 0x0BCA, 0x22D8, 0x0BCB, 0x22DA, + 0x0BCC, 0x22DC, 0x0C48, 0x22DE, 0x0CC0, 0x22E0, 0x0CC7, 0x22E2, + 0x0CC8, 0x22E4, 0x0CCA, 0x22E6, 0x0CCB, 0xA2E8, 0x0D4A, 0x22EA, + 0x0D4B, 0x22EC, 0x0D4C, 0x22EE, 0x0DDA, 0x22F0, 0x0DDC, 0x22F2, + 0x0DDD, 0xA2F4, 0x0DDE, 0x22F6, 0x0F43, 0x22F8, 0x0F4D, 0x22FA, + 0x0F52, 0x22FC, 0x0F57, 0x22FE, 0x0F5C, 0x2300, 0x0F69, 0x2302, + 0x0F73, 0x2304, 0x0F75, 0x2306, 0x0F76, 0x2308, 0x0F78, 0x230A, + 0x0F81, 0x230C, 0x0F93, 0x230E, 0x0F9D, 0x2310, 0x0FA2, 0x2312, + 0x0FA7, 0x2314, 0x0FAC, 0x2316, 0x0FB9, 0x2318, 0x1026, 0x231A, + 0x1E00, 0x231C, 0x1E01, 0x231E, 0x1E02, 0x2320, 0x1E03, 0x2322, + 0x1E04, 0x2324, 0x1E05, 0x2326, 0x1E06, 0x2328, 0x1E07, 0x232A, + 0x1E08, 0xA32C, 
0x1E09, 0xA32E, 0x1E0A, 0x2330, 0x1E0B, 0x2332, + 0x1E0C, 0x2334, 0x1E0D, 0x2336, 0x1E0E, 0x2338, 0x1E0F, 0x233A, + 0x1E10, 0x233C, 0x1E11, 0x233E, 0x1E12, 0x2340, 0x1E13, 0x2342, + 0x1E14, 0xA344, 0x1E15, 0xA346, 0x1E16, 0xA348, 0x1E17, 0xA34A, + 0x1E18, 0x234C, 0x1E19, 0x234E, 0x1E1A, 0x2350, 0x1E1B, 0x2352, + 0x1E1C, 0xA354, 0x1E1D, 0xA356, 0x1E1E, 0x2358, 0x1E1F, 0x235A, + 0x1E20, 0x235C, 0x1E21, 0x235E, 0x1E22, 0x2360, 0x1E23, 0x2362, + 0x1E24, 0x2364, 0x1E25, 0x2366, 0x1E26, 0x2368, 0x1E27, 0x236A, + 0x1E28, 0x236C, 0x1E29, 0x236E, 0x1E2A, 0x2370, 0x1E2B, 0x2372, + 0x1E2C, 0x2374, 0x1E2D, 0x2376, 0x1E2E, 0xA378, 0x1E2F, 0xA37A, + 0x1E30, 0x237C, 0x1E31, 0x237E, 0x1E32, 0x2380, 0x1E33, 0x2382, + 0x1E34, 0x2384, 0x1E35, 0x2386, 0x1E36, 0x2388, 0x1E37, 0x238A, + 0x1E38, 0xA38C, 0x1E39, 0xA38E, 0x1E3A, 0x2390, 0x1E3B, 0x2392, + 0x1E3C, 0x2394, 0x1E3D, 0x2396, 0x1E3E, 0x2398, 0x1E3F, 0x239A, + 0x1E40, 0x239C, 0x1E41, 0x239E, 0x1E42, 0x23A0, 0x1E43, 0x23A2, + 0x1E44, 0x23A4, 0x1E45, 0x23A6, 0x1E46, 0x23A8, 0x1E47, 0x23AA, + 0x1E48, 0x23AC, 0x1E49, 0x23AE, 0x1E4A, 0x23B0, 0x1E4B, 0x23B2, + 0x1E4C, 0xA3B4, 0x1E4D, 0xA3B6, 0x1E4E, 0xA3B8, 0x1E4F, 0xA3BA, + 0x1E50, 0xA3BC, 0x1E51, 0xA3BE, 0x1E52, 0xA3C0, 0x1E53, 0xA3C2, + 0x1E54, 0x23C4, 0x1E55, 0x23C6, 0x1E56, 0x23C8, 0x1E57, 0x23CA, + 0x1E58, 0x23CC, 0x1E59, 0x23CE, 0x1E5A, 0x23D0, 0x1E5B, 0x23D2, + 0x1E5C, 0xA3D4, 0x1E5D, 0xA3D6, 0x1E5E, 0x23D8, 0x1E5F, 0x23DA, + 0x1E60, 0x23DC, 0x1E61, 0x23DE, 0x1E62, 0x23E0, 0x1E63, 0x23E2, + 0x1E64, 0xA3E4, 0x1E65, 0xA3E6, 0x1E66, 0xA3E8, 0x1E67, 0xA3EA, + 0x1E68, 0xA3EC, 0x1E69, 0xA3EE, 0x1E6A, 0x23F0, 0x1E6B, 0x23F2, + 0x1E6C, 0x23F4, 0x1E6D, 0x23F6, 0x1E6E, 0x23F8, 0x1E6F, 0x23FA, + 0x1E70, 0x23FC, 0x1E71, 0x23FE, 0x1E72, 0x2400, 0x1E73, 0x2402, + 0x1E74, 0x2404, 0x1E75, 0x2406, 0x1E76, 0x2408, 0x1E77, 0x240A, + 0x1E78, 0xA40C, 0x1E79, 0xA40E, 0x1E7A, 0xA410, 0x1E7B, 0xA412, + 0x1E7C, 0x2414, 0x1E7D, 0x2416, 0x1E7E, 0x2418, 0x1E7F, 0x241A, + 0x1E80, 0x241C, 0x1E81, 0x241E, 
0x1E82, 0x2420, 0x1E83, 0x2422, + 0x1E84, 0x2424, 0x1E85, 0x2426, 0x1E86, 0x2428, 0x1E87, 0x242A, + 0x1E88, 0x242C, 0x1E89, 0x242E, 0x1E8A, 0x2430, 0x1E8B, 0x2432, + 0x1E8C, 0x2434, 0x1E8D, 0x2436, 0x1E8E, 0x2438, 0x1E8F, 0x243A, + 0x1E90, 0x243C, 0x1E91, 0x243E, 0x1E92, 0x2440, 0x1E93, 0x2442, + 0x1E94, 0x2444, 0x1E95, 0x2446, 0x1E96, 0x2448, 0x1E97, 0x244A, + 0x1E98, 0x244C, 0x1E99, 0x244E, 0x1E9B, 0x2450, 0x1EA0, 0x2452, + 0x1EA1, 0x2454, 0x1EA2, 0x2456, 0x1EA3, 0x2458, 0x1EA4, 0xA45A, + 0x1EA5, 0xA45C, 0x1EA6, 0xA45E, 0x1EA7, 0xA460, 0x1EA8, 0xA462, + 0x1EA9, 0xA464, 0x1EAA, 0xA466, 0x1EAB, 0xA468, 0x1EAC, 0xA46A, + 0x1EAD, 0xA46C, 0x1EAE, 0xA46E, 0x1EAF, 0xA470, 0x1EB0, 0xA472, + 0x1EB1, 0xA474, 0x1EB2, 0xA476, 0x1EB3, 0xA478, 0x1EB4, 0xA47A, + 0x1EB5, 0xA47C, 0x1EB6, 0xA47E, 0x1EB7, 0xA480, 0x1EB8, 0x2482, + 0x1EB9, 0x2484, 0x1EBA, 0x2486, 0x1EBB, 0x2488, 0x1EBC, 0x248A, + 0x1EBD, 0x248C, 0x1EBE, 0xA48E, 0x1EBF, 0xA490, 0x1EC0, 0xA492, + 0x1EC1, 0xA494, 0x1EC2, 0xA496, 0x1EC3, 0xA498, 0x1EC4, 0xA49A, + 0x1EC5, 0xA49C, 0x1EC6, 0xA49E, 0x1EC7, 0xA4A0, 0x1EC8, 0x24A2, + 0x1EC9, 0x24A4, 0x1ECA, 0x24A6, 0x1ECB, 0x24A8, 0x1ECC, 0x24AA, + 0x1ECD, 0x24AC, 0x1ECE, 0x24AE, 0x1ECF, 0x24B0, 0x1ED0, 0xA4B2, + 0x1ED1, 0xA4B4, 0x1ED2, 0xA4B6, 0x1ED3, 0xA4B8, 0x1ED4, 0xA4BA, + 0x1ED5, 0xA4BC, 0x1ED6, 0xA4BE, 0x1ED7, 0xA4C0, 0x1ED8, 0xA4C2, + 0x1ED9, 0xA4C4, 0x1EDA, 0xA4C6, 0x1EDB, 0xA4C8, 0x1EDC, 0xA4CA, + 0x1EDD, 0xA4CC, 0x1EDE, 0xA4CE, 0x1EDF, 0xA4D0, 0x1EE0, 0xA4D2, + 0x1EE1, 0xA4D4, 0x1EE2, 0xA4D6, 0x1EE3, 0xA4D8, 0x1EE4, 0x24DA, + 0x1EE5, 0x24DC, 0x1EE6, 0x24DE, 0x1EE7, 0x24E0, 0x1EE8, 0xA4E2, + 0x1EE9, 0xA4E4, 0x1EEA, 0xA4E6, 0x1EEB, 0xA4E8, 0x1EEC, 0xA4EA, + 0x1EED, 0xA4EC, 0x1EEE, 0xA4EE, 0x1EEF, 0xA4F0, 0x1EF0, 0xA4F2, + 0x1EF1, 0xA4F4, 0x1EF2, 0x24F6, 0x1EF3, 0x24F8, 0x1EF4, 0x24FA, + 0x1EF5, 0x24FC, 0x1EF6, 0x24FE, 0x1EF7, 0x2500, 0x1EF8, 0x2502, + 0x1EF9, 0x2504, 0x1F00, 0x2506, 0x1F01, 0x2508, 0x1F02, 0xA50A, + 0x1F03, 0xA50C, 0x1F04, 0xA50E, 0x1F05, 0xA510, 
0x1F06, 0xA512, + 0x1F07, 0xA514, 0x1F08, 0x2516, 0x1F09, 0x2518, 0x1F0A, 0xA51A, + 0x1F0B, 0xA51C, 0x1F0C, 0xA51E, 0x1F0D, 0xA520, 0x1F0E, 0xA522, + 0x1F0F, 0xA524, 0x1F10, 0x2526, 0x1F11, 0x2528, 0x1F12, 0xA52A, + 0x1F13, 0xA52C, 0x1F14, 0xA52E, 0x1F15, 0xA530, 0x1F18, 0x2532, + 0x1F19, 0x2534, 0x1F1A, 0xA536, 0x1F1B, 0xA538, 0x1F1C, 0xA53A, + 0x1F1D, 0xA53C, 0x1F20, 0x253E, 0x1F21, 0x2540, 0x1F22, 0xA542, + 0x1F23, 0xA544, 0x1F24, 0xA546, 0x1F25, 0xA548, 0x1F26, 0xA54A, + 0x1F27, 0xA54C, 0x1F28, 0x254E, 0x1F29, 0x2550, 0x1F2A, 0xA552, + 0x1F2B, 0xA554, 0x1F2C, 0xA556, 0x1F2D, 0xA558, 0x1F2E, 0xA55A, + 0x1F2F, 0xA55C, 0x1F30, 0x255E, 0x1F31, 0x2560, 0x1F32, 0xA562, + 0x1F33, 0xA564, 0x1F34, 0xA566, 0x1F35, 0xA568, 0x1F36, 0xA56A, + 0x1F37, 0xA56C, 0x1F38, 0x256E, 0x1F39, 0x2570, 0x1F3A, 0xA572, + 0x1F3B, 0xA574, 0x1F3C, 0xA576, 0x1F3D, 0xA578, 0x1F3E, 0xA57A, + 0x1F3F, 0xA57C, 0x1F40, 0x257E, 0x1F41, 0x2580, 0x1F42, 0xA582, + 0x1F43, 0xA584, 0x1F44, 0xA586, 0x1F45, 0xA588, 0x1F48, 0x258A, + 0x1F49, 0x258C, 0x1F4A, 0xA58E, 0x1F4B, 0xA590, 0x1F4C, 0xA592, + 0x1F4D, 0xA594, 0x1F50, 0x2596, 0x1F51, 0x2598, 0x1F52, 0xA59A, + 0x1F53, 0xA59C, 0x1F54, 0xA59E, 0x1F55, 0xA5A0, 0x1F56, 0xA5A2, + 0x1F57, 0xA5A4, 0x1F59, 0x25A6, 0x1F5B, 0xA5A8, 0x1F5D, 0xA5AA, + 0x1F5F, 0xA5AC, 0x1F60, 0x25AE, 0x1F61, 0x25B0, 0x1F62, 0xA5B2, + 0x1F63, 0xA5B4, 0x1F64, 0xA5B6, 0x1F65, 0xA5B8, 0x1F66, 0xA5BA, + 0x1F67, 0xA5BC, 0x1F68, 0x25BE, 0x1F69, 0x25C0, 0x1F6A, 0xA5C2, + 0x1F6B, 0xA5C4, 0x1F6C, 0xA5C6, 0x1F6D, 0xA5C8, 0x1F6E, 0xA5CA, + 0x1F6F, 0xA5CC, 0x1F70, 0x25CE, 0x1F71, 0x93AC, 0x1F72, 0x25D0, + 0x1F73, 0x93AD, 0x1F74, 0x25D2, 0x1F75, 0x93AE, 0x1F76, 0x25D4, + 0x1F77, 0x93AF, 0x1F78, 0x25D6, 0x1F79, 0x93CC, 0x1F7A, 0x25D8, + 0x1F7B, 0x93CD, 0x1F7C, 0x25DA, 0x1F7D, 0x93CE, 0x1F80, 0xA5DC, + 0x1F81, 0xA5DE, 0x1F82, 0xA5E0, 0x1F83, 0xA5E2, 0x1F84, 0xA5E4, + 0x1F85, 0xA5E6, 0x1F86, 0xA5E8, 0x1F87, 0xA5EA, 0x1F88, 0xA5EC, + 0x1F89, 0xA5EE, 0x1F8A, 0xA5F0, 0x1F8B, 0xA5F2, 0x1F8C, 0xA5F4, + 
0x1F8D, 0xA5F6, 0x1F8E, 0xA5F8, 0x1F8F, 0xA5FA, 0x1F90, 0xA5FC, + 0x1F91, 0xA5FE, 0x1F92, 0xA600, 0x1F93, 0xA602, 0x1F94, 0xA604, + 0x1F95, 0xA606, 0x1F96, 0xA608, 0x1F97, 0xA60A, 0x1F98, 0xA60C, + 0x1F99, 0xA60E, 0x1F9A, 0xA610, 0x1F9B, 0xA612, 0x1F9C, 0xA614, + 0x1F9D, 0xA616, 0x1F9E, 0xA618, 0x1F9F, 0xA61A, 0x1FA0, 0xA61C, + 0x1FA1, 0xA61E, 0x1FA2, 0xA620, 0x1FA3, 0xA622, 0x1FA4, 0xA624, + 0x1FA5, 0xA626, 0x1FA6, 0xA628, 0x1FA7, 0xA62A, 0x1FA8, 0xA62C, + 0x1FA9, 0xA62E, 0x1FAA, 0xA630, 0x1FAB, 0xA632, 0x1FAC, 0xA634, + 0x1FAD, 0xA636, 0x1FAE, 0xA638, 0x1FAF, 0xA63A, 0x1FB0, 0x263C, + 0x1FB1, 0x263E, 0x1FB2, 0xA640, 0x1FB3, 0x2642, 0x1FB4, 0xA644, + 0x1FB6, 0x2646, 0x1FB7, 0xA648, 0x1FB8, 0x264A, 0x1FB9, 0x264C, + 0x1FBA, 0x264E, 0x1FBB, 0x9386, 0x1FBC, 0x2650, 0x1FBE, 0x13B9, + 0x1FC1, 0x2652, 0x1FC2, 0xA654, 0x1FC3, 0x2656, 0x1FC4, 0xA658, + 0x1FC6, 0x265A, 0x1FC7, 0xA65C, 0x1FC8, 0x265E, 0x1FC9, 0x9388, + 0x1FCA, 0x2660, 0x1FCB, 0x9389, 0x1FCC, 0x2662, 0x1FCD, 0x2664, + 0x1FCE, 0x2666, 0x1FCF, 0x2668, 0x1FD0, 0x266A, 0x1FD1, 0x266C, + 0x1FD2, 0xA66E, 0x1FD3, 0x9390, 0x1FD6, 0x2670, 0x1FD7, 0xA672, + 0x1FD8, 0x2674, 0x1FD9, 0x2676, 0x1FDA, 0x2678, 0x1FDB, 0x938A, + 0x1FDD, 0x267A, 0x1FDE, 0x267C, 0x1FDF, 0x267E, 0x1FE0, 0x2680, + 0x1FE1, 0x2682, 0x1FE2, 0xA684, 0x1FE3, 0x93B0, 0x1FE4, 0x2686, + 0x1FE5, 0x2688, 0x1FE6, 0x268A, 0x1FE7, 0xA68C, 0x1FE8, 0x268E, + 0x1FE9, 0x2690, 0x1FEA, 0x2692, 0x1FEB, 0x938E, 0x1FEC, 0x2694, + 0x1FED, 0x2696, 0x1FEE, 0x9385, 0x1FEF, 0x1060, 0x1FF2, 0xA698, + 0x1FF3, 0x269A, 0x1FF4, 0xA69C, 0x1FF6, 0x269E, 0x1FF7, 0xA6A0, + 0x1FF8, 0x26A2, 0x1FF9, 0x938C, 0x1FFA, 0x26A4, 0x1FFB, 0x938F, + 0x1FFC, 0x26A6, 0x1FFD, 0x10B4, 0x304C, 0x26A8, 0x304E, 0x26AA, + 0x3050, 0x26AC, 0x3052, 0x26AE, 0x3054, 0x26B0, 0x3056, 0x26B2, + 0x3058, 0x26B4, 0x305A, 0x26B6, 0x305C, 0x26B8, 0x305E, 0x26BA, + 0x3060, 0x26BC, 0x3062, 0x26BE, 0x3065, 0x26C0, 0x3067, 0x26C2, + 0x3069, 0x26C4, 0x3070, 0x26C6, 0x3071, 0x26C8, 0x3073, 0x26CA, + 0x3074, 0x26CC, 
0x3076, 0x26CE, 0x3077, 0x26D0, 0x3079, 0x26D2, + 0x307A, 0x26D4, 0x307C, 0x26D6, 0x307D, 0x26D8, 0x3094, 0x26DA, + 0x309E, 0x26DC, 0x30AC, 0x26DE, 0x30AE, 0x26E0, 0x30B0, 0x26E2, + 0x30B2, 0x26E4, 0x30B4, 0x26E6, 0x30B6, 0x26E8, 0x30B8, 0x26EA, + 0x30BA, 0x26EC, 0x30BC, 0x26EE, 0x30BE, 0x26F0, 0x30C0, 0x26F2, + 0x30C2, 0x26F4, 0x30C5, 0x26F6, 0x30C7, 0x26F8, 0x30C9, 0x26FA, + 0x30D0, 0x26FC, 0x30D1, 0x26FE, 0x30D3, 0x2700, 0x30D4, 0x2702, + 0x30D6, 0x2704, 0x30D7, 0x2706, 0x30D9, 0x2708, 0x30DA, 0x270A, + 0x30DC, 0x270C, 0x30DD, 0x270E, 0x30F4, 0x2710, 0x30F7, 0x2712, + 0x30F8, 0x2714, 0x30F9, 0x2716, 0x30FA, 0x2718, 0x30FE, 0x271A, + 0xFB1D, 0x271C, 0xFB1F, 0x271E, 0xFB2A, 0x2720, 0xFB2B, 0x2722, + 0xFB2C, 0xA724, 0xFB2D, 0xA726, 0xFB2E, 0x2728, 0xFB2F, 0x272A, + 0xFB30, 0x272C, 0xFB31, 0x272E, 0xFB32, 0x2730, 0xFB33, 0x2732, + 0xFB34, 0x2734, 0xFB35, 0x2736, 0xFB36, 0x2738, 0xFB38, 0x273A, + 0xFB39, 0x273C, 0xFB3A, 0x273E, 0xFB3B, 0x2740, 0xFB3C, 0x2742, + 0xFB3E, 0x2744, 0xFB40, 0x2746, 0xFB41, 0x2748, 0xFB43, 0x274A, + 0xFB44, 0x274C, 0xFB46, 0x274E, 0xFB47, 0x2750, 0xFB48, 0x2752, + 0xFB49, 0x2754, 0xFB4A, 0x2756, 0xFB4B, 0x2758, 0xFB4C, 0x275A, + 0xFB4D, 0x275C, 0xFB4E, 0x275E +}; + +static const u_int32_t __UniCharDecompositionTableLength = +(sizeof(__CFUniCharDecompositionTable) / (sizeof(u_int16_t) * 2)); + + +static const u_int16_t +__CFUniCharMultipleDecompositionTable[] = { + 0x0041, 0x0300, 0x0041, 0x0301, 0x0041, 0x0302, 0x0041, 0x0303, + 0x0041, 0x0308, 0x0041, 0x030A, 0x0043, 0x0327, 0x0045, 0x0300, + 0x0045, 0x0301, 0x0045, 0x0302, 0x0045, 0x0308, 0x0049, 0x0300, + 0x0049, 0x0301, 0x0049, 0x0302, 0x0049, 0x0308, 0x004E, 0x0303, + 0x004F, 0x0300, 0x004F, 0x0301, 0x004F, 0x0302, 0x004F, 0x0303, + 0x004F, 0x0308, 0x0055, 0x0300, 0x0055, 0x0301, 0x0055, 0x0302, + 0x0055, 0x0308, 0x0059, 0x0301, 0x0061, 0x0300, 0x0061, 0x0301, + 0x0061, 0x0302, 0x0061, 0x0303, 0x0061, 0x0308, 0x0061, 0x030A, + 0x0063, 0x0327, 0x0065, 0x0300, 0x0065, 0x0301, 0x0065, 
0x0302, + 0x0065, 0x0308, 0x0069, 0x0300, 0x0069, 0x0301, 0x0069, 0x0302, + 0x0069, 0x0308, 0x006E, 0x0303, 0x006F, 0x0300, 0x006F, 0x0301, + 0x006F, 0x0302, 0x006F, 0x0303, 0x006F, 0x0308, 0x0075, 0x0300, + 0x0075, 0x0301, 0x0075, 0x0302, 0x0075, 0x0308, 0x0079, 0x0301, + 0x0079, 0x0308, 0x0041, 0x0304, 0x0061, 0x0304, 0x0041, 0x0306, + 0x0061, 0x0306, 0x0041, 0x0328, 0x0061, 0x0328, 0x0043, 0x0301, + 0x0063, 0x0301, 0x0043, 0x0302, 0x0063, 0x0302, 0x0043, 0x0307, + 0x0063, 0x0307, 0x0043, 0x030C, 0x0063, 0x030C, 0x0044, 0x030C, + 0x0064, 0x030C, 0x0045, 0x0304, 0x0065, 0x0304, 0x0045, 0x0306, + 0x0065, 0x0306, 0x0045, 0x0307, 0x0065, 0x0307, 0x0045, 0x0328, + 0x0065, 0x0328, 0x0045, 0x030C, 0x0065, 0x030C, 0x0047, 0x0302, + 0x0067, 0x0302, 0x0047, 0x0306, 0x0067, 0x0306, 0x0047, 0x0307, + 0x0067, 0x0307, 0x0047, 0x0327, 0x0067, 0x0327, 0x0048, 0x0302, + 0x0068, 0x0302, 0x0049, 0x0303, 0x0069, 0x0303, 0x0049, 0x0304, + 0x0069, 0x0304, 0x0049, 0x0306, 0x0069, 0x0306, 0x0049, 0x0328, + 0x0069, 0x0328, 0x0049, 0x0307, 0x004A, 0x0302, 0x006A, 0x0302, + 0x004B, 0x0327, 0x006B, 0x0327, 0x004C, 0x0301, 0x006C, 0x0301, + 0x004C, 0x0327, 0x006C, 0x0327, 0x004C, 0x030C, 0x006C, 0x030C, + 0x004E, 0x0301, 0x006E, 0x0301, 0x004E, 0x0327, 0x006E, 0x0327, + 0x004E, 0x030C, 0x006E, 0x030C, 0x004F, 0x0304, 0x006F, 0x0304, + 0x004F, 0x0306, 0x006F, 0x0306, 0x004F, 0x030B, 0x006F, 0x030B, + 0x0052, 0x0301, 0x0072, 0x0301, 0x0052, 0x0327, 0x0072, 0x0327, + 0x0052, 0x030C, 0x0072, 0x030C, 0x0053, 0x0301, 0x0073, 0x0301, + 0x0053, 0x0302, 0x0073, 0x0302, 0x0053, 0x0327, 0x0073, 0x0327, + 0x0053, 0x030C, 0x0073, 0x030C, 0x0054, 0x0327, 0x0074, 0x0327, + 0x0054, 0x030C, 0x0074, 0x030C, 0x0055, 0x0303, 0x0075, 0x0303, + 0x0055, 0x0304, 0x0075, 0x0304, 0x0055, 0x0306, 0x0075, 0x0306, + 0x0055, 0x030A, 0x0075, 0x030A, 0x0055, 0x030B, 0x0075, 0x030B, + 0x0055, 0x0328, 0x0075, 0x0328, 0x0057, 0x0302, 0x0077, 0x0302, + 0x0059, 0x0302, 0x0079, 0x0302, 0x0059, 0x0308, 0x005A, 0x0301, + 0x007A, 
0x0301, 0x005A, 0x0307, 0x007A, 0x0307, 0x005A, 0x030C, + 0x007A, 0x030C, 0x004F, 0x031B, 0x006F, 0x031B, 0x0055, 0x031B, + 0x0075, 0x031B, 0x0041, 0x030C, 0x0061, 0x030C, 0x0049, 0x030C, + 0x0069, 0x030C, 0x004F, 0x030C, 0x006F, 0x030C, 0x0055, 0x030C, + 0x0075, 0x030C, 0x00DC, 0x0304, 0x00FC, 0x0304, 0x00DC, 0x0301, + 0x00FC, 0x0301, 0x00DC, 0x030C, 0x00FC, 0x030C, 0x00DC, 0x0300, + 0x00FC, 0x0300, 0x00C4, 0x0304, 0x00E4, 0x0304, 0x0226, 0x0304, + 0x0227, 0x0304, 0x00C6, 0x0304, 0x00E6, 0x0304, 0x0047, 0x030C, + 0x0067, 0x030C, 0x004B, 0x030C, 0x006B, 0x030C, 0x004F, 0x0328, + 0x006F, 0x0328, 0x01EA, 0x0304, 0x01EB, 0x0304, 0x01B7, 0x030C, + 0x0292, 0x030C, 0x006A, 0x030C, 0x0047, 0x0301, 0x0067, 0x0301, + 0x004E, 0x0300, 0x006E, 0x0300, 0x00C5, 0x0301, 0x00E5, 0x0301, + 0x00C6, 0x0301, 0x00E6, 0x0301, 0x00D8, 0x0301, 0x00F8, 0x0301, + 0x0041, 0x030F, 0x0061, 0x030F, 0x0041, 0x0311, 0x0061, 0x0311, + 0x0045, 0x030F, 0x0065, 0x030F, 0x0045, 0x0311, 0x0065, 0x0311, + 0x0049, 0x030F, 0x0069, 0x030F, 0x0049, 0x0311, 0x0069, 0x0311, + 0x004F, 0x030F, 0x006F, 0x030F, 0x004F, 0x0311, 0x006F, 0x0311, + 0x0052, 0x030F, 0x0072, 0x030F, 0x0052, 0x0311, 0x0072, 0x0311, + 0x0055, 0x030F, 0x0075, 0x030F, 0x0055, 0x0311, 0x0075, 0x0311, + 0x0053, 0x0326, 0x0073, 0x0326, 0x0054, 0x0326, 0x0074, 0x0326, + 0x0048, 0x030C, 0x0068, 0x030C, 0x0041, 0x0307, 0x0061, 0x0307, + 0x0045, 0x0327, 0x0065, 0x0327, 0x00D6, 0x0304, 0x00F6, 0x0304, + 0x00D5, 0x0304, 0x00F5, 0x0304, 0x004F, 0x0307, 0x006F, 0x0307, + 0x022E, 0x0304, 0x022F, 0x0304, 0x0059, 0x0304, 0x0079, 0x0304, + 0x0308, 0x0301, 0x00A8, 0x0301, 0x0391, 0x0301, 0x0395, 0x0301, + 0x0397, 0x0301, 0x0399, 0x0301, 0x039F, 0x0301, 0x03A5, 0x0301, + 0x03A9, 0x0301, 0x03CA, 0x0301, 0x0399, 0x0308, 0x03A5, 0x0308, + 0x03B1, 0x0301, 0x03B5, 0x0301, 0x03B7, 0x0301, 0x03B9, 0x0301, + 0x03CB, 0x0301, 0x03B9, 0x0308, 0x03C5, 0x0308, 0x03BF, 0x0301, + 0x03C5, 0x0301, 0x03C9, 0x0301, 0x03D2, 0x0301, 0x03D2, 0x0308, + 0x0415, 0x0300, 0x0415, 
0x0308, 0x0413, 0x0301, 0x0406, 0x0308, + 0x041A, 0x0301, 0x0418, 0x0300, 0x0423, 0x0306, 0x0418, 0x0306, + 0x0438, 0x0306, 0x0435, 0x0300, 0x0435, 0x0308, 0x0433, 0x0301, + 0x0456, 0x0308, 0x043A, 0x0301, 0x0438, 0x0300, 0x0443, 0x0306, + 0x0474, 0x030F, 0x0475, 0x030F, 0x0416, 0x0306, 0x0436, 0x0306, + 0x0410, 0x0306, 0x0430, 0x0306, 0x0410, 0x0308, 0x0430, 0x0308, + 0x0415, 0x0306, 0x0435, 0x0306, 0x04D8, 0x0308, 0x04D9, 0x0308, + 0x0416, 0x0308, 0x0436, 0x0308, 0x0417, 0x0308, 0x0437, 0x0308, + 0x0418, 0x0304, 0x0438, 0x0304, 0x0418, 0x0308, 0x0438, 0x0308, + 0x041E, 0x0308, 0x043E, 0x0308, 0x04E8, 0x0308, 0x04E9, 0x0308, + 0x042D, 0x0308, 0x044D, 0x0308, 0x0423, 0x0304, 0x0443, 0x0304, + 0x0423, 0x0308, 0x0443, 0x0308, 0x0423, 0x030B, 0x0443, 0x030B, + 0x0427, 0x0308, 0x0447, 0x0308, 0x042B, 0x0308, 0x044B, 0x0308, + 0x0627, 0x0653, 0x0627, 0x0654, 0x0648, 0x0654, 0x0627, 0x0655, + 0x064A, 0x0654, 0x06D5, 0x0654, 0x06C1, 0x0654, 0x06D2, 0x0654, + 0x0928, 0x093C, 0x0930, 0x093C, 0x0933, 0x093C, 0x0915, 0x093C, + 0x0916, 0x093C, 0x0917, 0x093C, 0x091C, 0x093C, 0x0921, 0x093C, + 0x0922, 0x093C, 0x092B, 0x093C, 0x092F, 0x093C, 0x09C7, 0x09BE, + 0x09C7, 0x09D7, 0x09A1, 0x09BC, 0x09A2, 0x09BC, 0x09AF, 0x09BC, + 0x0A32, 0x0A3C, 0x0A38, 0x0A3C, 0x0A16, 0x0A3C, 0x0A17, 0x0A3C, + 0x0A1C, 0x0A3C, 0x0A2B, 0x0A3C, 0x0B47, 0x0B56, 0x0B47, 0x0B3E, + 0x0B47, 0x0B57, 0x0B21, 0x0B3C, 0x0B22, 0x0B3C, 0x0B92, 0x0BD7, + 0x0BC6, 0x0BBE, 0x0BC7, 0x0BBE, 0x0BC6, 0x0BD7, 0x0C46, 0x0C56, + 0x0CBF, 0x0CD5, 0x0CC6, 0x0CD5, 0x0CC6, 0x0CD6, 0x0CC6, 0x0CC2, + 0x0CCA, 0x0CD5, 0x0D46, 0x0D3E, 0x0D47, 0x0D3E, 0x0D46, 0x0D57, + 0x0DD9, 0x0DCA, 0x0DD9, 0x0DCF, 0x0DDC, 0x0DCA, 0x0DD9, 0x0DDF, + 0x0F42, 0x0FB7, 0x0F4C, 0x0FB7, 0x0F51, 0x0FB7, 0x0F56, 0x0FB7, + 0x0F5B, 0x0FB7, 0x0F40, 0x0FB5, 0x0F71, 0x0F72, 0x0F71, 0x0F74, + 0x0FB2, 0x0F80, 0x0FB3, 0x0F80, 0x0F71, 0x0F80, 0x0F92, 0x0FB7, + 0x0F9C, 0x0FB7, 0x0FA1, 0x0FB7, 0x0FA6, 0x0FB7, 0x0FAB, 0x0FB7, + 0x0F90, 0x0FB5, 0x1025, 0x102E, 0x0041, 
0x0325, 0x0061, 0x0325, + 0x0042, 0x0307, 0x0062, 0x0307, 0x0042, 0x0323, 0x0062, 0x0323, + 0x0042, 0x0331, 0x0062, 0x0331, 0x00C7, 0x0301, 0x00E7, 0x0301, + 0x0044, 0x0307, 0x0064, 0x0307, 0x0044, 0x0323, 0x0064, 0x0323, + 0x0044, 0x0331, 0x0064, 0x0331, 0x0044, 0x0327, 0x0064, 0x0327, + 0x0044, 0x032D, 0x0064, 0x032D, 0x0112, 0x0300, 0x0113, 0x0300, + 0x0112, 0x0301, 0x0113, 0x0301, 0x0045, 0x032D, 0x0065, 0x032D, + 0x0045, 0x0330, 0x0065, 0x0330, 0x0228, 0x0306, 0x0229, 0x0306, + 0x0046, 0x0307, 0x0066, 0x0307, 0x0047, 0x0304, 0x0067, 0x0304, + 0x0048, 0x0307, 0x0068, 0x0307, 0x0048, 0x0323, 0x0068, 0x0323, + 0x0048, 0x0308, 0x0068, 0x0308, 0x0048, 0x0327, 0x0068, 0x0327, + 0x0048, 0x032E, 0x0068, 0x032E, 0x0049, 0x0330, 0x0069, 0x0330, + 0x00CF, 0x0301, 0x00EF, 0x0301, 0x004B, 0x0301, 0x006B, 0x0301, + 0x004B, 0x0323, 0x006B, 0x0323, 0x004B, 0x0331, 0x006B, 0x0331, + 0x004C, 0x0323, 0x006C, 0x0323, 0x1E36, 0x0304, 0x1E37, 0x0304, + 0x004C, 0x0331, 0x006C, 0x0331, 0x004C, 0x032D, 0x006C, 0x032D, + 0x004D, 0x0301, 0x006D, 0x0301, 0x004D, 0x0307, 0x006D, 0x0307, + 0x004D, 0x0323, 0x006D, 0x0323, 0x004E, 0x0307, 0x006E, 0x0307, + 0x004E, 0x0323, 0x006E, 0x0323, 0x004E, 0x0331, 0x006E, 0x0331, + 0x004E, 0x032D, 0x006E, 0x032D, 0x00D5, 0x0301, 0x00F5, 0x0301, + 0x00D5, 0x0308, 0x00F5, 0x0308, 0x014C, 0x0300, 0x014D, 0x0300, + 0x014C, 0x0301, 0x014D, 0x0301, 0x0050, 0x0301, 0x0070, 0x0301, + 0x0050, 0x0307, 0x0070, 0x0307, 0x0052, 0x0307, 0x0072, 0x0307, + 0x0052, 0x0323, 0x0072, 0x0323, 0x1E5A, 0x0304, 0x1E5B, 0x0304, + 0x0052, 0x0331, 0x0072, 0x0331, 0x0053, 0x0307, 0x0073, 0x0307, + 0x0053, 0x0323, 0x0073, 0x0323, 0x015A, 0x0307, 0x015B, 0x0307, + 0x0160, 0x0307, 0x0161, 0x0307, 0x1E62, 0x0307, 0x1E63, 0x0307, + 0x0054, 0x0307, 0x0074, 0x0307, 0x0054, 0x0323, 0x0074, 0x0323, + 0x0054, 0x0331, 0x0074, 0x0331, 0x0054, 0x032D, 0x0074, 0x032D, + 0x0055, 0x0324, 0x0075, 0x0324, 0x0055, 0x0330, 0x0075, 0x0330, + 0x0055, 0x032D, 0x0075, 0x032D, 0x0168, 0x0301, 0x0169, 
0x0301, + 0x016A, 0x0308, 0x016B, 0x0308, 0x0056, 0x0303, 0x0076, 0x0303, + 0x0056, 0x0323, 0x0076, 0x0323, 0x0057, 0x0300, 0x0077, 0x0300, + 0x0057, 0x0301, 0x0077, 0x0301, 0x0057, 0x0308, 0x0077, 0x0308, + 0x0057, 0x0307, 0x0077, 0x0307, 0x0057, 0x0323, 0x0077, 0x0323, + 0x0058, 0x0307, 0x0078, 0x0307, 0x0058, 0x0308, 0x0078, 0x0308, + 0x0059, 0x0307, 0x0079, 0x0307, 0x005A, 0x0302, 0x007A, 0x0302, + 0x005A, 0x0323, 0x007A, 0x0323, 0x005A, 0x0331, 0x007A, 0x0331, + 0x0068, 0x0331, 0x0074, 0x0308, 0x0077, 0x030A, 0x0079, 0x030A, + 0x017F, 0x0307, 0x0041, 0x0323, 0x0061, 0x0323, 0x0041, 0x0309, + 0x0061, 0x0309, 0x00C2, 0x0301, 0x00E2, 0x0301, 0x00C2, 0x0300, + 0x00E2, 0x0300, 0x00C2, 0x0309, 0x00E2, 0x0309, 0x00C2, 0x0303, + 0x00E2, 0x0303, 0x1EA0, 0x0302, 0x1EA1, 0x0302, 0x0102, 0x0301, + 0x0103, 0x0301, 0x0102, 0x0300, 0x0103, 0x0300, 0x0102, 0x0309, + 0x0103, 0x0309, 0x0102, 0x0303, 0x0103, 0x0303, 0x1EA0, 0x0306, + 0x1EA1, 0x0306, 0x0045, 0x0323, 0x0065, 0x0323, 0x0045, 0x0309, + 0x0065, 0x0309, 0x0045, 0x0303, 0x0065, 0x0303, 0x00CA, 0x0301, + 0x00EA, 0x0301, 0x00CA, 0x0300, 0x00EA, 0x0300, 0x00CA, 0x0309, + 0x00EA, 0x0309, 0x00CA, 0x0303, 0x00EA, 0x0303, 0x1EB8, 0x0302, + 0x1EB9, 0x0302, 0x0049, 0x0309, 0x0069, 0x0309, 0x0049, 0x0323, + 0x0069, 0x0323, 0x004F, 0x0323, 0x006F, 0x0323, 0x004F, 0x0309, + 0x006F, 0x0309, 0x00D4, 0x0301, 0x00F4, 0x0301, 0x00D4, 0x0300, + 0x00F4, 0x0300, 0x00D4, 0x0309, 0x00F4, 0x0309, 0x00D4, 0x0303, + 0x00F4, 0x0303, 0x1ECC, 0x0302, 0x1ECD, 0x0302, 0x01A0, 0x0301, + 0x01A1, 0x0301, 0x01A0, 0x0300, 0x01A1, 0x0300, 0x01A0, 0x0309, + 0x01A1, 0x0309, 0x01A0, 0x0303, 0x01A1, 0x0303, 0x01A0, 0x0323, + 0x01A1, 0x0323, 0x0055, 0x0323, 0x0075, 0x0323, 0x0055, 0x0309, + 0x0075, 0x0309, 0x01AF, 0x0301, 0x01B0, 0x0301, 0x01AF, 0x0300, + 0x01B0, 0x0300, 0x01AF, 0x0309, 0x01B0, 0x0309, 0x01AF, 0x0303, + 0x01B0, 0x0303, 0x01AF, 0x0323, 0x01B0, 0x0323, 0x0059, 0x0300, + 0x0079, 0x0300, 0x0059, 0x0323, 0x0079, 0x0323, 0x0059, 0x0309, + 0x0079, 
0x0309, 0x0059, 0x0303, 0x0079, 0x0303, 0x03B1, 0x0313, + 0x03B1, 0x0314, 0x1F00, 0x0300, 0x1F01, 0x0300, 0x1F00, 0x0301, + 0x1F01, 0x0301, 0x1F00, 0x0342, 0x1F01, 0x0342, 0x0391, 0x0313, + 0x0391, 0x0314, 0x1F08, 0x0300, 0x1F09, 0x0300, 0x1F08, 0x0301, + 0x1F09, 0x0301, 0x1F08, 0x0342, 0x1F09, 0x0342, 0x03B5, 0x0313, + 0x03B5, 0x0314, 0x1F10, 0x0300, 0x1F11, 0x0300, 0x1F10, 0x0301, + 0x1F11, 0x0301, 0x0395, 0x0313, 0x0395, 0x0314, 0x1F18, 0x0300, + 0x1F19, 0x0300, 0x1F18, 0x0301, 0x1F19, 0x0301, 0x03B7, 0x0313, + 0x03B7, 0x0314, 0x1F20, 0x0300, 0x1F21, 0x0300, 0x1F20, 0x0301, + 0x1F21, 0x0301, 0x1F20, 0x0342, 0x1F21, 0x0342, 0x0397, 0x0313, + 0x0397, 0x0314, 0x1F28, 0x0300, 0x1F29, 0x0300, 0x1F28, 0x0301, + 0x1F29, 0x0301, 0x1F28, 0x0342, 0x1F29, 0x0342, 0x03B9, 0x0313, + 0x03B9, 0x0314, 0x1F30, 0x0300, 0x1F31, 0x0300, 0x1F30, 0x0301, + 0x1F31, 0x0301, 0x1F30, 0x0342, 0x1F31, 0x0342, 0x0399, 0x0313, + 0x0399, 0x0314, 0x1F38, 0x0300, 0x1F39, 0x0300, 0x1F38, 0x0301, + 0x1F39, 0x0301, 0x1F38, 0x0342, 0x1F39, 0x0342, 0x03BF, 0x0313, + 0x03BF, 0x0314, 0x1F40, 0x0300, 0x1F41, 0x0300, 0x1F40, 0x0301, + 0x1F41, 0x0301, 0x039F, 0x0313, 0x039F, 0x0314, 0x1F48, 0x0300, + 0x1F49, 0x0300, 0x1F48, 0x0301, 0x1F49, 0x0301, 0x03C5, 0x0313, + 0x03C5, 0x0314, 0x1F50, 0x0300, 0x1F51, 0x0300, 0x1F50, 0x0301, + 0x1F51, 0x0301, 0x1F50, 0x0342, 0x1F51, 0x0342, 0x03A5, 0x0314, + 0x1F59, 0x0300, 0x1F59, 0x0301, 0x1F59, 0x0342, 0x03C9, 0x0313, + 0x03C9, 0x0314, 0x1F60, 0x0300, 0x1F61, 0x0300, 0x1F60, 0x0301, + 0x1F61, 0x0301, 0x1F60, 0x0342, 0x1F61, 0x0342, 0x03A9, 0x0313, + 0x03A9, 0x0314, 0x1F68, 0x0300, 0x1F69, 0x0300, 0x1F68, 0x0301, + 0x1F69, 0x0301, 0x1F68, 0x0342, 0x1F69, 0x0342, 0x03B1, 0x0300, + 0x03B5, 0x0300, 0x03B7, 0x0300, 0x03B9, 0x0300, 0x03BF, 0x0300, + 0x03C5, 0x0300, 0x03C9, 0x0300, 0x1F00, 0x0345, 0x1F01, 0x0345, + 0x1F02, 0x0345, 0x1F03, 0x0345, 0x1F04, 0x0345, 0x1F05, 0x0345, + 0x1F06, 0x0345, 0x1F07, 0x0345, 0x1F08, 0x0345, 0x1F09, 0x0345, + 0x1F0A, 0x0345, 0x1F0B, 
0x0345, 0x1F0C, 0x0345, 0x1F0D, 0x0345, + 0x1F0E, 0x0345, 0x1F0F, 0x0345, 0x1F20, 0x0345, 0x1F21, 0x0345, + 0x1F22, 0x0345, 0x1F23, 0x0345, 0x1F24, 0x0345, 0x1F25, 0x0345, + 0x1F26, 0x0345, 0x1F27, 0x0345, 0x1F28, 0x0345, 0x1F29, 0x0345, + 0x1F2A, 0x0345, 0x1F2B, 0x0345, 0x1F2C, 0x0345, 0x1F2D, 0x0345, + 0x1F2E, 0x0345, 0x1F2F, 0x0345, 0x1F60, 0x0345, 0x1F61, 0x0345, + 0x1F62, 0x0345, 0x1F63, 0x0345, 0x1F64, 0x0345, 0x1F65, 0x0345, + 0x1F66, 0x0345, 0x1F67, 0x0345, 0x1F68, 0x0345, 0x1F69, 0x0345, + 0x1F6A, 0x0345, 0x1F6B, 0x0345, 0x1F6C, 0x0345, 0x1F6D, 0x0345, + 0x1F6E, 0x0345, 0x1F6F, 0x0345, 0x03B1, 0x0306, 0x03B1, 0x0304, + 0x1F70, 0x0345, 0x03B1, 0x0345, 0x03AC, 0x0345, 0x03B1, 0x0342, + 0x1FB6, 0x0345, 0x0391, 0x0306, 0x0391, 0x0304, 0x0391, 0x0300, + 0x0391, 0x0345, 0x00A8, 0x0342, 0x1F74, 0x0345, 0x03B7, 0x0345, + 0x03AE, 0x0345, 0x03B7, 0x0342, 0x1FC6, 0x0345, 0x0395, 0x0300, + 0x0397, 0x0300, 0x0397, 0x0345, 0x1FBF, 0x0300, 0x1FBF, 0x0301, + 0x1FBF, 0x0342, 0x03B9, 0x0306, 0x03B9, 0x0304, 0x03CA, 0x0300, + 0x03B9, 0x0342, 0x03CA, 0x0342, 0x0399, 0x0306, 0x0399, 0x0304, + 0x0399, 0x0300, 0x1FFE, 0x0300, 0x1FFE, 0x0301, 0x1FFE, 0x0342, + 0x03C5, 0x0306, 0x03C5, 0x0304, 0x03CB, 0x0300, 0x03C1, 0x0313, + 0x03C1, 0x0314, 0x03C5, 0x0342, 0x03CB, 0x0342, 0x03A5, 0x0306, + 0x03A5, 0x0304, 0x03A5, 0x0300, 0x03A1, 0x0314, 0x00A8, 0x0300, + 0x1F7C, 0x0345, 0x03C9, 0x0345, 0x03CE, 0x0345, 0x03C9, 0x0342, + 0x1FF6, 0x0345, 0x039F, 0x0300, 0x03A9, 0x0300, 0x03A9, 0x0345, + 0x304B, 0x3099, 0x304D, 0x3099, 0x304F, 0x3099, 0x3051, 0x3099, + 0x3053, 0x3099, 0x3055, 0x3099, 0x3057, 0x3099, 0x3059, 0x3099, + 0x305B, 0x3099, 0x305D, 0x3099, 0x305F, 0x3099, 0x3061, 0x3099, + 0x3064, 0x3099, 0x3066, 0x3099, 0x3068, 0x3099, 0x306F, 0x3099, + 0x306F, 0x309A, 0x3072, 0x3099, 0x3072, 0x309A, 0x3075, 0x3099, + 0x3075, 0x309A, 0x3078, 0x3099, 0x3078, 0x309A, 0x307B, 0x3099, + 0x307B, 0x309A, 0x3046, 0x3099, 0x309D, 0x3099, 0x30AB, 0x3099, + 0x30AD, 0x3099, 0x30AF, 0x3099, 0x30B1, 
0x3099, 0x30B3, 0x3099, + 0x30B5, 0x3099, 0x30B7, 0x3099, 0x30B9, 0x3099, 0x30BB, 0x3099, + 0x30BD, 0x3099, 0x30BF, 0x3099, 0x30C1, 0x3099, 0x30C4, 0x3099, + 0x30C6, 0x3099, 0x30C8, 0x3099, 0x30CF, 0x3099, 0x30CF, 0x309A, + 0x30D2, 0x3099, 0x30D2, 0x309A, 0x30D5, 0x3099, 0x30D5, 0x309A, + 0x30D8, 0x3099, 0x30D8, 0x309A, 0x30DB, 0x3099, 0x30DB, 0x309A, + 0x30A6, 0x3099, 0x30EF, 0x3099, 0x30F0, 0x3099, 0x30F1, 0x3099, + 0x30F2, 0x3099, 0x30FD, 0x3099, 0x05D9, 0x05B4, 0x05F2, 0x05B7, + 0x05E9, 0x05C1, 0x05E9, 0x05C2, 0xFB49, 0x05C1, 0xFB49, 0x05C2, + 0x05D0, 0x05B7, 0x05D0, 0x05B8, 0x05D0, 0x05BC, 0x05D1, 0x05BC, + 0x05D2, 0x05BC, 0x05D3, 0x05BC, 0x05D4, 0x05BC, 0x05D5, 0x05BC, + 0x05D6, 0x05BC, 0x05D8, 0x05BC, 0x05D9, 0x05BC, 0x05DA, 0x05BC, + 0x05DB, 0x05BC, 0x05DC, 0x05BC, 0x05DE, 0x05BC, 0x05E0, 0x05BC, + 0x05E1, 0x05BC, 0x05E3, 0x05BC, 0x05E4, 0x05BC, 0x05E6, 0x05BC, + 0x05E7, 0x05BC, 0x05E8, 0x05BC, 0x05E9, 0x05BC, 0x05EA, 0x05BC, + 0x05D5, 0x05B9, 0x05D1, 0x05BF, 0x05DB, 0x05BF, 0x05E4, 0x05BF +}; + +static const u_int8_t +__CFUniCharDecomposableBitmap[] = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, 0x00, + 0x00, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x00, 0x0C, + 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x0F, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xBF, 0xFF, 0x7E, 0x3E, 0xBF, 0xFF, 0x7E, 0xBE, + 0xFF, 0xFF, 0xFC, 0xFF, 0x3F, 0xFF, 0xF1, 0x7E, + 0xF8, 0xF1, 0xF3, 0xFF, 0x3F, 0xFF, 0xFF, 0x7F, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x01, 0x00, + 0x00, 0xE0, 0xFF, 0xDF, 0xCF, 0xFF, 0x31, 0xFF, + 0xFF, 0xFF, 0xFF, 0xCF, 0xC0, 0xFF, 0x0F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, + 0xE0, 0xD7, 0x01, 0x00, 0x00, 0xFC, 0x01, 0x00, + 0x00, 0x7C, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x8B, 0x70, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x8B, 0x70, 0x00, 0x00, 0xC0, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x06, 0x00, 0xCF, 0xFC, 0xFC, 0xFC, 0x3F, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x05, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x12, 0x00, + 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x18, 0x00, 0xB0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, + 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x19, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x81, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x20, 0x84, 0x10, 0x00, 0x02, 0x68, 0x01, + 0x02, 0x00, 0x08, 0x20, 0x84, 0x10, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, + 0xFF, 0xFF, 0x3F, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, + 0x3F, 0x3F, 0xFF, 0xAA, 0xFF, 0xFF, 0xFF, 0x3F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0x5F, + 0xDE, 0xFF, 0xCF, 0xEF, 0xFF, 0xFF, 0xDC, 0x3F, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x50, 0x55, 0x55, 0xA5, 0x02, 0xDB, 0x36, + 0x00, 0x00, 0x10, 0x40, 0x00, 0x50, 0x55, 0x55, + 0xA5, 0x02, 0xDB, 0x36, 0x00, 0x00, 0x90, 0x47, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 
0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xA0, 0x00, 0xFC, 0x7F, 0x5F, + 0xDB, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const u_int32_t +__CFUniCharPrecompSourceTable[] = { + 0x00000300, 0x00540000, 0x00000301, 0x00750054, + 0x00000302, 0x002000C9, 0x00000303, 0x001C00E9, + 0x00000304, 0x002C0105, 0x00000306, 0x00200131, + 0x00000307, 0x002E0151, 0x00000308, 0x0036017F, + 0x00000309, 0x001801B5, 0x0000030A, 0x000601CD, + 0x0000030B, 0x000601D3, 0x0000030C, 0x002501D9, + 0x0000030F, 0x000E01FE, 0x00000311, 0x000C020C, + 0x00000313, 0x000E0218, 0x00000314, 0x00100226, + 0x0000031B, 0x00040236, 0x00000323, 0x002A023A, + 0x00000324, 0x00020264, 0x00000325, 0x00020266, + 0x00000326, 0x00040268, 0x00000327, 0x0016026C, + 0x00000328, 0x000A0282, 0x0000032D, 0x000C028C, + 0x0000032E, 0x00020298, 0x00000330, 0x0006029A, + 0x00000331, 0x001102A0, 0x00000338, 0x002C02B1, + 0x00000342, 0x001D02DD, 0x00000345, 0x003F02FA, + 0x00000653, 0x00010339, 0x00000654, 0x0006033A, + 0x00000655, 0x00010340, 0x0000093C, 0x00030341, + 0x000009BE, 0x00010344, 0x000009D7, 0x00010345, + 0x00000B3E, 0x00010346, 0x00000B56, 0x00010347, + 0x00000B57, 0x00010348, 0x00000BBE, 0x00020349, + 0x00000BD7, 0x0002034B, 0x00000C56, 0x0001034D, + 0x00000CC2, 0x0001034E, 0x00000CD5, 0x0003034F, + 0x00000CD6, 0x00010352, 0x00000D3E, 0x00020353, + 0x00000D57, 0x00010355, 0x00000DCA, 0x00020356, + 0x00000DCF, 0x00010358, 0x00000DDF, 0x00010359, + 0x0000102E, 0x0001035A, 0x00003099, 0x0030035B, + 0x0000309A, 0x000A038B +}; + +static const u_int32_t __CFUniCharPrecompositionTableLength = +(sizeof(__CFUniCharPrecompSourceTable) / (sizeof(u_int32_t) * 2)); + + +static const u_int16_t +__CFUniCharBMPPrecompDestinationTable[] = { + 0x0041, 0x00C0, 0x0045, 0x00C8, 0x0049, 0x00CC, 0x004E, 0x01F8, + 0x004F, 0x00D2, 0x0055, 0x00D9, 0x0057, 
0x1E80, 0x0059, 0x1EF2, + 0x0061, 0x00E0, 0x0065, 0x00E8, 0x0069, 0x00EC, 0x006E, 0x01F9, + 0x006F, 0x00F2, 0x0075, 0x00F9, 0x0077, 0x1E81, 0x0079, 0x1EF3, + 0x00A8, 0x1FED, 0x00C2, 0x1EA6, 0x00CA, 0x1EC0, 0x00D4, 0x1ED2, + 0x00DC, 0x01DB, 0x00E2, 0x1EA7, 0x00EA, 0x1EC1, 0x00F4, 0x1ED3, + 0x00FC, 0x01DC, 0x0102, 0x1EB0, 0x0103, 0x1EB1, 0x0112, 0x1E14, + 0x0113, 0x1E15, 0x014C, 0x1E50, 0x014D, 0x1E51, 0x01A0, 0x1EDC, + 0x01A1, 0x1EDD, 0x01AF, 0x1EEA, 0x01B0, 0x1EEB, 0x0391, 0x1FBA, + 0x0395, 0x1FC8, 0x0397, 0x1FCA, 0x0399, 0x1FDA, 0x039F, 0x1FF8, + 0x03A5, 0x1FEA, 0x03A9, 0x1FFA, 0x03B1, 0x1F70, 0x03B5, 0x1F72, + 0x03B7, 0x1F74, 0x03B9, 0x1F76, 0x03BF, 0x1F78, 0x03C5, 0x1F7A, + 0x03C9, 0x1F7C, 0x03CA, 0x1FD2, 0x03CB, 0x1FE2, 0x0415, 0x0400, + 0x0418, 0x040D, 0x0435, 0x0450, 0x0438, 0x045D, 0x1F00, 0x1F02, + 0x1F01, 0x1F03, 0x1F08, 0x1F0A, 0x1F09, 0x1F0B, 0x1F10, 0x1F12, + 0x1F11, 0x1F13, 0x1F18, 0x1F1A, 0x1F19, 0x1F1B, 0x1F20, 0x1F22, + 0x1F21, 0x1F23, 0x1F28, 0x1F2A, 0x1F29, 0x1F2B, 0x1F30, 0x1F32, + 0x1F31, 0x1F33, 0x1F38, 0x1F3A, 0x1F39, 0x1F3B, 0x1F40, 0x1F42, + 0x1F41, 0x1F43, 0x1F48, 0x1F4A, 0x1F49, 0x1F4B, 0x1F50, 0x1F52, + 0x1F51, 0x1F53, 0x1F59, 0x1F5B, 0x1F60, 0x1F62, 0x1F61, 0x1F63, + 0x1F68, 0x1F6A, 0x1F69, 0x1F6B, 0x1FBF, 0x1FCD, 0x1FFE, 0x1FDD, + 0x0041, 0x00C1, 0x0043, 0x0106, 0x0045, 0x00C9, 0x0047, 0x01F4, + 0x0049, 0x00CD, 0x004B, 0x1E30, 0x004C, 0x0139, 0x004D, 0x1E3E, + 0x004E, 0x0143, 0x004F, 0x00D3, 0x0050, 0x1E54, 0x0052, 0x0154, + 0x0053, 0x015A, 0x0055, 0x00DA, 0x0057, 0x1E82, 0x0059, 0x00DD, + 0x005A, 0x0179, 0x0061, 0x00E1, 0x0063, 0x0107, 0x0065, 0x00E9, + 0x0067, 0x01F5, 0x0069, 0x00ED, 0x006B, 0x1E31, 0x006C, 0x013A, + 0x006D, 0x1E3F, 0x006E, 0x0144, 0x006F, 0x00F3, 0x0070, 0x1E55, + 0x0072, 0x0155, 0x0073, 0x015B, 0x0075, 0x00FA, 0x0077, 0x1E83, + 0x0079, 0x00FD, 0x007A, 0x017A, 0x00A8, 0x0385, 0x00C2, 0x1EA4, + 0x00C5, 0x01FA, 0x00C6, 0x01FC, 0x00C7, 0x1E08, 0x00CA, 0x1EBE, + 0x00CF, 0x1E2E, 0x00D4, 0x1ED0, 0x00D5, 0x1E4C, 0x00D8, 
0x01FE, + 0x00DC, 0x01D7, 0x00E2, 0x1EA5, 0x00E5, 0x01FB, 0x00E6, 0x01FD, + 0x00E7, 0x1E09, 0x00EA, 0x1EBF, 0x00EF, 0x1E2F, 0x00F4, 0x1ED1, + 0x00F5, 0x1E4D, 0x00F8, 0x01FF, 0x00FC, 0x01D8, 0x0102, 0x1EAE, + 0x0103, 0x1EAF, 0x0112, 0x1E16, 0x0113, 0x1E17, 0x014C, 0x1E52, + 0x014D, 0x1E53, 0x0168, 0x1E78, 0x0169, 0x1E79, 0x01A0, 0x1EDA, + 0x01A1, 0x1EDB, 0x01AF, 0x1EE8, 0x01B0, 0x1EE9, 0x0391, 0x0386, + 0x0395, 0x0388, 0x0397, 0x0389, 0x0399, 0x038A, 0x039F, 0x038C, + 0x03A5, 0x038E, 0x03A9, 0x038F, 0x03B1, 0x03AC, 0x03B5, 0x03AD, + 0x03B7, 0x03AE, 0x03B9, 0x03AF, 0x03BF, 0x03CC, 0x03C5, 0x03CD, + 0x03C9, 0x03CE, 0x03CA, 0x0390, 0x03CB, 0x03B0, 0x03D2, 0x03D3, + 0x0413, 0x0403, 0x041A, 0x040C, 0x0433, 0x0453, 0x043A, 0x045C, + 0x1F00, 0x1F04, 0x1F01, 0x1F05, 0x1F08, 0x1F0C, 0x1F09, 0x1F0D, + 0x1F10, 0x1F14, 0x1F11, 0x1F15, 0x1F18, 0x1F1C, 0x1F19, 0x1F1D, + 0x1F20, 0x1F24, 0x1F21, 0x1F25, 0x1F28, 0x1F2C, 0x1F29, 0x1F2D, + 0x1F30, 0x1F34, 0x1F31, 0x1F35, 0x1F38, 0x1F3C, 0x1F39, 0x1F3D, + 0x1F40, 0x1F44, 0x1F41, 0x1F45, 0x1F48, 0x1F4C, 0x1F49, 0x1F4D, + 0x1F50, 0x1F54, 0x1F51, 0x1F55, 0x1F59, 0x1F5D, 0x1F60, 0x1F64, + 0x1F61, 0x1F65, 0x1F68, 0x1F6C, 0x1F69, 0x1F6D, 0x1FBF, 0x1FCE, + 0x1FFE, 0x1FDE, 0x0041, 0x00C2, 0x0043, 0x0108, 0x0045, 0x00CA, + 0x0047, 0x011C, 0x0048, 0x0124, 0x0049, 0x00CE, 0x004A, 0x0134, + 0x004F, 0x00D4, 0x0053, 0x015C, 0x0055, 0x00DB, 0x0057, 0x0174, + 0x0059, 0x0176, 0x005A, 0x1E90, 0x0061, 0x00E2, 0x0063, 0x0109, + 0x0065, 0x00EA, 0x0067, 0x011D, 0x0068, 0x0125, 0x0069, 0x00EE, + 0x006A, 0x0135, 0x006F, 0x00F4, 0x0073, 0x015D, 0x0075, 0x00FB, + 0x0077, 0x0175, 0x0079, 0x0177, 0x007A, 0x1E91, 0x1EA0, 0x1EAC, + 0x1EA1, 0x1EAD, 0x1EB8, 0x1EC6, 0x1EB9, 0x1EC7, 0x1ECC, 0x1ED8, + 0x1ECD, 0x1ED9, 0x0041, 0x00C3, 0x0045, 0x1EBC, 0x0049, 0x0128, + 0x004E, 0x00D1, 0x004F, 0x00D5, 0x0055, 0x0168, 0x0056, 0x1E7C, + 0x0059, 0x1EF8, 0x0061, 0x00E3, 0x0065, 0x1EBD, 0x0069, 0x0129, + 0x006E, 0x00F1, 0x006F, 0x00F5, 0x0075, 0x0169, 0x0076, 0x1E7D, + 0x0079, 
0x1EF9, 0x00C2, 0x1EAA, 0x00CA, 0x1EC4, 0x00D4, 0x1ED6, + 0x00E2, 0x1EAB, 0x00EA, 0x1EC5, 0x00F4, 0x1ED7, 0x0102, 0x1EB4, + 0x0103, 0x1EB5, 0x01A0, 0x1EE0, 0x01A1, 0x1EE1, 0x01AF, 0x1EEE, + 0x01B0, 0x1EEF, 0x0041, 0x0100, 0x0045, 0x0112, 0x0047, 0x1E20, + 0x0049, 0x012A, 0x004F, 0x014C, 0x0055, 0x016A, 0x0059, 0x0232, + 0x0061, 0x0101, 0x0065, 0x0113, 0x0067, 0x1E21, 0x0069, 0x012B, + 0x006F, 0x014D, 0x0075, 0x016B, 0x0079, 0x0233, 0x00C4, 0x01DE, + 0x00C6, 0x01E2, 0x00D5, 0x022C, 0x00D6, 0x022A, 0x00DC, 0x01D5, + 0x00E4, 0x01DF, 0x00E6, 0x01E3, 0x00F5, 0x022D, 0x00F6, 0x022B, + 0x00FC, 0x01D6, 0x01EA, 0x01EC, 0x01EB, 0x01ED, 0x0226, 0x01E0, + 0x0227, 0x01E1, 0x022E, 0x0230, 0x022F, 0x0231, 0x0391, 0x1FB9, + 0x0399, 0x1FD9, 0x03A5, 0x1FE9, 0x03B1, 0x1FB1, 0x03B9, 0x1FD1, + 0x03C5, 0x1FE1, 0x0418, 0x04E2, 0x0423, 0x04EE, 0x0438, 0x04E3, + 0x0443, 0x04EF, 0x1E36, 0x1E38, 0x1E37, 0x1E39, 0x1E5A, 0x1E5C, + 0x1E5B, 0x1E5D, 0x0041, 0x0102, 0x0045, 0x0114, 0x0047, 0x011E, + 0x0049, 0x012C, 0x004F, 0x014E, 0x0055, 0x016C, 0x0061, 0x0103, + 0x0065, 0x0115, 0x0067, 0x011F, 0x0069, 0x012D, 0x006F, 0x014F, + 0x0075, 0x016D, 0x0228, 0x1E1C, 0x0229, 0x1E1D, 0x0391, 0x1FB8, + 0x0399, 0x1FD8, 0x03A5, 0x1FE8, 0x03B1, 0x1FB0, 0x03B9, 0x1FD0, + 0x03C5, 0x1FE0, 0x0410, 0x04D0, 0x0415, 0x04D6, 0x0416, 0x04C1, + 0x0418, 0x0419, 0x0423, 0x040E, 0x0430, 0x04D1, 0x0435, 0x04D7, + 0x0436, 0x04C2, 0x0438, 0x0439, 0x0443, 0x045E, 0x1EA0, 0x1EB6, + 0x1EA1, 0x1EB7, 0x0041, 0x0226, 0x0042, 0x1E02, 0x0043, 0x010A, + 0x0044, 0x1E0A, 0x0045, 0x0116, 0x0046, 0x1E1E, 0x0047, 0x0120, + 0x0048, 0x1E22, 0x0049, 0x0130, 0x004D, 0x1E40, 0x004E, 0x1E44, + 0x004F, 0x022E, 0x0050, 0x1E56, 0x0052, 0x1E58, 0x0053, 0x1E60, + 0x0054, 0x1E6A, 0x0057, 0x1E86, 0x0058, 0x1E8A, 0x0059, 0x1E8E, + 0x005A, 0x017B, 0x0061, 0x0227, 0x0062, 0x1E03, 0x0063, 0x010B, + 0x0064, 0x1E0B, 0x0065, 0x0117, 0x0066, 0x1E1F, 0x0067, 0x0121, + 0x0068, 0x1E23, 0x006D, 0x1E41, 0x006E, 0x1E45, 0x006F, 0x022F, + 0x0070, 0x1E57, 0x0072, 
0x1E59, 0x0073, 0x1E61, 0x0074, 0x1E6B, + 0x0077, 0x1E87, 0x0078, 0x1E8B, 0x0079, 0x1E8F, 0x007A, 0x017C, + 0x015A, 0x1E64, 0x015B, 0x1E65, 0x0160, 0x1E66, 0x0161, 0x1E67, + 0x017F, 0x1E9B, 0x1E62, 0x1E68, 0x1E63, 0x1E69, 0x0041, 0x00C4, + 0x0045, 0x00CB, 0x0048, 0x1E26, 0x0049, 0x00CF, 0x004F, 0x00D6, + 0x0055, 0x00DC, 0x0057, 0x1E84, 0x0058, 0x1E8C, 0x0059, 0x0178, + 0x0061, 0x00E4, 0x0065, 0x00EB, 0x0068, 0x1E27, 0x0069, 0x00EF, + 0x006F, 0x00F6, 0x0074, 0x1E97, 0x0075, 0x00FC, 0x0077, 0x1E85, + 0x0078, 0x1E8D, 0x0079, 0x00FF, 0x00D5, 0x1E4E, 0x00F5, 0x1E4F, + 0x016A, 0x1E7A, 0x016B, 0x1E7B, 0x0399, 0x03AA, 0x03A5, 0x03AB, + 0x03B9, 0x03CA, 0x03C5, 0x03CB, 0x03D2, 0x03D4, 0x0406, 0x0407, + 0x0410, 0x04D2, 0x0415, 0x0401, 0x0416, 0x04DC, 0x0417, 0x04DE, + 0x0418, 0x04E4, 0x041E, 0x04E6, 0x0423, 0x04F0, 0x0427, 0x04F4, + 0x042B, 0x04F8, 0x042D, 0x04EC, 0x0430, 0x04D3, 0x0435, 0x0451, + 0x0436, 0x04DD, 0x0437, 0x04DF, 0x0438, 0x04E5, 0x043E, 0x04E7, + 0x0443, 0x04F1, 0x0447, 0x04F5, 0x044B, 0x04F9, 0x044D, 0x04ED, + 0x0456, 0x0457, 0x04D8, 0x04DA, 0x04D9, 0x04DB, 0x04E8, 0x04EA, + 0x04E9, 0x04EB, 0x0041, 0x1EA2, 0x0045, 0x1EBA, 0x0049, 0x1EC8, + 0x004F, 0x1ECE, 0x0055, 0x1EE6, 0x0059, 0x1EF6, 0x0061, 0x1EA3, + 0x0065, 0x1EBB, 0x0069, 0x1EC9, 0x006F, 0x1ECF, 0x0075, 0x1EE7, + 0x0079, 0x1EF7, 0x00C2, 0x1EA8, 0x00CA, 0x1EC2, 0x00D4, 0x1ED4, + 0x00E2, 0x1EA9, 0x00EA, 0x1EC3, 0x00F4, 0x1ED5, 0x0102, 0x1EB2, + 0x0103, 0x1EB3, 0x01A0, 0x1EDE, 0x01A1, 0x1EDF, 0x01AF, 0x1EEC, + 0x01B0, 0x1EED, 0x0041, 0x00C5, 0x0055, 0x016E, 0x0061, 0x00E5, + 0x0075, 0x016F, 0x0077, 0x1E98, 0x0079, 0x1E99, 0x004F, 0x0150, + 0x0055, 0x0170, 0x006F, 0x0151, 0x0075, 0x0171, 0x0423, 0x04F2, + 0x0443, 0x04F3, 0x0041, 0x01CD, 0x0043, 0x010C, 0x0044, 0x010E, + 0x0045, 0x011A, 0x0047, 0x01E6, 0x0048, 0x021E, 0x0049, 0x01CF, + 0x004B, 0x01E8, 0x004C, 0x013D, 0x004E, 0x0147, 0x004F, 0x01D1, + 0x0052, 0x0158, 0x0053, 0x0160, 0x0054, 0x0164, 0x0055, 0x01D3, + 0x005A, 0x017D, 0x0061, 0x01CE, 0x0063, 
0x010D, 0x0064, 0x010F, + 0x0065, 0x011B, 0x0067, 0x01E7, 0x0068, 0x021F, 0x0069, 0x01D0, + 0x006A, 0x01F0, 0x006B, 0x01E9, 0x006C, 0x013E, 0x006E, 0x0148, + 0x006F, 0x01D2, 0x0072, 0x0159, 0x0073, 0x0161, 0x0074, 0x0165, + 0x0075, 0x01D4, 0x007A, 0x017E, 0x00DC, 0x01D9, 0x00FC, 0x01DA, + 0x01B7, 0x01EE, 0x0292, 0x01EF, 0x0041, 0x0200, 0x0045, 0x0204, + 0x0049, 0x0208, 0x004F, 0x020C, 0x0052, 0x0210, 0x0055, 0x0214, + 0x0061, 0x0201, 0x0065, 0x0205, 0x0069, 0x0209, 0x006F, 0x020D, + 0x0072, 0x0211, 0x0075, 0x0215, 0x0474, 0x0476, 0x0475, 0x0477, + 0x0041, 0x0202, 0x0045, 0x0206, 0x0049, 0x020A, 0x004F, 0x020E, + 0x0052, 0x0212, 0x0055, 0x0216, 0x0061, 0x0203, 0x0065, 0x0207, + 0x0069, 0x020B, 0x006F, 0x020F, 0x0072, 0x0213, 0x0075, 0x0217, + 0x0391, 0x1F08, 0x0395, 0x1F18, 0x0397, 0x1F28, 0x0399, 0x1F38, + 0x039F, 0x1F48, 0x03A9, 0x1F68, 0x03B1, 0x1F00, 0x03B5, 0x1F10, + 0x03B7, 0x1F20, 0x03B9, 0x1F30, 0x03BF, 0x1F40, 0x03C1, 0x1FE4, + 0x03C5, 0x1F50, 0x03C9, 0x1F60, 0x0391, 0x1F09, 0x0395, 0x1F19, + 0x0397, 0x1F29, 0x0399, 0x1F39, 0x039F, 0x1F49, 0x03A1, 0x1FEC, + 0x03A5, 0x1F59, 0x03A9, 0x1F69, 0x03B1, 0x1F01, 0x03B5, 0x1F11, + 0x03B7, 0x1F21, 0x03B9, 0x1F31, 0x03BF, 0x1F41, 0x03C1, 0x1FE5, + 0x03C5, 0x1F51, 0x03C9, 0x1F61, 0x004F, 0x01A0, 0x0055, 0x01AF, + 0x006F, 0x01A1, 0x0075, 0x01B0, 0x0041, 0x1EA0, 0x0042, 0x1E04, + 0x0044, 0x1E0C, 0x0045, 0x1EB8, 0x0048, 0x1E24, 0x0049, 0x1ECA, + 0x004B, 0x1E32, 0x004C, 0x1E36, 0x004D, 0x1E42, 0x004E, 0x1E46, + 0x004F, 0x1ECC, 0x0052, 0x1E5A, 0x0053, 0x1E62, 0x0054, 0x1E6C, + 0x0055, 0x1EE4, 0x0056, 0x1E7E, 0x0057, 0x1E88, 0x0059, 0x1EF4, + 0x005A, 0x1E92, 0x0061, 0x1EA1, 0x0062, 0x1E05, 0x0064, 0x1E0D, + 0x0065, 0x1EB9, 0x0068, 0x1E25, 0x0069, 0x1ECB, 0x006B, 0x1E33, + 0x006C, 0x1E37, 0x006D, 0x1E43, 0x006E, 0x1E47, 0x006F, 0x1ECD, + 0x0072, 0x1E5B, 0x0073, 0x1E63, 0x0074, 0x1E6D, 0x0075, 0x1EE5, + 0x0076, 0x1E7F, 0x0077, 0x1E89, 0x0079, 0x1EF5, 0x007A, 0x1E93, + 0x01A0, 0x1EE2, 0x01A1, 0x1EE3, 0x01AF, 0x1EF0, 0x01B0, 
0x1EF1, + 0x0055, 0x1E72, 0x0075, 0x1E73, 0x0041, 0x1E00, 0x0061, 0x1E01, + 0x0053, 0x0218, 0x0054, 0x021A, 0x0073, 0x0219, 0x0074, 0x021B, + 0x0043, 0x00C7, 0x0044, 0x1E10, 0x0045, 0x0228, 0x0047, 0x0122, + 0x0048, 0x1E28, 0x004B, 0x0136, 0x004C, 0x013B, 0x004E, 0x0145, + 0x0052, 0x0156, 0x0053, 0x015E, 0x0054, 0x0162, 0x0063, 0x00E7, + 0x0064, 0x1E11, 0x0065, 0x0229, 0x0067, 0x0123, 0x0068, 0x1E29, + 0x006B, 0x0137, 0x006C, 0x013C, 0x006E, 0x0146, 0x0072, 0x0157, + 0x0073, 0x015F, 0x0074, 0x0163, 0x0041, 0x0104, 0x0045, 0x0118, + 0x0049, 0x012E, 0x004F, 0x01EA, 0x0055, 0x0172, 0x0061, 0x0105, + 0x0065, 0x0119, 0x0069, 0x012F, 0x006F, 0x01EB, 0x0075, 0x0173, + 0x0044, 0x1E12, 0x0045, 0x1E18, 0x004C, 0x1E3C, 0x004E, 0x1E4A, + 0x0054, 0x1E70, 0x0055, 0x1E76, 0x0064, 0x1E13, 0x0065, 0x1E19, + 0x006C, 0x1E3D, 0x006E, 0x1E4B, 0x0074, 0x1E71, 0x0075, 0x1E77, + 0x0048, 0x1E2A, 0x0068, 0x1E2B, 0x0045, 0x1E1A, 0x0049, 0x1E2C, + 0x0055, 0x1E74, 0x0065, 0x1E1B, 0x0069, 0x1E2D, 0x0075, 0x1E75, + 0x0042, 0x1E06, 0x0044, 0x1E0E, 0x004B, 0x1E34, 0x004C, 0x1E3A, + 0x004E, 0x1E48, 0x0052, 0x1E5E, 0x0054, 0x1E6E, 0x005A, 0x1E94, + 0x0062, 0x1E07, 0x0064, 0x1E0F, 0x0068, 0x1E96, 0x006B, 0x1E35, + 0x006C, 0x1E3B, 0x006E, 0x1E49, 0x0072, 0x1E5F, 0x0074, 0x1E6F, + 0x007A, 0x1E95, 0x003C, 0x226E, 0x003D, 0x2260, 0x003E, 0x226F, + 0x2190, 0x219A, 0x2192, 0x219B, 0x2194, 0x21AE, 0x21D0, 0x21CD, + 0x21D2, 0x21CF, 0x21D4, 0x21CE, 0x2203, 0x2204, 0x2208, 0x2209, + 0x220B, 0x220C, 0x2223, 0x2224, 0x2225, 0x2226, 0x223C, 0x2241, + 0x2243, 0x2244, 0x2245, 0x2247, 0x2248, 0x2249, 0x224D, 0x226D, + 0x2261, 0x2262, 0x2264, 0x2270, 0x2265, 0x2271, 0x2272, 0x2274, + 0x2273, 0x2275, 0x2276, 0x2278, 0x2277, 0x2279, 0x227A, 0x2280, + 0x227B, 0x2281, 0x227C, 0x22E0, 0x227D, 0x22E1, 0x2282, 0x2284, + 0x2283, 0x2285, 0x2286, 0x2288, 0x2287, 0x2289, 0x2291, 0x22E2, + 0x2292, 0x22E3, 0x22A2, 0x22AC, 0x22A8, 0x22AD, 0x22A9, 0x22AE, + 0x22AB, 0x22AF, 0x22B2, 0x22EA, 0x22B3, 0x22EB, 0x22B4, 0x22EC, + 0x22B5, 
0x22ED, 0x00A8, 0x1FC1, 0x03B1, 0x1FB6, 0x03B7, 0x1FC6, + 0x03B9, 0x1FD6, 0x03C5, 0x1FE6, 0x03C9, 0x1FF6, 0x03CA, 0x1FD7, + 0x03CB, 0x1FE7, 0x1F00, 0x1F06, 0x1F01, 0x1F07, 0x1F08, 0x1F0E, + 0x1F09, 0x1F0F, 0x1F20, 0x1F26, 0x1F21, 0x1F27, 0x1F28, 0x1F2E, + 0x1F29, 0x1F2F, 0x1F30, 0x1F36, 0x1F31, 0x1F37, 0x1F38, 0x1F3E, + 0x1F39, 0x1F3F, 0x1F50, 0x1F56, 0x1F51, 0x1F57, 0x1F59, 0x1F5F, + 0x1F60, 0x1F66, 0x1F61, 0x1F67, 0x1F68, 0x1F6E, 0x1F69, 0x1F6F, + 0x1FBF, 0x1FCF, 0x1FFE, 0x1FDF, 0x0391, 0x1FBC, 0x0397, 0x1FCC, + 0x03A9, 0x1FFC, 0x03AC, 0x1FB4, 0x03AE, 0x1FC4, 0x03B1, 0x1FB3, + 0x03B7, 0x1FC3, 0x03C9, 0x1FF3, 0x03CE, 0x1FF4, 0x1F00, 0x1F80, + 0x1F01, 0x1F81, 0x1F02, 0x1F82, 0x1F03, 0x1F83, 0x1F04, 0x1F84, + 0x1F05, 0x1F85, 0x1F06, 0x1F86, 0x1F07, 0x1F87, 0x1F08, 0x1F88, + 0x1F09, 0x1F89, 0x1F0A, 0x1F8A, 0x1F0B, 0x1F8B, 0x1F0C, 0x1F8C, + 0x1F0D, 0x1F8D, 0x1F0E, 0x1F8E, 0x1F0F, 0x1F8F, 0x1F20, 0x1F90, + 0x1F21, 0x1F91, 0x1F22, 0x1F92, 0x1F23, 0x1F93, 0x1F24, 0x1F94, + 0x1F25, 0x1F95, 0x1F26, 0x1F96, 0x1F27, 0x1F97, 0x1F28, 0x1F98, + 0x1F29, 0x1F99, 0x1F2A, 0x1F9A, 0x1F2B, 0x1F9B, 0x1F2C, 0x1F9C, + 0x1F2D, 0x1F9D, 0x1F2E, 0x1F9E, 0x1F2F, 0x1F9F, 0x1F60, 0x1FA0, + 0x1F61, 0x1FA1, 0x1F62, 0x1FA2, 0x1F63, 0x1FA3, 0x1F64, 0x1FA4, + 0x1F65, 0x1FA5, 0x1F66, 0x1FA6, 0x1F67, 0x1FA7, 0x1F68, 0x1FA8, + 0x1F69, 0x1FA9, 0x1F6A, 0x1FAA, 0x1F6B, 0x1FAB, 0x1F6C, 0x1FAC, + 0x1F6D, 0x1FAD, 0x1F6E, 0x1FAE, 0x1F6F, 0x1FAF, 0x1F70, 0x1FB2, + 0x1F74, 0x1FC2, 0x1F7C, 0x1FF2, 0x1FB6, 0x1FB7, 0x1FC6, 0x1FC7, + 0x1FF6, 0x1FF7, 0x0627, 0x0622, 0x0627, 0x0623, 0x0648, 0x0624, + 0x064A, 0x0626, 0x06C1, 0x06C2, 0x06D2, 0x06D3, 0x06D5, 0x06C0, + 0x0627, 0x0625, 0x0928, 0x0929, 0x0930, 0x0931, 0x0933, 0x0934, + 0x09C7, 0x09CB, 0x09C7, 0x09CC, 0x0B47, 0x0B4B, 0x0B47, 0x0B48, + 0x0B47, 0x0B4C, 0x0BC6, 0x0BCA, 0x0BC7, 0x0BCB, 0x0B92, 0x0B94, + 0x0BC6, 0x0BCC, 0x0C46, 0x0C48, 0x0CC6, 0x0CCA, 0x0CBF, 0x0CC0, + 0x0CC6, 0x0CC7, 0x0CCA, 0x0CCB, 0x0CC6, 0x0CC8, 0x0D46, 0x0D4A, + 0x0D47, 0x0D4B, 0x0D46, 
0x0D4C, 0x0DD9, 0x0DDA, 0x0DDC, 0x0DDD, + 0x0DD9, 0x0DDC, 0x0DD9, 0x0DDE, 0x1025, 0x1026, 0x3046, 0x3094, + 0x304B, 0x304C, 0x304D, 0x304E, 0x304F, 0x3050, 0x3051, 0x3052, + 0x3053, 0x3054, 0x3055, 0x3056, 0x3057, 0x3058, 0x3059, 0x305A, + 0x305B, 0x305C, 0x305D, 0x305E, 0x305F, 0x3060, 0x3061, 0x3062, + 0x3064, 0x3065, 0x3066, 0x3067, 0x3068, 0x3069, 0x306F, 0x3070, + 0x3072, 0x3073, 0x3075, 0x3076, 0x3078, 0x3079, 0x307B, 0x307C, + 0x309D, 0x309E, 0x30A6, 0x30F4, 0x30AB, 0x30AC, 0x30AD, 0x30AE, + 0x30AF, 0x30B0, 0x30B1, 0x30B2, 0x30B3, 0x30B4, 0x30B5, 0x30B6, + 0x30B7, 0x30B8, 0x30B9, 0x30BA, 0x30BB, 0x30BC, 0x30BD, 0x30BE, + 0x30BF, 0x30C0, 0x30C1, 0x30C2, 0x30C4, 0x30C5, 0x30C6, 0x30C7, + 0x30C8, 0x30C9, 0x30CF, 0x30D0, 0x30D2, 0x30D3, 0x30D5, 0x30D6, + 0x30D8, 0x30D9, 0x30DB, 0x30DC, 0x30EF, 0x30F7, 0x30F0, 0x30F8, + 0x30F1, 0x30F9, 0x30F2, 0x30FA, 0x30FD, 0x30FE, 0x306F, 0x3071, + 0x3072, 0x3074, 0x3075, 0x3077, 0x3078, 0x307A, 0x307B, 0x307D, + 0x30CF, 0x30D1, 0x30D2, 0x30D4, 0x30D5, 0x30D7, 0x30D8, 0x30DA, + 0x30DB, 0x30DD +}; + +static const u_int8_t +__CFUniCharCombiningBitmap[] = { + 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, + 0x0D, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, + 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x13, 0x00, + + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x78, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFE, 0xFF, 0xFB, 0xFF, 0xFF, 0xBB, + 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xF8, 0x3F, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xC0, 0xFF, 0x9F, 0x3D, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xFF, 0xFF, + 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xC0, 0xFF, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, + 0xFF, 0x3F, 0x1E, 0x00, 0x0C, 0x00, 0x00, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, + 0x9F, 0x39, 0x80, 0x00, 0x0C, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, + 0x87, 0x39, 0x00, 0x00, 0x00, 
0x00, 0x03, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, + 0xBF, 0x3B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, + 0x8F, 0x39, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, + 0xC7, 0x3D, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, + 0xDF, 0x3D, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, + 0xDF, 0x3D, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, + 0xCF, 0x3D, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x84, 0x5F, 0xFF, 0x00, 0x00, 0x0C, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0x07, + 0x80, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0x1B, + 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0xA0, 0xC2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFE, 0xFF, + 0xDF, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xC7, 0x03, + 0x00, 0x00, 0xC0, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x1C, 0x00, + 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0C, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xFF, + 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0x3F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, + 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const u_int8_t +__CFUniCharCombiningPropertyBitmap[] = { + 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, + 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, + 0x0F, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x14, 0x00, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE8, 0xDC, 0xDC, + 0xDC, 0xDC, 0xE8, 0xD8, 0xDC, 0xDC, 0xDC, 0xDC, + 0xDC, 0xCA, 0xCA, 0xDC, 0xDC, 0xDC, 0xDC, 0xCA, + 0xCA, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, + 0xDC, 0xDC, 0xDC, 0xDC, 0x01, 0x01, 0x01, 0x01, + 0x01, 0xDC, 0xDC, 0xDC, 0xDC, 0xE6, 0xE6, 0xE6, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xF0, 0xE6, 0xDC, + 0xDC, 0xDC, 0xE6, 0xE6, 0xE6, 0xDC, 0xDC, 0x00, + 0xE6, 0xE6, 0xE6, 0xDC, 0xDC, 0xDC, 0xDC, 0xE6, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xEA, 0xE9, + 0xEA, 0xEA, 0xE9, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xE6, 0xE6, 0xE6, 0xE6, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xDC, 0xE6, 0xE6, 0xE6, 0xE6, 0xDC, 0xE6, + 0xE6, 0xE6, 0xDE, 0xDC, 0xE6, 0xE6, 0xE6, 0xE6, + 0xE6, 0xE6, 0x00, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, + 0xE6, 0xE6, 0xDC, 0xE6, 0xE6, 0xDE, 0xE4, 0xE6, + 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, + 0x12, 0x13, 0x00, 0x14, 0x15, 0x16, 0x00, 0x17, + 0x00, 0x18, 0x19, 0x00, 0xE6, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + 0x20, 0x21, 0x22, 0xE6, 0xE6, 0xDC, 0xDC, 0xE6, + 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE6, + 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0x00, 0x00, 0xE6, + 0xE6, 0xE6, 0xE6, 0xDC, 0xE6, 0x00, 0x00, 0xE6, + 0xE6, 0x00, 0xDC, 0xE6, 0xE6, 0xDC, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE6, 0xDC, 0xE6, 0xE6, 0xDC, 0xE6, 0xE6, 0xDC, + 0xDC, 0xDC, 0xE6, 0xDC, 0xDC, 0xE6, 0xDC, 0xE6, + 0xE6, 0xE6, 0xDC, 0xE6, 0xDC, 0xE6, 0xDC, 0xE6, + 0xDC, 0xE6, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0xE6, 0xDC, 0xE6, 0xE6, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x5B, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x67, 0x67, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x6B, 0x6B, 0x6B, 0x6B, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x76, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7A, 0x7A, 0x7A, 0x7A, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xDC, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0x00, 0xDC, + 0x00, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x81, 0x82, 0x00, 0x84, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x82, 0x82, 0x82, 0x82, 0x00, 0x00, + 0x82, 0x00, 0xE6, 0xE6, 0x09, 0x00, 0xE6, 0xE6, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xDE, 0xE6, 0xDC, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE6, 0xE6, 0x01, 0x01, 0xE6, 0xE6, 0xE6, 0xE6, + 0x01, 0x01, 0x01, 0xE6, 0xE6, 0x00, 0x00, 0x00, + 0x00, 0xE6, 0x00, 0x00, 0x00, 0x01, 0x01, 0xE6, + 0xDC, 0xE6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xDA, 0xE4, 0xE8, 0xDE, 0xE0, 0xE0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1A, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE6, 0xE6, 0xE6, 0xE6, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + + + + +#endif /* lf_hfs_utfconvdata_h */ + diff --git a/livefiles_hfs_plugin/lf_hfs_utils.c b/livefiles_hfs_plugin/lf_hfs_utils.c new file mode 100644 index 0000000..3092a33 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_utils.c @@ -0,0 +1,130 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_utils.c + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 19/03/2018. + */ + +#include "lf_hfs_utils.h" +#include "lf_hfs_vfsutils.h" + +/* + * General routine to allocate a hash table. + */ +void * +hashinit(int elements, u_long *hashmask) +{ + int hashsize = 0; + LIST_HEAD(generic, generic) *hashtbl; + int i; + + if (elements <= 0) + return NULL; + for (hashsize = 1; hashsize <= elements; hashsize <<= 1) + { + continue; + } + + hashsize >>= 1; + hashtbl = hfs_malloc(hashsize * sizeof(*hashtbl)); + if (hashtbl != NULL) + { + for (i = 0; i < hashsize; i++) + { + LIST_INIT(&hashtbl[i]); + } + *hashmask = hashsize - 1; + } + return (hashtbl); +} + +/* + * General routine to free a hash table. + */ +void +hashDeinit(void* pvHashTbl) +{ + LIST_HEAD(generic, generic) *hashtbl = pvHashTbl; + hfs_free(hashtbl); +} + +/* + * to_bsd_time - convert from Mac OS time (seconds since 1/1/1904) + * to BSD time (seconds since 1/1/1970) + */ +time_t +to_bsd_time(u_int32_t hfs_time) +{ + u_int32_t gmt = hfs_time; + + if (gmt > MAC_GMT_FACTOR) + gmt -= MAC_GMT_FACTOR; + else + gmt = 0; /* don't let date go negative! 
*/ + + return (time_t)gmt; +} + +/* + * to_hfs_time - convert from BSD time (seconds since 1/1/1970) + * to Mac OS time (seconds since 1/1/1904) + */ +u_int32_t +to_hfs_time(time_t bsd_time) +{ + u_int32_t hfs_time = (u_int32_t)bsd_time; + + /* don't adjust zero - treat as uninitialzed */ + if (hfs_time != 0) + hfs_time += MAC_GMT_FACTOR; + + return (hfs_time); +} + +void +microuptime(struct timeval *tvp) +{ + struct timespec ts; + clock_gettime( CLOCK_MONOTONIC, &ts ); + TIMESPEC_TO_TIMEVAL(tvp, &ts); +} + +void +microtime(struct timeval *tvp) +{ + struct timespec ts; + clock_gettime( CLOCK_REALTIME, &ts ); + TIMESPEC_TO_TIMEVAL(tvp, &ts); +} + +void* lf_hfs_utils_allocate_and_copy_string( char *pcName, size_t uLen ) +{ + //Check the validity of the uLen + if (uLen > kHFSPlusMaxFileNameChars) { + return NULL; + } + + //Checkk the validity of the pcName + if (strlen(pcName) != uLen) { + return NULL; + } + + void *pvTmp = hfs_malloc( uLen+1 ); + if ( pvTmp == NULL ) { + return NULL; + } + + memcpy(pvTmp, pcName, uLen); + //Add Null terminated at the end of the name + char *pcLastChar = pvTmp + uLen; + *pcLastChar = '\0'; + + return pvTmp; +} + +off_t +blk_to_bytes(uint32_t blk, uint32_t blk_size) +{ + return (off_t)blk * blk_size; // Avoid the overflow +} diff --git a/livefiles_hfs_plugin/lf_hfs_utils.h b/livefiles_hfs_plugin/lf_hfs_utils.h new file mode 100644 index 0000000..712d10f --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_utils.h @@ -0,0 +1,42 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_utils.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 19/03/2018. 
+ */ + +#ifndef lf_hfs_utils_h +#define lf_hfs_utils_h + +#include +#include +#include "lf_hfs_locks.h" +#include "lf_hfs.h" +#include "lf_hfs_logger.h" + +#define hfs_assert(expr) \ + do { \ + if ( (expr) == (0) ) \ + { \ + LFHFS_LOG( LEVEL_ERROR, \ + "HFS ASSERT [%s] [%d]\n", \ + __FILE__, \ + __LINE__); \ + assert( 0 ); \ + } \ + } while (0) + +#define MAC_GMT_FACTOR 2082844800UL + + +void* hashinit(int elements, u_long *hashmask); +void hashDeinit(void* pvHashTbl); +time_t to_bsd_time(u_int32_t hfs_time); +u_int32_t to_hfs_time(time_t bsd_time); +void microuptime(struct timeval *tvp); +void microtime(struct timeval *tvp); +void* lf_hfs_utils_allocate_and_copy_string( char *pcName, size_t uLen ); +off_t blk_to_bytes(uint32_t blk, uint32_t blk_size); + +#endif /* lf_hfs_utils_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_vfsops.c b/livefiles_hfs_plugin/lf_hfs_vfsops.c new file mode 100644 index 0000000..4dab9c8 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vfsops.c @@ -0,0 +1,2103 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vfsops.c + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. 
+ */ + +#include "lf_hfs_common.h" +#include +#include +#include +#include +#include +#include +#include +#include "lf_hfs_logger.h" +#include "lf_hfs_mount.h" +#include "lf_hfs.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_cnode.h" +#include "lf_hfs_chash.h" +#include "lf_hfs_format.h" +#include "lf_hfs_locks.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_locks.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_volume_allocation.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_link.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_fsops_handler.h" +#include "lf_hfs_journal.h" +#include "lf_hfs_fileops_handler.h" + +#include + +static void hfs_locks_destroy(struct hfsmount *hfsmp); +static int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args); + + +static int +setup_posix_file_action_for_fsck(posix_spawn_file_actions_t *file_actions, int fd) +{ + int error; + + if (file_actions == NULL || fd < 0) + { + return EINVAL; + } + + error = posix_spawn_file_actions_init(file_actions); + if (error) + { + goto out; + } + + error = posix_spawn_file_actions_addinherit_np(file_actions, 0); + if (error) + { + goto out; + } + + error = posix_spawn_file_actions_addinherit_np(file_actions, 1); + if (error) + { + goto out; + } + + error = posix_spawn_file_actions_addinherit_np(file_actions, 2); + if (error) + { + goto out; + } + + error = posix_spawn_file_actions_addinherit_np(file_actions, fd); + +out: + return error; +} + +static int +setup_spawnattr_for_fsck(posix_spawnattr_t *spawn_attr) +{ + int error; + + error = posix_spawnattr_init(spawn_attr); + if (error) + { + goto out; + } + error = posix_spawnattr_setflags(spawn_attr, POSIX_SPAWN_CLOEXEC_DEFAULT); + +out: + return error; +} + + +// fsck_mount_and_replay: executed on fsck_hfs -quick +// Try to mount, and if a journaled volume, play the journal. 
+// Returned values: +// OK if: +// 1) On journaled volumes, the journal has been replayed and the dirty bit cleared. +// 2) On non-journalled volumes, the dirty is cleared. +// EINVAL if: +// 1) On non-journalled volumes the dirty bit is set. Please run fsck_hfs to fix. +// 2) On journalled volume, the replay failed. Try fsck_hfs. +int fsck_mount_and_replay(int iFd) { + int iErr = 0; + + LFHFS_LOG(LEVEL_DEBUG, "fsck_mount_and_replay %d", iFd); + + UVFSFileNode sRootNode; + + iErr = LFHFS_Taste(iFd); + if (iErr) { + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_Taste returned %d", iErr); + return iErr; + } + + UVFSScanVolsRequest sScanVolsReq = {0}; + UVFSScanVolsReply sScanVolsReply = {0}; + iErr = LFHFS_ScanVols(iFd, &sScanVolsReq, &sScanVolsReply); + if (iErr) { + LFHFS_LOG(LEVEL_DEBUG, "LFHFS_ScanVol returned %d", iErr); + return iErr; + } + + // Mount and replay journal if possible + iErr = LFHFS_Mount(iFd, 0, 0, NULL, &sRootNode); // On journaled volumes, this replays the journal. + // Non-journaled volumes fails to mount if dirty (Unmounted == 0). + if (iErr) { + LFHFS_LOG(LEVEL_DEBUG, "fsck_mount_and_replay: LFHFS_Mount returned %d", iErr); + return EINVAL; + } + + LFHFS_Unmount (sRootNode, UVFSUnmountHintNone); + + return iErr; +} + +#define PATH_TO_FSCK "/System/Library/Filesystems/hfs.fs/Contents/Resources/fsck_hfs" + +int +fsck_hfs(int fd, check_flags_t how) +{ + pid_t child; + pid_t child_found; + int child_status; + extern char **environ; + char fdescfs_path[24]; + posix_spawn_file_actions_t file_actions; + int result; + posix_spawnattr_t spawn_attr; + + /* + * XXXJRT There are dragons related to how the journal is replayed in + * fsck_hfs. Until we can sort out the mess, disable running fsck_hfs. 
+ * USB: Re-enable Detonator fsck_hfs + */ + if (how == QUICK_CHECK) { + if (fsck_mount_and_replay(fd) == 0) { + return(0); + } + } + + LFHFS_LOG(LEVEL_DEFAULT, "fsck_hfs - fsck start for %d", fd); + snprintf(fdescfs_path, sizeof(fdescfs_path), "/dev/fd/%d", fd); + const char * argv[] = {"fsck_hfs", "-q", fdescfs_path, NULL}; + + switch (how) + { + case QUICK_CHECK: + /* Do nothing, already setup for this */ + break; + case CHECK: + argv[1] = "-n"; + break; + case CHECK_AND_REPAIR: + argv[1] = "-y"; + break; + default: + LFHFS_LOG(LEVEL_ERROR, "Invalid how flags for the check, ignoring; %d", how); + break; + } + + LFHFS_LOG(LEVEL_DEBUG, "fsck_hfs params: %s %s %s", argv[1], argv[2], argv[3]); + result = setup_posix_file_action_for_fsck(&file_actions, fd); + if (result) + { + goto out; + } + + result = setup_spawnattr_for_fsck(&spawn_attr); + if (result) + { + posix_spawn_file_actions_destroy(&file_actions); + goto out; + } + + result = posix_spawn(&child, + PATH_TO_FSCK, + &file_actions, + &spawn_attr, + (char * const *)argv, + environ); + + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&spawn_attr); + if (result) + { + LFHFS_LOG(LEVEL_ERROR, "posix_spawn fsck_hfs: error=%d", result); + goto out; + } + + // Wait for child to finish, XXXab: revisit, need sensible timeout? 
+ do { + child_found = waitpid(child, &child_status, 0); + } while (child_found == -1 && errno == EINTR); + + if (child_found == -1) + { + result = errno; + LFHFS_LOG(LEVEL_ERROR, "waitpid fsck_hfs: errno=%d", result); + goto out; + } + + if (WIFEXITED(child_status)) + { + result = WEXITSTATUS(child_status); + if (result) + { + LFHFS_LOG(LEVEL_ERROR, "fsck_hfs: exited with status %d", result); + result = EILSEQ; + } else { + LFHFS_LOG(LEVEL_ERROR, "fsck_hfs: exited with status %d", result); + } + } + else + { + result = WTERMSIG(child_status); + LFHFS_LOG(LEVEL_ERROR, "fsck_hfs: terminated by signal %d", result); + result = EINTR; + } + +out: + LFHFS_LOG(LEVEL_DEFAULT, "fsck_hfs - fsck finish for %d with err %d", fd, result); + return result; +} + +int +hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data) +{ + struct hfsmount *hfsmp = NULL; + int retval = 0; + if ( devvp == NULL ) + { + retval = EINVAL; + goto fail; + } + + retval = hfs_mountfs(devvp, mp, NULL); + if (retval) + { + // ENOTSUP is for regular HFS -> just fail + if (retval != ENOTSUP) + { + //Failed during mount, try to run fsck to fix and try mount again + retval = fsck_hfs(devvp->psFSRecord->iFD, CHECK_AND_REPAIR); + + // fsck succeeded, try to mount + if (!retval) { + retval = hfs_mountfs(devvp, mp, NULL); + if (!retval) + goto mount_passed; + } + } + + LFHFS_LOG(LEVEL_ERROR, "hfs_mount: hfs_mountfs returned error=%d\n", retval); + goto fail; + } +mount_passed: + /* After hfs_mountfs succeeds, we should have valid hfsmp */ + hfsmp = VFSTOHFS(mp); + + /* Set up the maximum defrag file size */ + hfsmp->hfs_defrag_max = HFS_INITIAL_DEFRAG_SIZE; + + if (!data) + { + // Root mount + hfsmp->hfs_uid = UNKNOWNUID; + hfsmp->hfs_gid = UNKNOWNGID; + hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */ + hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */ + + /* Establish the free block reserve. 
*/ + hfsmp->reserveBlocks = (uint32_t) ((u_int64_t)hfsmp->totalBlocks * HFS_MINFREE) / 100; + hfsmp->reserveBlocks = MIN(hfsmp->reserveBlocks, HFS_MAXRESERVE / hfsmp->blockSize); + } + +fail: + return (retval); +} + +static int hfs_InitialMount(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, HFSPlusVolumeHeader **vhp, off_t *embeddedOffset, struct hfsmount **hfsmp, bool bFailForDirty) +{ + int retval = 0; + HFSMasterDirectoryBlock *mdbp = NULL; + void* pvBuffer = NULL; + int mntwrapper; + u_int64_t disksize; + u_int64_t log_blkcnt; + u_int32_t log_blksize; + u_int32_t phys_blksize; + u_int32_t minblksize; + u_int32_t iswritable; + u_int64_t mdb_offset; + u_int32_t device_features = 0; + + mntwrapper = 0; + minblksize = kHFSBlockSize; + + /* Get the logical block size (treated as physical block size everywhere) */ + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETBLOCKSIZE, &log_blksize)) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: DKIOCGETBLOCKSIZE failed\n"); + retval = ENXIO; + goto error_exit; + } + if (log_blksize == 0 || log_blksize > 1024*1024*1024) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_mountfs: logical block size 0x%x looks bad. Not mounting.\n", log_blksize); + retval = ENXIO; + goto error_exit; + } + + /* Get the physical block size. */ + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETPHYSICALBLOCKSIZE, &phys_blksize)) + { + if ((retval != ENOTSUP) && (retval != ENOTTY)) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n"); + retval = ENXIO; + goto error_exit; + } + /* If device does not support this ioctl, assume that physical + * block size is same as logical block size + */ + phys_blksize = log_blksize; + } + + if (phys_blksize == 0 || phys_blksize > MAXBSIZE) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_mountfs: physical block size 0x%x looks bad. Not mounting.\n", phys_blksize); + retval = ENXIO; + goto error_exit; + } + + /* Get the number of physical blocks. 
*/ + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETBLOCKCOUNT, &log_blkcnt)) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n"); + retval = ENXIO; + goto error_exit; + } + + /* Compute an accurate disk size (i.e. within 512 bytes) */ + disksize = (u_int64_t)log_blkcnt * (u_int64_t)log_blksize; + + /* + * At this point: + * minblksize is the minimum physical block size + * log_blksize has our preferred physical block size + * log_blkcnt has the total number of physical blocks + */ + mdbp = hfs_mallocz(kMDBSize); + if (mdbp == NULL) + { + retval = ENOMEM; + goto error_exit; + } + + pvBuffer = hfs_malloc(phys_blksize); + if (pvBuffer == NULL) + { + retval = ENOMEM; + goto error_exit; + } + + mdb_offset = (uint64_t) HFS_PRI_SECTOR(log_blksize); + retval = raw_readwrite_read_mount( devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)), phys_blksize, pvBuffer, phys_blksize, NULL, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: raw_readwrite_read_mount failed with %d\n", retval); + goto error_exit; + } + + bcopy(pvBuffer + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize); + hfs_free(pvBuffer); + pvBuffer = NULL; + + *hfsmp = hfs_malloc(sizeof(struct hfsmount)); + if (*hfsmp == NULL) + { + retval = ENOMEM; + goto error_exit; + } + memset( *hfsmp, 0, sizeof(struct hfsmount) ); + + //Copy read only flag + if (mp->mnt_flag == MNT_RDONLY) (*hfsmp)->hfs_flags = HFS_READ_ONLY; + + hfs_chashinit_finish(*hfsmp); + + /* Init the ID lookup hashtable */ + hfs_idhash_init (*hfsmp); + + /* + * See if the disk supports unmap (trim). + * + * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field + * returned by vfs_ioattr. We need to call VNOP_IOCTL ourselves. 
+ */ + if (ioctl(devvp->psFSRecord->iFD, DKIOCGETFEATURES, &device_features) == 0) + { + if (device_features & DK_FEATURE_UNMAP) + { + (*hfsmp)->hfs_flags |= HFS_UNMAP; + } + + if(device_features & DK_FEATURE_BARRIER) + { + (*hfsmp)->hfs_flags |= HFS_FEATURE_BARRIER; + } + } + + /* + * Init the volume information structure + */ + lf_lck_mtx_init(&(*hfsmp)->hfs_mutex); + lf_lck_mtx_init(&(*hfsmp)->sync_mutex); + lf_lck_rw_init(&(*hfsmp)->hfs_global_lock); + lf_lck_spin_init(&(*hfsmp)->vcbFreeExtLock); + + if (mp) + { + mp->psHfsmount = (*hfsmp); + } + + (*hfsmp)->hfs_mp = mp; /* Make VFSTOHFS work */ + (*hfsmp)->hfs_raw_dev = 0; //vnode_specrdev(devvp); + (*hfsmp)->hfs_devvp = devvp; + (*hfsmp)->hfs_logical_block_size = log_blksize; + (*hfsmp)->hfs_logical_block_count = log_blkcnt; + (*hfsmp)->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt; + (*hfsmp)->hfs_physical_block_size = phys_blksize; + (*hfsmp)->hfs_log_per_phys = (phys_blksize / log_blksize); + (*hfsmp)->hfs_flags |= HFS_WRITEABLE_MEDIA; + + if (mp && (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS)) + { + (*hfsmp)->hfs_flags |= HFS_UNKNOWN_PERMS; + } + + /* MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */ + if (mp && (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS)) + { + (*hfsmp)->hfs_uid = UNKNOWNUID; + (*hfsmp)->hfs_gid = UNKNOWNGID; + // vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid); /* tell the VFS */ + (*hfsmp)->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */ + (*hfsmp)->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */ + } + + /* Find out if disk media is writable. 
*/ + if (ioctl(devvp->psFSRecord->iFD, DKIOCISWRITABLE, &iswritable) == 0) + { + if (iswritable) + { + (*hfsmp)->hfs_flags |= HFS_WRITEABLE_MEDIA; + } + else + { + (*hfsmp)->hfs_flags &= ~HFS_WRITEABLE_MEDIA; + } + } + + // Reservations + rl_init(&(*hfsmp)->hfs_reserved_ranges[0]); + rl_init(&(*hfsmp)->hfs_reserved_ranges[1]); + + // record the current time at which we're mounting this volume + struct timeval tv; + microuptime(&tv); + (*hfsmp)->hfs_mount_time = tv.tv_sec; + + /* Mount an HFS Plus disk */ + int jnl_disable = 0; + + /* Mount a standard HFS disk */ + if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) && (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: Not supporting standard HFS\n"); + retval = ENOTSUP; + goto error_exit; + } + /* Get the embedded Volume Header */ + else if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) + { + *embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize; + *embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) * (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz); + + /* + * If the embedded volume doesn't start on a block + * boundary, then switch the device to a 512-byte + * block size so everything will line up on a block + * boundary. + */ + if ((*embeddedOffset % log_blksize) != 0) + { + // LF not support DKIOCSETBLOCKSIZE, return error. 
+ LFHFS_LOG(LEVEL_DEFAULT, "hfs_mountfs: embedded volume offset not a multiple of physical block size (%d); switching to 512\n", log_blksize); + retval = ENXIO; + goto error_exit; + } + + disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) * (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz); + + (*hfsmp)->hfs_logical_block_count = disksize / log_blksize; + + (*hfsmp)->hfs_logical_bytes = (uint64_t) (*hfsmp)->hfs_logical_block_count * (uint64_t) (*hfsmp)->hfs_logical_block_size; + + mdb_offset = (uint64_t)((*embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); + + pvBuffer = hfs_malloc(phys_blksize); + if (pvBuffer == NULL) + { + retval = ENOMEM; + goto error_exit; + } + + retval = raw_readwrite_read_mount( devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)), phys_blksize, pvBuffer, phys_blksize, NULL, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: raw_readwrite_read_mount (2) failed with %d\n", retval); + goto error_exit; + } + + bcopy(pvBuffer + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize); + *vhp = (HFSPlusVolumeHeader*) mdbp; + hfs_free(pvBuffer); + pvBuffer = NULL; + } + else + { /* pure HFS+ */ + *embeddedOffset = 0; + *vhp = (HFSPlusVolumeHeader*) mdbp; + } + + retval = hfs_ValidateHFSPlusVolumeHeader(*hfsmp, *vhp); + if (retval) + goto error_exit; + + /* + * If allocation block size is less than the physical block size, + * invalidate the buffer read in using native physical block size + * to ensure data consistency. + * + * HFS Plus reserves one allocation block for the Volume Header. + * If the physical size is larger, then when we read the volume header, + * we will also end up reading in the next allocation block(s). + * If those other allocation block(s) is/are modified, and then the volume + * header is modified, the write of the volume header's buffer will write + * out the old contents of the other allocation blocks. + * + * We assume that the physical block size is same as logical block size. 
+ * The physical block size value is used to round down the offsets for + * reading and writing the primary and alternate volume headers. + * + * The same logic is also in hfs_MountHFSPlusVolume to ensure that + * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os + * using same block size. + */ + if (SWAP_BE32((*vhp)->blockSize) < (*hfsmp)->hfs_physical_block_size) + { + phys_blksize = (*hfsmp)->hfs_logical_block_size; + (*hfsmp)->hfs_physical_block_size = (*hfsmp)->hfs_logical_block_size; + (*hfsmp)->hfs_log_per_phys = 1; + + if (retval) + goto error_exit; + } + + /* + * On inconsistent disks, do not allow read-write mount + * unless it is the boot volume being mounted. We also + * always want to replay the journal if the journal_replay_only + * flag is set because that will (most likely) get the + * disk into a consistent state before fsck_hfs starts + * looking at it. + */ + if ( (mp && !(mp->mnt_flag & MNT_ROOTFS)) + && (SWAP_BE32((*vhp)->attributes) & kHFSVolumeInconsistentMask) + && !((*hfsmp)->hfs_flags & HFS_READ_ONLY)) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: failed to mount non-root inconsistent disk\n"); + retval = EINVAL; + goto error_exit; + } + + (*hfsmp)->jnl = NULL; + (*hfsmp)->jvp = NULL; + if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) && args->journal_disable) + { + jnl_disable = 1; + } + + /* + * We only initialize the journal here if the last person + * to mount this volume was journaling aware. Otherwise + * we delay journal initialization until later at the end + * of hfs_MountHFSPlusVolume() because the last person who + * mounted it could have messed things up behind our back + * (so we need to go find the .journal file, make sure it's + * the right size, re-sync up if it was moved, etc). 
+ */ + uint32_t lastMountedVersion = SWAP_BE32((*vhp)->lastMountedVersion); + uint32_t attributes = SWAP_BE32((*vhp)->attributes); + if ( (lastMountedVersion == kHFSJMountVersion) && + (attributes & kHFSVolumeJournaledMask) && + !jnl_disable) + { + + // if we're able to init the journal, mark the mount + // point as journaled. + if ((retval = hfs_early_journal_init(*hfsmp, *vhp, args, *embeddedOffset, mdb_offset, mdbp)) != 0) + { + if (retval == EROFS) + { + // EROFS is a special error code that means the volume has an external + // journal which we couldn't find. in that case we do not want to + // rewrite the volume header - we'll just refuse to mount the volume. + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: hfs_early_journal_init indicated external jnl \n"); + retval = EINVAL; + goto error_exit; + } + + // if the journal failed to open, then set the lastMountedVersion + // to be "FSK!" which fsck_hfs will see and force the fsck instead + // of just bailing out because the volume is journaled. + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n"); + HFSPlusVolumeHeader *jvhp; + + (*hfsmp)->hfs_flags |= HFS_NEED_JNL_RESET; + + if (mdb_offset == 0) + { + mdb_offset = (uint64_t)((*embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); + } + + pvBuffer = hfs_malloc(phys_blksize); + if (pvBuffer == NULL) + { + retval = ENOMEM; + goto error_exit; + } + + retval = raw_readwrite_read_mount( devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (*hfsmp)->hfs_log_per_phys), phys_blksize, pvBuffer, phys_blksize, NULL, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: raw_readwrite_read_mount (3) failed with %d\n", retval); + goto error_exit; + } + + jvhp = (HFSPlusVolumeHeader *)(pvBuffer + HFS_PRI_OFFSET(phys_blksize)); + + if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) + { + LFHFS_LOG(LEVEL_DEFAULT, "hfs_mountfs: Journal replay fail. 
Writing lastMountVersion as FSK!\n"); + + jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion); + retval = raw_readwrite_write_mount( devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (*hfsmp)->hfs_log_per_phys), phys_blksize, pvBuffer, phys_blksize, NULL, NULL ); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: raw_readwrite_write_mount (1) failed with %d\n", retval); + goto error_exit; + } + hfs_free(pvBuffer); + pvBuffer = NULL; + } + + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: hfs_early_journal_init failed, erroring out \n"); + retval = EINVAL; + goto error_exit; + } + } + + retval = hfs_MountHFSPlusVolume(*hfsmp, *vhp, *embeddedOffset, disksize, bFailForDirty); + /* + * If the backend didn't like our physical blocksize + * then retry with physical blocksize of 512. + */ + if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) + { + // LF not support DKIOCSETBLOCKSIZE, return error. + LFHFS_LOG(LEVEL_DEFAULT, "hfs_mountfs: could not use physical block size (%d).\n", log_blksize); + goto error_exit; + } + else if ( retval ) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: hfs_MountHFSPlusVolume encountered failure %d \n", retval); + goto error_exit; + } + + return (retval); + +error_exit: + if (pvBuffer) + hfs_free(pvBuffer); + + hfs_free(mdbp); + + if (*hfsmp) + { + hfs_locks_destroy(*hfsmp); + hfs_delete_chash(*hfsmp); + hfs_idhash_destroy (*hfsmp); + + hfs_free(*hfsmp); + *hfsmp = NULL; + } + return (retval); +} + + +int hfs_ScanVolGetVolName(int iFd, char* pcVolumeName) +{ + int retval = 0; + + HFSPlusVolumeHeader *vhp; + off_t embeddedOffset; + struct hfsmount *hfsmp; + struct mount* psMount = hfs_mallocz(sizeof(struct mount)); + struct vnode* psDevVnode = hfs_mallocz(sizeof(struct vnode)); + struct cnode* psDevCnode = hfs_mallocz(sizeof(struct cnode)); + struct filefork* psDevFileFork = hfs_mallocz(sizeof(struct filefork)); + FileSystemRecord_s *psFSRecord = hfs_mallocz(sizeof(FileSystemRecord_s)); + + if ( psMount == NULL || psDevVnode == NULL 
|| psDevCnode == NULL || psDevFileFork == NULL || psFSRecord == NULL ) + { + retval = ENOMEM; + LFHFS_LOG(LEVEL_ERROR, "hfs_ScanVolGetVolName: failed to malloc initial system files\n"); + goto exit; + } + + psFSRecord->iFD = iFd; + psDevVnode->psFSRecord = psFSRecord; + psDevVnode->sFSParams.vnfs_marksystem = 1; + psDevVnode->bIsMountVnode = true; + + // Initializing inputs for hfs_mount + psDevFileFork->ff_data.cf_blocks = 3; + psDevFileFork->ff_data.cf_extents[0].blockCount = 1; + psDevFileFork->ff_data.cf_extents[0].startBlock = 0; + + psDevVnode->sFSParams.vnfs_fsnode = psDevCnode; + psDevCnode->c_vp = psDevVnode; + psDevVnode->is_rsrc = false; + psDevCnode->c_datafork = psDevFileFork; + psDevVnode->sFSParams.vnfs_mp = psMount; + + retval = hfs_InitialMount(psDevVnode, psMount, 0, &vhp, &embeddedOffset, &hfsmp, false); + + if (retval) + { + goto exit; + } + else + { + strlcpy(pcVolumeName, (char*) hfsmp->vcbVN, UVFS_SCANVOLS_VOLNAME_MAX); + } + + if (vhp) free(vhp); + if (hfsmp) + { + if (hfsmp->jnl) { + journal_release(hfsmp->jnl); + hfsmp->jnl = NULL; + } + + hfsUnmount(hfsmp); + + hfs_locks_destroy(hfsmp); + hfs_delete_chash(hfsmp); + hfs_idhash_destroy (hfsmp); + + hfs_free(hfsmp); + hfsmp = NULL; + } + +exit: + if (retval) { + LFHFS_LOG(LEVEL_ERROR, "hfs_ScanVolGetVolName: failed with error %d, returning empty name and no error\n",retval); + pcVolumeName[0] = '\0'; + } + + if (psMount) free (psMount); + if (psDevVnode) free (psDevVnode); + if (psDevCnode) free (psDevCnode); + if (psDevFileFork) free (psDevFileFork); + if (psFSRecord) free (psFSRecord); + + return 0; +} + +/* + * Common code for mount and mountroot + */ +static int +hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args) +{ + int retval = 0; + + HFSPlusVolumeHeader *vhp; + off_t embeddedOffset; + struct hfsmount *hfsmp; + retval = hfs_InitialMount(devvp, mp, args, &vhp, &embeddedOffset, &hfsmp, true); + if ( retval ) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: 
hfs_InitialMount encountered failure %d \n", retval); + //No need to go to error_exit, since everything got reset at the Initial Mount + return retval; + } + + retval = hfs_CollectBtreeStats(hfsmp, vhp, embeddedOffset, args); + free(vhp); + if ( retval ) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: hfs_CollectBtreeStats encountered failure %d \n", retval); + goto error_exit; + } + + // save off a snapshot of the mtime from the previous mount + // (for matador). + hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime; + + if ( retval ) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_mountfs: encountered failure %d \n", retval); + goto error_exit; + } + + LFHFS_LOG(LEVEL_DEFAULT, "hfs_mountfs: mounted %s on device %s\n", (hfsmp->vcbVN[0] ? (const char*) hfsmp->vcbVN : "unknown"), "unknown device"); + + hfs_flushvolumeheader(hfsmp, 0); + + return (0); + +error_exit: + if (vhp) free(vhp); + + if (hfsmp) + { + hfsUnmount(hfsmp); + + hfs_locks_destroy(hfsmp); + hfs_delete_chash(hfsmp); + hfs_idhash_destroy (hfsmp); + + hfs_free(hfsmp); + hfsmp = NULL; + } + return (retval); +} + +/* + * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount + */ +static void +hfs_locks_destroy(struct hfsmount *hfsmp) +{ + + lf_lck_mtx_destroy(&hfsmp->hfs_mutex); + lf_lck_mtx_destroy(&hfsmp->sync_mutex); + lf_lck_rw_destroy(&hfsmp->hfs_global_lock); + lf_lck_spin_destroy(&hfsmp->vcbFreeExtLock); + + return; +} + + +/* + * Flush any dirty in-memory mount data to the on-disk + * volume header. + * + * Note: the on-disk volume signature is intentionally + * not flushed since the on-disk "H+" and "HX" signatures + * are always stored in-memory as "H+". 
+ */ +int +hfs_flushvolumeheader(struct hfsmount *hfsmp, hfs_flush_volume_header_options_t options) +{ + int retval = 0; + + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + bool critical = false; + daddr64_t avh_sector; + bool altflush = ISSET(options, HFS_FVH_WRITE_ALT); + + void *pvVolHdrData = NULL; + GenericLFBuf *psVolHdrBuf = NULL; + void *pvVolHdr2Data = NULL; + GenericLFBuf *psVolHdr2Buf = NULL; + void *pvAltHdrData = NULL; + GenericLFBuf *psAltHdrBuf = NULL; + + + if (ISSET(options, HFS_FVH_FLUSH_IF_DIRTY) && !hfs_header_needs_flushing(hfsmp)) { + return 0; + } + + if (hfsmp->hfs_flags & HFS_READ_ONLY) { + return 0; + } + + if (options & HFS_FVH_MARK_UNMOUNT) { + HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; + } else { + HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; + } + + daddr64_t priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size)); + + if (!(options & HFS_FVH_SKIP_TRANSACTION)) { + if (hfs_start_transaction(hfsmp) != 0) { + return EINVAL; + } + } + + psVolHdrBuf = lf_hfs_generic_buf_allocate(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, GEN_BUF_PHY_BLOCK); + if (psVolHdrBuf == NULL) { + retval = ENOMEM; + goto err_exit; + } + pvVolHdrData = psVolHdrBuf->pvData; + + retval = lf_hfs_generic_buf_read(psVolHdrBuf); + if (retval) { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + + HFSPlusVolumeHeader* volumeHeader = (HFSPlusVolumeHeader *)(pvVolHdrData + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); + + /* + * Sanity check what we just read. If it's bad, try the alternate instead. 
+ */ + u_int16_t signature = SWAP_BE16 (volumeHeader->signature); + u_int16_t hfsversion = SWAP_BE16 (volumeHeader->version); + + if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) || + (hfsversion < kHFSPlusVersion) || (hfsversion > 100) || + (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) + { + LFHFS_LOG(LEVEL_DEFAULT, "hfs_flushvolumeheader: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n", vcb->vcbVN, signature, hfsversion, SWAP_BE32 (volumeHeader->blockSize)); + hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED); + + /* Almost always we read AVH relative to the partition size */ + avh_sector = hfsmp->hfs_partition_avh_sector; + + if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) + { + /* + * The two altVH offsets do not match --- which means that a smaller file + * system exists in a larger partition. Verify that we have the correct + * alternate volume header sector as per the current parititon size. + * The GPT device that we are mounted on top could have changed sizes + * without us knowing. + * + * We're in a transaction, so it's safe to modify the partition_avh_sector + * field if necessary. 
+             */
+
+            uint64_t sector_count = 0;
+
+            /* Get underlying device block count */
+            retval = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCGETBLOCKCOUNT, &sector_count);
+            if (retval)
+            {
+                LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d getting block count (%s) \n", retval, vcb->vcbVN);
+                retval = ENXIO;
+                goto err_exit;
+            }
+
+            /* Partition size was changed without our knowledge */
+            if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count)
+            {
+                hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
+                /* Note: hfs_fs_avh_sector will remain unchanged */
+                LFHFS_LOG(LEVEL_DEFAULT, "hfs_flushvolumeheader: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n", hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
+
+                /*
+                 * We just updated the offset for AVH relative to
+                 * the partition size, so the content of that AVH
+                 * will be invalid. But since we are also maintaining
+                 * a valid AVH relative to the file system size, we
+                 * can read it since primary VH and partition AVH
+                 * are not valid.
+                 */
+                avh_sector = hfsmp->hfs_fs_avh_sector;
+            }
+        }
+
+        LFHFS_LOG(LEVEL_DEFAULT, "hfs_flushvolumeheader: trying alternate (for %s) avh_sector=%qu\n", (avh_sector == hfsmp->hfs_fs_avh_sector) ?
"file system" : "partition", avh_sector); + + if (avh_sector) + { + psAltHdrBuf = lf_hfs_generic_buf_allocate(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, GEN_BUF_PHY_BLOCK); + if (psAltHdrBuf == NULL) { + retval = ENOMEM; + goto err_exit; + } + pvAltHdrData = psAltHdrBuf->pvData; + + retval = lf_hfs_generic_buf_read(psAltHdrBuf); + + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading alternate VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + + HFSPlusVolumeHeader * altVH = (HFSPlusVolumeHeader *)(pvAltHdrData + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size)); + signature = SWAP_BE16(altVH->signature); + hfsversion = SWAP_BE16(altVH->version); + + if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) || + (hfsversion < kHFSPlusVersion) || (kHFSPlusVersion > 100) || + (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n", vcb->vcbVN, signature, hfsversion, SWAP_BE32(altVH->blockSize)); + retval = EIO; + goto err_exit; + } + + /* The alternate is plausible, so use it. */ + bcopy(altVH, volumeHeader, kMDBSize); + lf_hfs_generic_buf_release(psAltHdrBuf); + pvAltHdrData = NULL; + } + else + { + /* No alternate VH, nothing more we can do. 
*/ + retval = EIO; + goto err_exit; + } + } + + if (hfsmp->jnl) + { + journal_modify_block_start(hfsmp->jnl, psVolHdrBuf); + } + + /* + * For embedded HFS+ volumes, update create date if it changed + * (ie from a setattrlist call) + */ + if ((vcb->hfsPlusIOPosOffset != 0) && (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) + { + HFSMasterDirectoryBlock *mdb; + + psVolHdr2Buf = lf_hfs_generic_buf_allocate(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, GEN_BUF_PHY_BLOCK); + if (psVolHdr2Buf == NULL) { + retval = ENOMEM; + goto err_exit; + } + void *pvVolHdr2Data = psVolHdr2Buf->pvData; + + retval = lf_hfs_generic_buf_read(psVolHdr2Buf); + + if (retval) + { + lf_hfs_generic_buf_release(psVolHdr2Buf); + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading alternate VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + + mdb = (HFSMasterDirectoryBlock *)(pvVolHdr2Data + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size)); + + if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate ) + { + if (hfsmp->jnl) + { + journal_modify_block_start(hfsmp->jnl, psVolHdr2Buf); + } + mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */ + if (hfsmp->jnl) + { + journal_modify_block_end(hfsmp->jnl, psVolHdr2Buf, NULL, NULL); + } + else + { + retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvVolHdr2Data, hfsmp->hfs_physical_block_size, NULL, NULL); + + lf_hfs_generic_buf_release(psVolHdr2Buf); + pvVolHdr2Data = NULL; + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d writing VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + } + } + else + { + lf_hfs_generic_buf_release(psVolHdr2Buf); /* just release it */ + pvVolHdr2Data = NULL; + } + } + + hfs_lock_mount 
(hfsmp); + + /* Note: only update the lower 16 bits worth of attributes */ + volumeHeader->attributes = SWAP_BE32 (vcb->vcbAtrb); + volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock); + if (hfsmp->jnl) + { + volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion); + } + else + { + volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion); + } + volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */ + volumeHeader->modifyDate = SWAP_BE32 (to_hfs_time(vcb->vcbLsMod)); + volumeHeader->backupDate = SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp)); + volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt); + volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt); + volumeHeader->totalBlocks = SWAP_BE32 (vcb->totalBlocks); + volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks + vcb->reclaimBlocks); + volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation); + volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz); + volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz); + volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID); + volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt); + volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap); + + if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) + { + bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)); + critical = true; + } + + if (!altflush && !ISSET(options, HFS_FVH_FLUSH_IF_DIRTY)) + { + goto done; + } + + /* Sync Extents over-flow file meta data */ + struct filefork * fp = VTOF(vcb->extentsRefNum); + if (FTOC(fp)->c_flag & C_MODIFIED) + { + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + volumeHeader->extentsFile.extents[iExtentCounter].startBlock = SWAP_BE32 (fp->ff_extents[iExtentCounter].startBlock); + volumeHeader->extentsFile.extents[iExtentCounter].blockCount = SWAP_BE32 
(fp->ff_extents[iExtentCounter].blockCount); + } + volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size); + volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); + volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); + FTOC(fp)->c_flag &= ~C_MODIFIED; + altflush = true; + } + + /* Sync Catalog file meta data */ + fp = VTOF(vcb->catalogRefNum); + if (FTOC(fp)->c_flag & C_MODIFIED) + { + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + volumeHeader->catalogFile.extents[iExtentCounter].startBlock = SWAP_BE32 (fp->ff_extents[iExtentCounter].startBlock); + volumeHeader->catalogFile.extents[iExtentCounter].blockCount = SWAP_BE32 (fp->ff_extents[iExtentCounter].blockCount); + } + volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size); + volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); + volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); + FTOC(fp)->c_flag &= ~C_MODIFIED; + altflush = true; + } + + /* Sync Allocation file meta data */ + fp = VTOF(vcb->allocationsRefNum); + if (FTOC(fp)->c_flag & C_MODIFIED) + { + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + volumeHeader->allocationFile.extents[iExtentCounter].startBlock = SWAP_BE32 (fp->ff_extents[iExtentCounter].startBlock); + volumeHeader->allocationFile.extents[iExtentCounter].blockCount = SWAP_BE32 (fp->ff_extents[iExtentCounter].blockCount); + } + volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size); + volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); + volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); + FTOC(fp)->c_flag &= ~C_MODIFIED; + altflush = true; + } + + /* Sync Attribute file meta data */ + if (hfsmp->hfs_attribute_vp) + { + fp = VTOF(hfsmp->hfs_attribute_vp); + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + 
volumeHeader->attributesFile.extents[iExtentCounter].startBlock = SWAP_BE32 (fp->ff_extents[iExtentCounter].startBlock); + volumeHeader->attributesFile.extents[iExtentCounter].blockCount = SWAP_BE32 (fp->ff_extents[iExtentCounter].blockCount); + } + if (ISSET(FTOC(fp)->c_flag, C_MODIFIED)) + { + FTOC(fp)->c_flag &= ~C_MODIFIED; + altflush = true; + } + volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size); + volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); + volumeHeader->attributesFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); + } + + /* Sync Startup file meta data */ + if (hfsmp->hfs_startup_vp) + { + fp = VTOF(hfsmp->hfs_startup_vp); + if (FTOC(fp)->c_flag & C_MODIFIED) + { + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + volumeHeader->startupFile.extents[iExtentCounter].startBlock = SWAP_BE32 (fp->ff_extents[iExtentCounter].startBlock); + volumeHeader->startupFile.extents[iExtentCounter].blockCount = SWAP_BE32 (fp->ff_extents[iExtentCounter].blockCount); + } + volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size); + volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks); + volumeHeader->startupFile.clumpSize = SWAP_BE32 (fp->ff_clumpsize); + FTOC(fp)->c_flag &= ~C_MODIFIED; + altflush = true; + } + } + + if (altflush) + critical = true; + +done: + MarkVCBClean(hfsmp); + hfs_unlock_mount (hfsmp); + + /* If requested, flush out the alternate volume header */ + if (altflush) { + /* + * The two altVH offsets do not match --- which means that a smaller file + * system exists in a larger partition. Verify that we have the correct + * alternate volume header sector as per the current parititon size. + * The GPT device that we are mounted on top could have changed sizes + * without us knowning. + * + * We're in a transaction, so it's safe to modify the partition_avh_sector + * field if necessary. 
+         */
+        if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)
+        {
+            uint64_t sector_count;
+
+            /* Get underlying device block count */
+            retval = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCGETBLOCKCOUNT, &sector_count);
+            if (retval)
+            {
+                LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d getting block count (%s) \n", retval, vcb->vcbVN);
+                retval = ENXIO;
+                goto err_exit;
+            }
+
+            /* Partition size was changed without our knowledge */
+            if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count)
+            {
+                hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
+                /* Note: hfs_fs_avh_sector will remain unchanged */
+                LFHFS_LOG(LEVEL_DEFAULT, "hfs_flushvolumeheader: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
+                          hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
+            }
+        }
+
+        /*
+         * First see if we need to write I/O to the "secondary" AVH
+         * located at FS Size - 1024 bytes, because this one will
+         * always go into the journal. We put this AVH into the journal
+         * because even if the filesystem size has shrunk, this LBA should be
+         * reachable after the partition-size modification has occurred.
+         * The one where we need to be careful is partitionsize-1024, since the
+         * partition size should hopefully shrink.
+         *
+         * Most of the time this block will not execute.
+ */ + if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) + { + if (pvAltHdrData != NULL) + { + panic("We shouldn't be here!"); + hfs_assert(0); + } + + psAltHdrBuf = lf_hfs_generic_buf_allocate(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, GEN_BUF_PHY_BLOCK); + + if (psAltHdrBuf == NULL) { + retval = ENOMEM; + goto err_exit; + } + pvAltHdrData = psAltHdrBuf->pvData; + + retval = lf_hfs_generic_buf_read(psAltHdrBuf); + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading alternate VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + + if (hfsmp->jnl) + { + journal_modify_block_start(hfsmp->jnl, psAltHdrBuf); + } + + bcopy(volumeHeader, pvAltHdrData + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), kMDBSize); + + if (hfsmp->jnl) + { + journal_modify_block_end(hfsmp->jnl, psAltHdrBuf, NULL, NULL); + } + else + { + retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvAltHdrData, hfsmp->hfs_physical_block_size, NULL, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d writing VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + lf_hfs_generic_buf_release(psAltHdrBuf); + pvAltHdrData = NULL; + } + } + + /* + * Flush out alternate volume header located at 1024 bytes before + * end of the partition as part of journal transaction. In + * most cases, this will be the only alternate volume header + * that we need to worry about because the file system size is + * same as the partition size, therefore hfs_fs_avh_sector is + * same as hfs_partition_avh_sector. This is the "priority" AVH. + * + * However, do not always put this I/O into the journal. 
If we skipped the + * FS-Size AVH write above, then we will put this I/O into the journal as + * that indicates the two were in sync. However, if the FS size is + * not the same as the partition size, we are tracking two. We don't + * put it in the journal in that case, since if the partition + * size changes between uptimes, and we need to replay the journal, + * this I/O could generate an EIO if during replay it is now trying + * to access blocks beyond the device EOF. + */ + if (hfsmp->hfs_partition_avh_sector) + { + if (pvAltHdrData != NULL) + { + panic("We shouldn't be here!"); + hfs_assert(0); + } + + psAltHdrBuf = lf_hfs_generic_buf_allocate(hfsmp->hfs_devvp, + HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), + hfsmp->hfs_physical_block_size, GEN_BUF_PHY_BLOCK); + if (psAltHdrBuf == NULL) { + retval = ENOMEM; + goto err_exit; + } + pvAltHdrData = psAltHdrBuf->pvData; + + retval = lf_hfs_generic_buf_read(psAltHdrBuf); + + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading alternate VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + + /* only one AVH, put this I/O in the journal. */ + if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) { + journal_modify_block_start(hfsmp->jnl, psAltHdrBuf); + } + + bcopy(volumeHeader, pvAltHdrData + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), kMDBSize); + + /* If journaled and we only have one AVH to track */ + if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) { + journal_modify_block_end (hfsmp->jnl, psAltHdrBuf, NULL, NULL); + } + else + { + /* + * If we don't have a journal or there are two AVH's at the + * moment, then this one doesn't go in the journal. Note that + * this one may generate I/O errors, since the partition + * can be resized behind our backs at any moment and this I/O + * may now appear to be beyond the device EOF. 
+ */ + retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvAltHdrData, hfsmp->hfs_physical_block_size, NULL, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d writing VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + lf_hfs_generic_buf_release(psAltHdrBuf); + pvAltHdrData = NULL; + hfs_flush(hfsmp, HFS_FLUSH_CACHE); + } + } + } + + /* Finish modifying the block for the primary VH */ + if (hfsmp->jnl) { + journal_modify_block_end(hfsmp->jnl, psVolHdrBuf, NULL, NULL); + } + else + { + retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvVolHdrData, hfsmp->hfs_physical_block_size, NULL, NULL); + /* When critical data changes, flush the device cache */ + if (critical && (retval == 0)) + { + hfs_flush(hfsmp, HFS_FLUSH_CACHE); + } + + lf_hfs_generic_buf_release(psVolHdrBuf); + pvVolHdrData = NULL; + if (retval) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_flushvolumeheader: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN); + goto err_exit; + } + } + if (!(options & HFS_FVH_SKIP_TRANSACTION)) { + hfs_end_transaction(hfsmp); + } + + return (retval); + +err_exit: + if (pvVolHdrData) + lf_hfs_generic_buf_release(psVolHdrBuf); + if (pvVolHdr2Data) + lf_hfs_generic_buf_release(psVolHdr2Buf); + if (pvAltHdrData) + lf_hfs_generic_buf_release(psAltHdrBuf); + + if (!(options & HFS_FVH_SKIP_TRANSACTION)) { + hfs_end_transaction(hfsmp); + } + return retval; +} + +/* If a runtime corruption is detected, set the volume inconsistent + * bit in the volume attributes. The volume inconsistent bit is a persistent + * bit which represents that the volume is corrupt and needs repair. 
+ * The volume inconsistent bit can be set from the kernel when it detects + * runtime corruption or from file system repair utilities like fsck_hfs when + * a repair operation fails. The bit should be cleared only from file system + * verify/repair utility like fsck_hfs when a verify/repair succeeds. + */ +void hfs_mark_inconsistent(struct hfsmount *hfsmp, hfs_inconsistency_reason_t reason) +{ + hfs_lock_mount (hfsmp); + if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) + { + hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask; + MarkVCBDirty(hfsmp); + } + if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) + { + switch (reason) + { + case HFS_INCONSISTENCY_DETECTED: + LFHFS_LOG(LEVEL_ERROR, "hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",hfsmp->vcbVN); + break; + case HFS_ROLLBACK_FAILED: + LFHFS_LOG(LEVEL_ERROR, "hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n", hfsmp->vcbVN); + break; + case HFS_OP_INCOMPLETE: + LFHFS_LOG(LEVEL_ERROR, "hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",hfsmp->vcbVN); + break; + case HFS_FSCK_FORCED: + LFHFS_LOG(LEVEL_ERROR, "hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",hfsmp->vcbVN); + break; + } + } + hfs_unlock_mount (hfsmp); +} + +/* + * Creates a UUID from a unique "name" in the HFS UUID Name space. + * See version 3 UUID. 
+ */ +void +hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result_uuid) +{ + + if (uuid_is_null(hfsmp->hfs_full_uuid)) { + uuid_t result; + + CC_MD5_CTX md5c; + uint8_t rawUUID[8]; + + ((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6]; + ((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7]; + + CC_MD5_Init( &md5c ); + CC_MD5_Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) ); + CC_MD5_Update( &md5c, rawUUID, sizeof (rawUUID) ); + CC_MD5_Final( result, &md5c ); + + result[6] = 0x30 | ( result[6] & 0x0F ); + result[8] = 0x80 | ( result[8] & 0x3F ); + + uuid_copy(hfsmp->hfs_full_uuid, result); + } + uuid_copy (result_uuid, hfsmp->hfs_full_uuid); + +} + +/* + * Call into the allocator code and perform a full scan of the bitmap file. + * + * This allows us to TRIM unallocated ranges if needed, and also to build up + * an in-memory summary table of the state of the allocated blocks. + */ +void hfs_scan_blocks (struct hfsmount *hfsmp) +{ + /* + * Take the allocation file lock. Journal transactions will block until + * we're done here. + */ + int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + /* + * We serialize here with the HFS mount lock as we're mounting. + * + * The mount can only proceed once this thread has acquired the bitmap + * lock, since we absolutely do not want someone else racing in and + * getting the bitmap lock, doing a read/write of the bitmap file, + * then us getting the bitmap lock. + * + * To prevent this, the mount thread takes the HFS mount mutex, starts us + * up, then immediately msleeps on the scan_var variable in the mount + * point as a condition variable. This serialization is safe since + * if we race in and try to proceed while they're still holding the lock, + * we'll block trying to acquire the global lock. Since the mount thread + * acquires the HFS mutex before starting this function in a new thread, + * any lock acquisition on our part must be linearizably AFTER the mount thread's. 
+ * + * Note that the HFS mount mutex is always taken last, and always for only + * a short time. In this case, we just take it long enough to mark the + * scan-in-flight bit. + */ + (void) hfs_lock_mount (hfsmp); + hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT; + hfs_unlock_mount (hfsmp); + + /* Initialize the summary table */ + if (hfs_init_summary (hfsmp)) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_scan_blocks: could not initialize summary table for %s\n", hfsmp->vcbVN); + } + + /* + * ScanUnmapBlocks assumes that the bitmap lock is held when you + * call the function. We don't care if there were any errors issuing unmaps. + * + * It will also attempt to build up the summary table for subsequent + * allocator use, as configured. + */ + (void) ScanUnmapBlocks(hfsmp); + + (void) hfs_lock_mount (hfsmp); + hfsmp->scan_var &= ~HFS_ALLOCATOR_SCAN_INFLIGHT; + hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED; + hfs_unlock_mount (hfsmp); + + hfs_systemfile_unlock(hfsmp, flags); +} + +int +hfs_GetInfoByID(struct hfsmount *hfsmp, cnid_t cnid, UVFSFileAttributes *file_attrs, char pcName[MAX_UTF8_NAME_LENGTH]) +{ + u_int32_t linkref = 0; + struct vnode *psVnode = NULL; + struct cat_desc cndesc; + struct cat_attr cnattr; + struct cat_fork cnfork; + int error = 0; + + /* Check for cnids that should't be exported. */ + if ((cnid < kHFSFirstUserCatalogNodeID) && + (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) { + return (ENOENT); + } + /* Don't export our private directories. 
*/ + if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || + cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { + return (ENOENT); + } + /* + * Check the hash first + */ + psVnode = hfs_chash_getvnode(hfsmp, cnid, 0, 0, 0); + if (psVnode) { + goto getAttrAndDone; + } + + bzero(&cndesc, sizeof(cndesc)); + bzero(&cnattr, sizeof(cnattr)); + bzero(&cnfork, sizeof(cnfork)); + + /* + * Not in hash, lookup in catalog + */ + if (cnid == kHFSRootParentID) { + static char hfs_rootname[] = "/"; + + cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0]; + cndesc.cd_namelen = 1; + cndesc.cd_parentcnid = kHFSRootParentID; + cndesc.cd_cnid = kHFSRootFolderID; + cndesc.cd_flags = CD_ISDIR; + + cnattr.ca_fileid = kHFSRootFolderID; + cnattr.ca_linkcount = 1; + cnattr.ca_entries = 1; + cnattr.ca_dircount = 1; + cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO); + } else { + int lockflags; + cnid_t pid; + const char *nameptr; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (error) { + return (error); + } + + /* + * Check for a raw hardlink inode and save its linkref. 
+ */ + pid = cndesc.cd_parentcnid; + nameptr = (const char *)cndesc.cd_nameptr; + if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_INODE_PREFIX_LEN && + (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) { + linkref = (uint32_t) strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10); + + } else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_DIRINODE_PREFIX_LEN && + (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) { + linkref = (uint32_t) strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10); + + } else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_DELETE_PREFIX_LEN && + (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) { + cat_releasedesc(&cndesc); + return (ENOENT); /* open unlinked file */ + } + } + + /* + * Finish initializing cnode descriptor for hardlinks. + * + * We need a valid name and parent for reverse lookups. + */ + if (linkref) { + cnid_t lastid; + struct cat_desc linkdesc; + int linkerr = 0; + + cnattr.ca_linkref = linkref; + bzero (&linkdesc, sizeof (linkdesc)); + + /* + * If the caller supplied the raw inode value, then we don't know exactly + * which hardlink they wanted. It's likely that they acquired the raw inode + * value BEFORE the item became a hardlink, in which case, they probably + * want the oldest link. So request the oldest link from the catalog. + * + * Unfortunately, this requires that we iterate through all N hardlinks. On the plus + * side, since we know that we want the last linkID, we can also have this one + * call give us back the name of the last ID, since it's going to have it in-hand... + */ + linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc); + if ((linkerr == 0) && (lastid != 0)) { + /* + * Release any lingering buffers attached to our local descriptor. 
+ * Then copy the name and other business into the cndesc + */ + cat_releasedesc (&cndesc); + bcopy (&linkdesc, &cndesc, sizeof(linkdesc)); + } + /* If it failed, the linkref code will just use whatever it had in-hand below. */ + + int newvnode_flags = 0; + error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cnfork, &psVnode, &newvnode_flags); + if (error == 0) { + VTOC(psVnode)->c_flag |= C_HARDLINK; + } + } + else + { + int newvnode_flags = 0; + + void *buf = hfs_malloc(MAX_UTF8_NAME_LENGTH); + if (buf == NULL) { + return (ENOMEM); + } + + /* Supply hfs_getnewvnode with a component name. */ + struct componentname cn = { + .cn_nameiop = LOOKUP, + .cn_flags = ISLASTCN, + .cn_pnlen = MAXPATHLEN, + .cn_namelen = cndesc.cd_namelen, + .cn_pnbuf = buf, + .cn_nameptr = buf + }; + + bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1); + error = hfs_getnewvnode(hfsmp, NULL, &cn, &cndesc, 0, &cnattr, &cnfork, &psVnode, &newvnode_flags); + if (error == 0 && (VTOC(psVnode)->c_flag & C_HARDLINK)) { + hfs_savelinkorigin(VTOC(psVnode), cndesc.cd_parentcnid); + } + + hfs_free(buf); + } + cat_releasedesc(&cndesc); + +getAttrAndDone: + if (!error) vnode_GetAttrInternal (psVnode, file_attrs); + if (psVnode != NULL) hfs_unlock(VTOC(psVnode)); + + if (error || psVnode == NULL || psVnode->sFSParams.vnfs_cnp->cn_nameptr == NULL){ + hfs_vnop_reclaim(psVnode); + return EFAULT; + } + + if (cnid == kHFSRootFolderID) + pcName[0] = 0; + else { + strlcpy(pcName, (char*) psVnode->sFSParams.vnfs_cnp->cn_nameptr, MAX_UTF8_NAME_LENGTH); + } + + error = hfs_vnop_reclaim(psVnode); + + return (error); +} + +/* + * Look up an HFS object by ID. + * + * The object is returned with an iocount reference and the cnode locked. + * + * If the object is a file then it will represent the data fork. 
+ */ +int +hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted) +{ + struct vnode *vp = NULL; + struct cat_desc cndesc; + struct cat_attr cnattr; + struct cat_fork cnfork; + + u_int32_t linkref = 0; + + int error; + + /* Check for cnids that should't be exported. */ + if ((cnid < kHFSFirstUserCatalogNodeID) && + (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) { + return (ENOENT); + } + /* Don't export our private directories. */ + if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid || + cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) { + return (ENOENT); + } + /* + * Check the hash first + */ + vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted); + if (vp) { + *vpp = vp; + return(0); + } + + bzero(&cndesc, sizeof(cndesc)); + bzero(&cnattr, sizeof(cnattr)); + bzero(&cnfork, sizeof(cnfork)); + + /* + * Not in hash, lookup in catalog + */ + if (cnid == kHFSRootParentID) { + static char hfs_rootname[] = "/"; + + cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0]; + cndesc.cd_namelen = 1; + cndesc.cd_parentcnid = kHFSRootParentID; + cndesc.cd_cnid = kHFSRootFolderID; + cndesc.cd_flags = CD_ISDIR; + + cnattr.ca_fileid = kHFSRootFolderID; + cnattr.ca_linkcount = 1; + cnattr.ca_entries = 1; + cnattr.ca_dircount = 1; + cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO); + } else { + int lockflags; + cnid_t pid; + const char *nameptr; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (error) { + *vpp = NULL; + return (error); + } + + /* + * Check for a raw hardlink inode and save its linkref. 
+ */ + pid = cndesc.cd_parentcnid; + nameptr = (const char *)cndesc.cd_nameptr; + if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_INODE_PREFIX_LEN && + (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) { + linkref = (uint32_t) strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10); + + } else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_DIRINODE_PREFIX_LEN && + (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) { + linkref = (uint32_t) strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10); + + } else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && + cndesc.cd_namelen > HFS_DELETE_PREFIX_LEN && + (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) { + *vpp = NULL; + cat_releasedesc(&cndesc); + return (ENOENT); /* open unlinked file */ + } + } + + /* + * Finish initializing cnode descriptor for hardlinks. + * + * We need a valid name and parent for reverse lookups. + */ + if (linkref) { + cnid_t lastid; + struct cat_desc linkdesc; + int linkerr = 0; + + cnattr.ca_linkref = linkref; + bzero (&linkdesc, sizeof (linkdesc)); + + /* + * If the caller supplied the raw inode value, then we don't know exactly + * which hardlink they wanted. It's likely that they acquired the raw inode + * value BEFORE the item became a hardlink, in which case, they probably + * want the oldest link. So request the oldest link from the catalog. + * + * Unfortunately, this requires that we iterate through all N hardlinks. On the plus + * side, since we know that we want the last linkID, we can also have this one + * call give us back the name of the last ID, since it's going to have it in-hand... + */ + linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc); + if ((linkerr == 0) && (lastid != 0)) { + /* + * Release any lingering buffers attached to our local descriptor. 
+ * Then copy the name and other business into the cndesc + */ + cat_releasedesc (&cndesc); + bcopy (&linkdesc, &cndesc, sizeof(linkdesc)); + } + /* If it failed, the linkref code will just use whatever it had in-hand below. */ + } + + if (linkref) { + int newvnode_flags = 0; + error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cnfork, &vp, &newvnode_flags); + if (error == 0) { + VTOC(vp)->c_flag |= C_HARDLINK; + + //TBD - this set is for vfs -> since we have the C_HARDLINK + // currently disable this set. + //vnode_setmultipath(vp); + } + } + else + { + int newvnode_flags = 0; + + void *buf = hfs_malloc(MAXPATHLEN); + + /* Supply hfs_getnewvnode with a component name. */ + struct componentname cn = { + .cn_nameiop = LOOKUP, + .cn_flags = ISLASTCN, + .cn_pnlen = MAXPATHLEN, + .cn_namelen = cndesc.cd_namelen, + .cn_pnbuf = buf, + .cn_nameptr = buf + }; + + bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1); + error = hfs_getnewvnode(hfsmp, NULL, &cn, &cndesc, 0, &cnattr, &cnfork, &vp, &newvnode_flags); + + if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) { + hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid); + } + + hfs_free(buf); + } + cat_releasedesc(&cndesc); + + *vpp = vp; + if (vp && skiplock) { + hfs_unlock(VTOC(vp)); + } + return (error); +} + +/* + * Return the root of a filesystem. 
 */
int hfs_vfs_root(struct mount *mp, struct vnode **vpp)
{
    /* The root directory always has CNID kHFSRootFolderID; return it unlocked (skiplock=1). */
    return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
}

/*
 * unmount system call
 *
 * Tears down the allocator summary table, releases the journal, unmounts the
 * volume structures, purges device buffer-cache entries, and frees the mount.
 */
int hfs_unmount(struct mount *mp)
{
    struct hfsmount *hfsmp = VFSTOHFS(mp);
    int retval = E_NONE;

    /* Free the in-memory allocator summary table, if one was built. */
    if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)
    {
        if (hfsmp->hfs_summary_table)
        {
            int err = 0;
            /*
             * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
             */
            if (hfsmp->hfs_allocation_vp)
            {
                err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
            }
            hfs_free(hfsmp->hfs_summary_table);
            hfsmp->hfs_summary_table = NULL;
            hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;

            /* Only drop the bitmap lock if the acquisition above succeeded. */
            if (err == 0 && hfsmp->hfs_allocation_vp)
            {
                hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
            }
        }
    }

    /*
     * Invalidate our caches and release metadata vnodes
     */
    if (hfsmp->jnl) {
        journal_release(hfsmp->jnl);
        hfsmp->jnl = NULL;
    }

    hfsUnmount(hfsmp);

    int iFD = hfsmp->hfs_devvp->psFSRecord->iFD;
    // Remove buffer cache entries related to the mount
    lf_hfs_generic_buf_cache_clear_by_iFD(iFD);

    vnode_rele(hfsmp->hfs_devvp);

    hfs_locks_destroy(hfsmp);
    hfs_delete_chash(hfsmp);
    hfs_idhash_destroy(hfsmp);

    /* All tentative and locked block reservations must be drained by unmount time. */
    hfs_assert(TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS]) && TAILQ_EMPTY(&hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS]));
    hfs_assert(!hfsmp->lockedBlocks);

    hfs_free(hfsmp);

    /* NOTE(review): retval is never set after init — always returns E_NONE. */
    return (retval);
}
/* Update volume encoding bitmap (HFS Plus only)
 *
 * Mark a legacy text encoding as in-use (as needed)
 * in the volume header of this HFS+ filesystem.
 */
void
hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
{
/* Encodings whose numeric value exceeds 63 get remapped into reserved bitmap slots. */
#define  kIndexMacUkrainian    48    /* MacUkrainian encoding is 152 */
#define  kIndexMacFarsi        49    /* MacFarsi encoding is 140 */

    u_int32_t    index;

    switch (encoding)
    {
        case kTextEncodingMacUkrainian:
            index = kIndexMacUkrainian;
            break;
        case kTextEncodingMacFarsi:
            index = kIndexMacFarsi;
            break;
        default:
            index = encoding;
            break;
    }

    /* Only mark the encoding as in-use if it wasn't already set */
    if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
        hfs_lock_mount (hfsmp);
        hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
        MarkVCBDirty(hfsmp);
        hfs_unlock_mount(hfsmp);
    }
}

/*
 * Update volume stats
 *
 * On journal volumes this will cause a volume header flush
 */
int
hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
{
    struct timeval tv;
    microtime(&tv);

    hfs_lock_mount (hfsmp);

    MarkVCBDirty(hfsmp);
    hfsmp->hfs_mtime = tv.tv_sec;

    /* Counters saturate at their max values instead of wrapping around. */
    switch (op) {
        case VOL_UPDATE:
            break;
        case VOL_MKDIR:
            if (hfsmp->hfs_dircount != 0xFFFFFFFF)
                ++hfsmp->hfs_dircount;
            if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
                ++hfsmp->vcbNmRtDirs;
            break;
        case VOL_RMDIR:
            if (hfsmp->hfs_dircount != 0)
                --hfsmp->hfs_dircount;
            if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
                --hfsmp->vcbNmRtDirs;
            break;
        case VOL_MKFILE:
            if (hfsmp->hfs_filecount != 0xFFFFFFFF)
                ++hfsmp->hfs_filecount;
            if (inroot && hfsmp->vcbNmFls != 0xFFFF)
                ++hfsmp->vcbNmFls;
            break;
        case VOL_RMFILE:
            if (hfsmp->hfs_filecount != 0)
                --hfsmp->hfs_filecount;
            if (inroot && hfsmp->vcbNmFls != 0xFFFF)
                --hfsmp->vcbNmFls;
            break;
    }

    hfs_unlock_mount (hfsmp);

    hfs_flushvolumeheader(hfsmp, 0);

    return (0);
}

diff --git a/livefiles_hfs_plugin/lf_hfs_vfsops.h b/livefiles_hfs_plugin/lf_hfs_vfsops.h
new file mode 100644
index 0000000..a9e046b
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_vfsops.h
@@ -0,0 +1,44 @@
/*
 Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * lf_hfs_vfsops.h
 * livefiles_hfs
 *
 * Created by Yakov Ben Zaken on 20/03/2018.
 */

#ifndef lf_hfs_vfsops_h
#define lf_hfs_vfsops_h

#include "lf_hfs.h"
#include "lf_hfs_dirops_handler.h"

/* Option flags for hfs_flushvolumeheader(). */
typedef enum {
//    HFS_FVH_WAIT              = 0x0001,  // Livefiles always waits for non-journal volume header writes.
    HFS_FVH_WRITE_ALT           = 0x0002,
    HFS_FVH_FLUSH_IF_DIRTY      = 0x0004,
    HFS_FVH_MARK_UNMOUNT        = 0x0008,
    HFS_FVH_SKIP_TRANSACTION    = 0x0010, // This volume flush is called from within an hfs-transaction
} hfs_flush_volume_header_options_t;

/* Operation selector for hfs_volupdate(). */
enum volop {
    VOL_UPDATE,
    VOL_MKDIR,
    VOL_RMDIR,
    VOL_MKFILE,
    VOL_RMFILE
};

/* Flag the on-disk volume as inconsistent so fsck runs on next check. */
void  hfs_mark_inconsistent(struct hfsmount *hfsmp, hfs_inconsistency_reason_t reason);
/* Write the (primary and optionally alternate) volume header to disk. */
int   hfs_flushvolumeheader(struct hfsmount *hfsmp, hfs_flush_volume_header_options_t options);
int   hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data);
int   hfs_ScanVolGetVolName(int iFd, char* pcVolumeName);
/* Return (computing and caching on first use) the volume UUID. */
void  hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result_uuid);
/* Full bitmap-file scan: TRIM unallocated ranges and build the summary table. */
void  hfs_scan_blocks (struct hfsmount *hfsmp);
int   hfs_vfs_root(struct mount *mp, struct vnode **vpp);
int   hfs_unmount(struct mount *mp);
void  hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding);
int   hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot);
/* Look up an HFS object by CNID; returns it with an iocount and the cnode locked. */
int   hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted);
/* Fetch UVFS attributes and UTF-8 name for the object with the given CNID. */
int   hfs_GetInfoByID(struct hfsmount *hfsmp, cnid_t cnid, UVFSFileAttributes *file_attrs, char pcName[MAX_UTF8_NAME_LENGTH]);
int   fsck_hfs(int fd, check_flags_t how);
#endif /* lf_hfs_vfsops_h */
diff --git a/livefiles_hfs_plugin/lf_hfs_vfsutils.c b/livefiles_hfs_plugin/lf_hfs_vfsutils.c
new file mode 100644
index 0000000..4448661
--- /dev/null
+++ b/livefiles_hfs_plugin/lf_hfs_vfsutils.c
@@ -0,0 +1,2610 @@
/* Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * lf_hfs_vfsutils.c
 * livefiles_hfs
 *
 * Created by Or Haimovich on 18/3/18.
 */

/*
 * NOTE(review): the system-header names on the six #include lines below were
 * lost during extraction (angle-bracketed names stripped); restore them from
 * the original file before compiling.
 */
#include
#include
#include
#include
#include
#include

#include "lf_hfs.h"
#include "lf_hfs_locks.h"
#include "lf_hfs_format.h"
#include "lf_hfs.h"
#include "lf_hfs_endian.h"
#include "lf_hfs_logger.h"
#include "lf_hfs_mount.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_logger.h"
#include "lf_hfs_raw_read_write.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_btrees_internal.h"
#include "lf_hfs_format.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_sbunicode.h"
#include "lf_hfs_xattr.h"
#include "lf_hfs_unicode_wrappers.h"
#include "lf_hfs_link.h"
#include "lf_hfs_btree.h"
#include "lf_hfs_journal.h"

/* Late journal (re)initialization, used when a legacy OS last touched the disk. */
static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args);
/* Catalog helper: look up attributes/fork info for a root-directory file by name. */
u_int32_t GetFileInfo(ExtendedVCB *vcb, const char *name,
                      struct cat_attr *fattr, struct cat_fork *forkinfo);

//*******************************************************************************
// Routine:    hfs_MountHFSVolume
//
//
//*******************************************************************************

/* Canonical names of the HFS special (metadata) files. */
unsigned char hfs_catname[] = "Catalog B-tree";
unsigned char hfs_extname[] = "Extents B-tree";
unsigned char hfs_vbmname[] = "Volume Bitmap";
unsigned char hfs_attrname[] = "Attribute B-tree";
unsigned char hfs_startupname[] = "Startup File";

//*******************************************************************************
//
// Sanity check Volume Header Block:
//        Input argument *vhp is a pointer to a HFSPlusVolumeHeader block that has
//        not been endian-swapped and represents the on-disk contents of this sector.
//        This routine will not change the endianness of vhp block.
//
//*******************************************************************************
int hfs_ValidateHFSPlusVolumeHeader(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp)
{
    /* Fields are read with explicit big-endian swaps; vhp itself is left untouched. */
    u_int16_t signature = SWAP_BE16(vhp->signature);
    u_int16_t hfs_version = SWAP_BE16(vhp->version);

    if (signature == kHFSPlusSigWord)
    {
        if (hfs_version != kHFSPlusVersion)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_ValidateHFSPlusVolumeHeader: invalid HFS+ version: %x\n", hfs_version);

            return (EINVAL);
        }
    } else if (signature == kHFSXSigWord)
    {
        if (hfs_version != kHFSXVersion)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_ValidateHFSPlusVolumeHeader: invalid HFSX version: %x\n", hfs_version);
            return (EINVAL);
        }
    } else
    {
        /* Removed printf for invalid HFS+ signature because it gives
         * false error for UFS root volume
         */
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: unknown Volume Signature : %x\n", signature);
        return (EINVAL);
    }

    /* Block size must be at least 512 and a power of 2 */
    u_int32_t blockSize = SWAP_BE32(vhp->blockSize);
    if (blockSize < 512 || !powerof2(blockSize))
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: invalid blocksize (%d) \n", blockSize);
        return (EINVAL);
    }

    /* Allocation blocks can never be smaller than the device's logical block size. */
    if (blockSize < hfsmp->hfs_logical_block_size)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_ValidateHFSPlusVolumeHeader: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n",
                  blockSize, hfsmp->hfs_logical_block_size);
        return (EINVAL);
    }
    return 0;
}

//*******************************************************************************
//    Routine:    hfs_MountHFSPlusVolume
//
//
//*******************************************************************************

/*
 * Post-mount bookkeeping: cache the volume UUID, kick off the bitmap scan,
 * perform late journal initialization if a legacy OS last mounted the disk,
 * set up hardlink private directories, and reap orphans.
 * On any fatal error the volume is unmounted and the errno returned.
 */
int hfs_CollectBtreeStats(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, void *args)
{
    int retval = 0;
    register ExtendedVCB *vcb = HFSTOVCB(hfsmp);
    u_int32_t blockSize; blockSize = SWAP_BE32(vhp->blockSize);

    /*
     * pull in the volume UUID while we are still single-threaded.
     * This brings the volume UUID into the cached one dangling off of the HFSMP
     * Otherwise it would have to be computed on first access.
     */
    uuid_t throwaway;
    hfs_getvoluuid (hfsmp, throwaway);

    /*
     * We now always initiate a full bitmap scan even if the volume is read-only because this is
     * our only shot to do I/Os of dramaticallly different sizes than what the buffer cache ordinarily
     * expects. TRIMs will not be delivered to the underlying media if the volume is not
     * read-write though.
     */
    hfsmp->scan_var = 0;

    hfs_scan_blocks(hfsmp);

    if (hfsmp->jnl && (hfsmp->hfs_flags & HFS_READ_ONLY) == 0)
    {
        hfs_flushvolumeheader(hfsmp, 0);
    }

    /* kHFSHasFolderCount is only supported/updated on HFSX volumes */
    if ((hfsmp->hfs_flags & HFS_X) != 0)
    {
        hfsmp->hfs_flags |= HFS_FOLDERCOUNT;
    }

    // Check if we need to do late journal initialization.  This only
    // happens if a previous version of MacOS X (or 9) touched the disk.
    // In that case hfs_late_journal_init() will go re-locate the journal
    // and journal_info_block files and validate that they're still kosher.
    if (   (vcb->vcbAtrb & kHFSVolumeJournaledMask)
        && (SWAP_BE32(vhp->lastMountedVersion) != kHFSJMountVersion)
        && (hfsmp->jnl == NULL))
    {

        retval = hfs_late_journal_init(hfsmp, vhp, args);
        if (retval != 0)
        {
            if (retval == EROFS)
            {
                // EROFS is a special error code that means the volume has an external
                // journal which we couldn't find.  in that case we do not want to
                // rewrite the volume header - we'll just refuse to mount the volume.
                LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d), maybe an external jnl?\n", retval);
                retval = EINVAL;
                goto ErrorExit;
            }

            hfsmp->jnl = NULL;

            // if the journal failed to open, then set the lastMountedVersion
            // to be "FSK!" which fsck_hfs will see and force the fsck instead
            // of just bailing out because the volume is journaled.
            if (!(hfsmp->hfs_flags & HFS_READ_ONLY))
            {
                hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;

                uint64_t mdb_offset = (uint64_t)((embeddedOffset / blockSize) + HFS_PRI_SECTOR(blockSize));

                void *pvBuffer = hfs_malloc(hfsmp->hfs_physical_block_size);
                if (pvBuffer == NULL)
                {
                    retval = ENOMEM;
                    goto ErrorExit;
                }

                /* Re-read the sector holding the volume header so we can stamp it. */
                retval = raw_readwrite_read_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvBuffer, hfsmp->hfs_physical_block_size, NULL, NULL);
                if (retval)
                {
                    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_read_mount failed with %d\n", retval);
                    hfs_free(pvBuffer);
                    goto ErrorExit;
                }

                HFSPlusVolumeHeader *jvhp = (HFSPlusVolumeHeader *)(pvBuffer + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));

                if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord)
                {
                    LFHFS_LOG(LEVEL_ERROR, "hfs_MountHFSPlusVolume: Journal replay fail.  Writing lastMountVersion as FSK!\n");
                    jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);

                    retval = raw_readwrite_write_mount( hfsmp->hfs_devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), hfsmp->hfs_physical_block_size, pvBuffer, hfsmp->hfs_physical_block_size, NULL, NULL );
                    hfs_free(pvBuffer);
                    if (retval)
                    {
                        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_write_mount failed with %d\n", retval);
                        goto ErrorExit;
                    }
                }
                else
                {
                    hfs_free(pvBuffer);
                }
            }

            LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d)\n", retval);
            retval = EINVAL;
            goto ErrorExit;
        }
        else if (hfsmp->jnl)
        {
            hfsmp->hfs_mp->mnt_flag |= MNT_JOURNALED;
        }
    }
    else if (hfsmp->jnl || ((vcb->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
    {
        struct cat_attr jinfo_attr, jnl_attr;
        /* Temporarily clear the journaled bit so the lookups below behave as non-journaled. */
        if (hfsmp->hfs_flags & HFS_READ_ONLY)
        {
            vcb->vcbAtrb &= ~kHFSVolumeJournaledMask;
        }

        // if we're here we need to fill in the fileid's for the
        // journal and journal_info_block.
        hfsmp->hfs_jnlinfoblkid = GetFileInfo(vcb, ".journal_info_block", &jinfo_attr, NULL);
        hfsmp->hfs_jnlfileid    = GetFileInfo(vcb, ".journal", &jnl_attr, NULL);
        if (hfsmp->hfs_jnlinfoblkid == 0 || hfsmp->hfs_jnlfileid == 0)
        {
            LFHFS_LOG(LEVEL_DEFAULT, "hfs_MountHFSPlusVolume: danger! couldn't find the file-id's for the journal or journal_info_block\n");
            LFHFS_LOG(LEVEL_DEFAULT, "hfs_MountHFSPlusVolume: jnlfileid %llu, jnlinfoblkid %llu\n", hfsmp->hfs_jnlfileid, hfsmp->hfs_jnlinfoblkid);
        }

        if (hfsmp->hfs_flags & HFS_READ_ONLY)
        {
            vcb->vcbAtrb |= kHFSVolumeJournaledMask;
        }

        if (hfsmp->jnl == NULL)
        {
            hfsmp->hfs_mp->mnt_flag &= ~(u_int64_t)((unsigned int)MNT_JOURNALED);
        }
    }

    if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) )     // if the disk is not write protected
    {
        MarkVCBDirty( vcb );    // mark VCB dirty so it will be written
    }

    /*
     * Distinguish 3 potential cases involving content protection:
     * 1. mount point bit set; vcbAtrb does not support it. Fail.
     * 2. mount point bit set; vcbattrb supports it. we're good.
     * 3. mount point bit not set; vcbatrb supports it, turn bit on, then good.
     */
    if (hfsmp->hfs_mp->mnt_flag & MNT_CPROTECT)
    {
        /* Does the mount point support it ? */
        if ((vcb->vcbAtrb & kHFSContentProtectionMask) == 0)
        {
            /* Case 1 above */
            retval = EINVAL;
            goto ErrorExit;
        }
    }
    else
    {
        /* not requested in the mount point. Is it in FS? */
        if (vcb->vcbAtrb & kHFSContentProtectionMask)
        {
            /* Case 3 above */
            hfsmp->hfs_mp->mnt_flag |= MNT_CPROTECT;
        }
    }

#if LF_HFS_CHECK_UNMAPPED // TBD:
    /*
     * Establish a metadata allocation zone.
     */
    hfs_metadatazone_init(hfsmp, false);


    /*
     * Make any metadata zone adjustments.
     */
    if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
    {
        /* Keep the roving allocator out of the metadata zone. */
        if (vcb->nextAllocation >= hfsmp->hfs_metazone_start &&
            vcb->nextAllocation <= hfsmp->hfs_metazone_end)
        {
            HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
        }
    }
    else
#endif
    {
        if (vcb->nextAllocation <= 1)
        {
            vcb->nextAllocation = hfsmp->hfs_min_alloc_start;
        }
    }
    vcb->sparseAllocation = hfsmp->hfs_min_alloc_start;

    /* Setup private/hidden directories for hardlinks. */
    hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
    hfs_privatedir_init(hfsmp, DIR_HARDLINKS);

    hfs_remove_orphans(hfsmp);

    /* See if we need to erase unused Catalog nodes due to . */
    retval = hfs_erase_unused_nodes(hfsmp);
    if (retval)
    {
        LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_erase_unused_nodes returned (%d) for %s \n", retval, hfsmp->vcbVN);
        goto ErrorExit;
    }

    /* Enable extent-based extended attributes by default */
    hfsmp->hfs_flags |= HFS_XATTR_EXTENTS;

    return (0);

ErrorExit:
    /*
     * A fatal error occurred and the volume cannot be mounted, so
     * release any resources that we acquired...
     */
    hfsUnmount(hfsmp);

    LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval);

    return (retval);
}


int hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, u_int64_t disksize, bool bFailForDirty)
{
    int retval = 0;

    register ExtendedVCB *vcb;
    struct cat_desc cndesc;
    struct cat_attr cnattr;
    struct cat_fork cfork;
    u_int32_t blockSize;
    uint64_t spare_sectors;
    int newvnode_flags = 0;
    BTreeInfoRec btinfo;

    u_int16_t signature = SWAP_BE16(vhp->signature);

    retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp);
    if (retval)
        return retval;

    if (signature == kHFSXSigWord)
    {
        /* The in-memory signature is always 'H+'.
*/ + signature = kHFSPlusSigWord; + hfsmp->hfs_flags |= HFS_X; + } + + blockSize = SWAP_BE32(vhp->blockSize); + /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */ + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0 && + hfsmp->jnl == NULL && + (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0 && + bFailForDirty) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: cannot mount dirty non-journaled volumes\n"); + return (EINVAL); + } + + /* Make sure we can live with the physical block size. */ + if ((disksize & (hfsmp->hfs_logical_block_size - 1)) || + (embeddedOffset & (hfsmp->hfs_logical_block_size - 1))) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_logical_blocksize (%d) \n",hfsmp->hfs_logical_block_size); + return (ENXIO); + } + + /* + * If allocation block size is less than the physical block size, + * same data could be cached in two places and leads to corruption. + * + * HFS Plus reserves one allocation block for the Volume Header. + * If the physical size is larger, then when we read the volume header, + * we will also end up reading in the next allocation block(s). + * If those other allocation block(s) is/are modified, and then the volume + * header is modified, the write of the volume header's buffer will write + * out the old contents of the other allocation blocks. + * + * We assume that the physical block size is same as logical block size. + * The physical block size value is used to round down the offsets for + * reading and writing the primary and alternate volume headers. + * + * The same logic to ensure good hfs_physical_block_size is also in + * hfs_mountfs so that hfs_mountfs, hfs_MountHFSPlusVolume and + * later are doing the I/Os using same block size. 
+ */ + if (blockSize < hfsmp->hfs_physical_block_size) + { + hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size; + hfsmp->hfs_log_per_phys = 1; + } + + /* + * The VolumeHeader seems OK: transfer info from it into VCB + * Note - the VCB starts out clear (all zeros) + */ + vcb = HFSTOVCB(hfsmp); + + vcb->vcbSigWord = signature; + vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock); + vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate)); + vcb->vcbAtrb = SWAP_BE32(vhp->attributes); + vcb->vcbClpSiz = SWAP_BE32(vhp->rsrcClumpSize); + vcb->vcbNxtCNID = SWAP_BE32(vhp->nextCatalogID); + vcb->vcbVolBkUp = to_bsd_time(SWAP_BE32(vhp->backupDate)); + vcb->vcbWrCnt = SWAP_BE32(vhp->writeCount); + vcb->vcbFilCnt = SWAP_BE32(vhp->fileCount); + vcb->vcbDirCnt = SWAP_BE32(vhp->folderCount); + + /* copy 32 bytes of Finder info */ + bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo)); + + vcb->vcbAlBlSt = 0; /* hfs+ allocation blocks start at first block of volume */ + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) + { + vcb->vcbWrCnt++; /* compensate for write of Volume Header on last flush */ + } + + /* Now fill in the Extended VCB info */ + vcb->nextAllocation = SWAP_BE32(vhp->nextAllocation); + vcb->totalBlocks = SWAP_BE32(vhp->totalBlocks); + vcb->allocLimit = vcb->totalBlocks; + vcb->freeBlocks = SWAP_BE32(vhp->freeBlocks); + vcb->blockSize = blockSize; + vcb->encodingsBitmap = SWAP_BE64(vhp->encodingsBitmap); + vcb->localCreateDate = SWAP_BE32(vhp->createDate); + + vcb->hfsPlusIOPosOffset = (uint32_t) embeddedOffset; + + /* Default to no free block reserve */ + vcb->reserveBlocks = 0; + + /* + * Update the logical block size in the mount struct + * (currently set up from the wrapper MDB) using the + * new blocksize value: + */ + hfsmp->hfs_logBlockSize = BestBlockSizeFit(vcb->blockSize, MAXBSIZE, hfsmp->hfs_logical_block_size); + vcb->vcbVBMIOSize = MIN(vcb->blockSize, MAXPHYSIO); + + /* + * Validate and initialize the location of the alternate 
volume header. + * + * Note that there may be spare sectors beyond the end of the filesystem that still + * belong to our partition. + */ + spare_sectors = hfsmp->hfs_logical_block_count - (((uint64_t)vcb->totalBlocks * blockSize) / hfsmp->hfs_logical_block_size); + + /* + * Differentiate between "innocuous" spare sectors and the more unusual + * degenerate case: + * + * *** Innocuous spare sectors exist if: + * + * A) the number of bytes assigned to the partition (by multiplying logical + * block size * logical block count) is greater than the filesystem size + * (by multiplying allocation block count and allocation block size) + * + * and + * + * B) the remainder is less than the size of a full allocation block's worth of bytes. + * + * This handles the normal case where there may be a few extra sectors, but the two + * are fundamentally in sync. + * + * *** Degenerate spare sectors exist if: + * A) The number of bytes assigned to the partition (by multiplying logical + * block size * logical block count) is greater than the filesystem size + * (by multiplying allocation block count and block size). + * + * and + * + * B) the remainder is greater than a full allocation's block worth of bytes. + * In this case, a smaller file system exists in a larger partition. + * This can happen in various ways, including when volume is resized but the + * partition is yet to be resized. Under this condition, we have to assume that + * a partition management software may resize the partition to match + * the file system size in the future. Therefore we should update + * alternate volume header at two locations on the disk, + * a. 1024 bytes before end of the partition + * b. 1024 bytes before end of the file system + */ + + if (spare_sectors > (uint64_t)(blockSize / hfsmp->hfs_logical_block_size)) + { + /* + * Handle the degenerate case above. FS < partition size. 
+ * AVH located at 1024 bytes from the end of the partition + */ + hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count); + + /* AVH located at 1024 bytes from the end of the filesystem */ + hfsmp->hfs_fs_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, (((uint64_t)vcb->totalBlocks * blockSize) / hfsmp->hfs_logical_block_size)); + } + else + { + /* Innocuous spare sectors; Partition & FS notion are in sync */ + hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) + HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count); + + hfsmp->hfs_fs_avh_sector = hfsmp->hfs_partition_avh_sector; + } + + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: partition_avh_sector=%qu, fs_avh_sector=%qu\n", hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector); + + bzero(&cndesc, sizeof(cndesc)); + cndesc.cd_parentcnid = kHFSRootParentID; + cndesc.cd_flags |= CD_ISMETA; + bzero(&cnattr, sizeof(cnattr)); + cnattr.ca_linkcount = 1; + cnattr.ca_mode = S_IFREG; + + /* + * Set up Extents B-tree vnode + */ + cndesc.cd_nameptr = hfs_extname; + cndesc.cd_namelen = strlen((char *)hfs_extname); + cndesc.cd_cnid = cnattr.ca_fileid = kHFSExtentsFileID; + + cfork.cf_size = SWAP_BE64 (vhp->extentsFile.logicalSize); + cfork.cf_new_size= 0; + cfork.cf_clump = SWAP_BE32 (vhp->extentsFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->extentsFile.extents[iExtentCounter].startBlock); + cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->extentsFile.extents[iExtentCounter].blockCount); + } + + 
retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_extents_vp, &newvnode_flags); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting extentoverflow BT\n", retval); + goto ErrorExit; + } + + hfsmp->hfs_extents_cp = VTOC(hfsmp->hfs_extents_vp); + retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_extents_vp), (KeyCompareProcPtr) CompareExtentKeysPlus)); + + hfs_unlock(hfsmp->hfs_extents_cp); + + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting extentoverflow BT\n", retval); + goto ErrorExit; + } + + /* + * Set up Catalog B-tree vnode + */ + cndesc.cd_nameptr = hfs_catname; + cndesc.cd_namelen = strlen((char *)hfs_catname); + cndesc.cd_cnid = cnattr.ca_fileid = kHFSCatalogFileID; + + cfork.cf_size = SWAP_BE64 (vhp->catalogFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->catalogFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->catalogFile.extents[iExtentCounter].startBlock); + cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->catalogFile.extents[iExtentCounter].blockCount); + } + + retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_catalog_vp, &newvnode_flags); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting catalog BT\n", retval); + goto ErrorExit; + } + hfsmp->hfs_catalog_cp = VTOC(hfsmp->hfs_catalog_vp); + retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_catalog_vp), (KeyCompareProcPtr) CompareExtendedCatalogKeys)); + + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting catalog BT\n", retval); + hfs_unlock(hfsmp->hfs_catalog_cp); + 
goto ErrorExit; + } + + if ((hfsmp->hfs_flags & HFS_X) && + BTGetInformation(VTOF(hfsmp->hfs_catalog_vp), 0, &btinfo) == 0) + { + if (btinfo.keyCompareType == kHFSBinaryCompare) + { + hfsmp->hfs_flags |= HFS_CASE_SENSITIVE; + /* Install a case-sensitive key compare */ + (void) BTOpenPath(VTOF(hfsmp->hfs_catalog_vp), (KeyCompareProcPtr)cat_binarykeycompare); + } + } + + hfs_unlock(hfsmp->hfs_catalog_cp); + + /* + * Set up Allocation file vnode + */ + cndesc.cd_nameptr = hfs_vbmname; + cndesc.cd_namelen = strlen((char *)hfs_vbmname); + cndesc.cd_cnid = cnattr.ca_fileid = kHFSAllocationFileID; + + cfork.cf_size = SWAP_BE64 (vhp->allocationFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->allocationFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->allocationFile.extents[iExtentCounter].startBlock); + cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->allocationFile.extents[iExtentCounter].blockCount); + } + + retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_allocation_vp, &newvnode_flags); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting bitmap\n", retval); + goto ErrorExit; + } + hfsmp->hfs_allocation_cp = VTOC(hfsmp->hfs_allocation_vp); + hfs_unlock(hfsmp->hfs_allocation_cp); + + /* + * Set up Attribute B-tree vnode + */ + if (vhp->attributesFile.totalBlocks != 0) { + cndesc.cd_nameptr = hfs_attrname; + cndesc.cd_namelen = strlen((char *)hfs_attrname); + cndesc.cd_cnid = cnattr.ca_fileid = kHFSAttributesFileID; + + cfork.cf_size = SWAP_BE64 (vhp->attributesFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->attributesFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->attributesFile.totalBlocks); + 
cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + + for (int iExtentCounter = 0; iExtentCounter < kHFSPlusExtentDensity; iExtentCounter++) + { + cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->attributesFile.extents[iExtentCounter].startBlock); + cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->attributesFile.extents[iExtentCounter].blockCount); + } + retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_attribute_vp, &newvnode_flags); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting EA BT\n", retval); + goto ErrorExit; + } + hfsmp->hfs_attribute_cp = VTOC(hfsmp->hfs_attribute_vp); + + retval = MacToVFSError(BTOpenPath(VTOF(hfsmp->hfs_attribute_vp),(KeyCompareProcPtr) hfs_attrkeycompare)); + hfs_unlock(hfsmp->hfs_attribute_cp); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting EA BT\n", retval); + goto ErrorExit; + } + + /* Initialize vnode for virtual attribute data file that spans the + * entire file system space for performing I/O to attribute btree + * We hold iocount on the attrdata vnode for the entire duration + * of mount (similar to btree vnodes) + */ + retval = init_attrdata_vnode(hfsmp); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: init_attrdata_vnode returned (%d) for virtual EA file\n", retval); + goto ErrorExit; + } + } + + /* + * Set up Startup file vnode + */ + if (vhp->startupFile.totalBlocks != 0) { + cndesc.cd_nameptr = hfs_startupname; + cndesc.cd_namelen = strlen((char *)hfs_startupname); + cndesc.cd_cnid = cnattr.ca_fileid = kHFSStartupFileID; + + cfork.cf_size = SWAP_BE64 (vhp->startupFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->startupFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->startupFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + for (int iExtentCounter = 0; iExtentCounter < 
kHFSPlusExtentDensity; iExtentCounter++) + { + cfork.cf_extents[iExtentCounter].startBlock = SWAP_BE32 (vhp->startupFile.extents[iExtentCounter].startBlock); + cfork.cf_extents[iExtentCounter].blockCount = SWAP_BE32 (vhp->startupFile.extents[iExtentCounter].blockCount); + } + + retval = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cfork, &hfsmp->hfs_startup_vp, &newvnode_flags); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting startup file\n", retval); + goto ErrorExit; + } + hfsmp->hfs_startup_cp = VTOC(hfsmp->hfs_startup_vp); + hfs_unlock(hfsmp->hfs_startup_cp); + } + + /* + * Pick up volume name and create date + * + * Acquiring the volume name should not manipulate the bitmap, only the catalog + * btree and possibly the extents overflow b-tree. + */ + retval = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, &cnattr, NULL); + if (retval) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: cat_idlookup returned (%d) getting rootfolder \n", retval); + goto ErrorExit; + } + vcb->hfs_itime = cnattr.ca_itime; + vcb->volumeNameEncodingHint = cndesc.cd_encoding; + bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen)); + cat_releasedesc(&cndesc); + + return (0); + +ErrorExit: + /* + * A fatal error occurred and the volume cannot be mounted, so + * release any resources that we acquired... + */ + hfsUnmount(hfsmp); + + LFHFS_LOG(LEVEL_DEBUG, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval); + + return (retval); +} + +u_int32_t BestBlockSizeFit(u_int32_t allocationBlockSize, u_int32_t blockSizeLimit, u_int32_t baseMultiple) { + /* + Compute the optimal (largest) block size (no larger than allocationBlockSize) that is less than the + specified limit but still an even multiple of the baseMultiple. 
+ */ + int baseBlockCount, blockCount; + u_int32_t trialBlockSize; + + if (allocationBlockSize % baseMultiple != 0) { + /* + Whoops: the allocation blocks aren't even multiples of the specified base: + no amount of dividing them into even parts will be a multiple, either then! + */ + return 512; /* Hope for the best */ + }; + + /* Try the obvious winner first, to prevent 12K allocation blocks, for instance, + from being handled as two 6K logical blocks instead of 3 4K logical blocks. + Even though the former (the result of the loop below) is the larger allocation + block size, the latter is more efficient: */ + if (allocationBlockSize % PAGE_SIZE == 0) return (u_int32_t)PAGE_SIZE; + + /* No clear winner exists: pick the largest even fraction <= MAXBSIZE: */ + baseBlockCount = allocationBlockSize / baseMultiple; /* Now guaranteed to be an even multiple */ + + for (blockCount = baseBlockCount; blockCount > 0; --blockCount) { + trialBlockSize = blockCount * baseMultiple; + if (allocationBlockSize % trialBlockSize == 0) { /* An even multiple? */ + if ((trialBlockSize <= blockSizeLimit) && + (trialBlockSize % baseMultiple == 0)) { + return trialBlockSize; + }; + }; + }; + + /* Note: we should never get here, since blockCount = 1 should always work, + but this is nice and safe and makes the compiler happy, too ... 
*/ + return 512; +} + +/* + * Lock the HFS global journal lock + */ +int +hfs_lock_global (struct hfsmount *hfsmp, enum hfs_locktype locktype) +{ + pthread_t thread = pthread_self(); + + if (hfsmp->hfs_global_lockowner == thread) { + LFHFS_LOG(LEVEL_ERROR, "hfs_lock_global: locking against myself!"); + hfs_assert(0); + } + + if (locktype == HFS_SHARED_LOCK) { + lf_lck_rw_lock_shared (&hfsmp->hfs_global_lock); + hfsmp->hfs_global_lockowner = HFS_SHARED_OWNER; + } + else { + lf_lck_rw_lock_exclusive (&hfsmp->hfs_global_lock); + hfsmp->hfs_global_lockowner = thread; + } + + return 0; +} + +/* + * Unlock the HFS global journal lock + */ +void +hfs_unlock_global (struct hfsmount *hfsmp) +{ + pthread_t thread = pthread_self(); + + /* HFS_LOCK_EXCLUSIVE */ + if (hfsmp->hfs_global_lockowner == thread) { + hfsmp->hfs_global_lockowner = NULL; + lf_lck_rw_unlock_exclusive(&hfsmp->hfs_global_lock); + } + /* HFS_LOCK_SHARED */ + else { + lf_lck_rw_unlock_shared(&hfsmp->hfs_global_lock); + } +} + +int +hfs_start_transaction(struct hfsmount *hfsmp) +{ + int ret = 0, unlock_on_err = 0; + pthread_t thread = pthread_self(); + +#ifdef HFS_CHECK_LOCK_ORDER + /* + * You cannot start a transaction while holding a system + * file lock. (unless the transaction is nested.) 
+ */ + if (hfsmp->jnl && journal_owner(hfsmp->jnl) != thread) { + if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) { + LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (cat before jnl)\n"); + hfs_assert(0); + } + if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) { + LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (attr before jnl)\n"); + hfs_assert(0); + } + if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) { + LFHFS_LOG(LEVEL_ERROR, "hfs_start_transaction: bad lock order (ext before jnl)\n"); + hfs_assert(0); + } + } +#endif /* HFS_CHECK_LOCK_ORDER */ + +again: + + if (hfsmp->jnl) { + if (journal_owner(hfsmp->jnl) != thread) + { + /* + * The global lock should be held shared if journal is + * active to prevent disabling. If we're not the owner + * of the journal lock, verify that we're not already + * holding the global lock exclusive before moving on. + */ + if (hfsmp->hfs_global_lockowner == thread) { + ret = EBUSY; + goto out; + } + + hfs_lock_global (hfsmp, HFS_SHARED_LOCK); + + // Things could have changed + if (!hfsmp->jnl) { + hfs_unlock_global(hfsmp); + goto again; + } + unlock_on_err = 1; + } + } + else + { + // No journal + if (hfsmp->hfs_global_lockowner != thread) { + hfs_lock_global(hfsmp, HFS_EXCLUSIVE_LOCK); + + // Things could have changed + if (hfsmp->jnl) { + hfs_unlock_global(hfsmp); + goto again; + } + + ExtendedVCB * vcb = HFSTOVCB(hfsmp); + if (vcb->vcbAtrb & kHFSVolumeUnmountedMask) { + // clear kHFSVolumeUnmountedMask + hfs_flushvolumeheader(hfsmp, HFS_FVH_SKIP_TRANSACTION); + } + unlock_on_err = 1; + } + } + + if (hfsmp->jnl) + { + ret = journal_start_transaction(hfsmp->jnl); + } + else + { + ret = 0; + } + + if (ret == 0) + ++hfsmp->hfs_transaction_nesting; + + goto out; + +out: + if (ret != 0 && unlock_on_err) { + hfs_unlock_global (hfsmp); + } + + return ret; +} + +int +hfs_end_transaction(struct hfsmount *hfsmp) +{ + int ret; + + 
hfs_assert(!hfsmp->jnl || journal_owner(hfsmp->jnl) == pthread_self()); + hfs_assert(hfsmp->hfs_transaction_nesting > 0); + + if (hfsmp->jnl && hfsmp->hfs_transaction_nesting == 1) + hfs_flushvolumeheader(hfsmp, HFS_FVH_FLUSH_IF_DIRTY); + + bool need_unlock = !--hfsmp->hfs_transaction_nesting; + + if (hfsmp->jnl) + { + ret = journal_end_transaction(hfsmp->jnl); + } + else + { + ret = 0; + } + + if (need_unlock) { + hfs_unlock_global (hfsmp); + } + + return ret; +} + + +/* + * Flush the contents of the journal to the disk. + * + * - HFS_FLUSH_JOURNAL + * Wait to write in-memory journal to the disk consistently. + * This means that the journal still contains uncommitted + * transactions and the file system metadata blocks in + * the journal transactions might be written asynchronously + * to the disk. But there is no guarantee that they are + * written to the disk before returning to the caller. + * Note that this option is sufficient for file system + * data integrity as it guarantees consistent journal + * content on the disk. + * + * - HFS_FLUSH_JOURNAL_META + * Wait to write in-memory journal to the disk + * consistently, and also wait to write all asynchronous + * metadata blocks to its corresponding locations + * consistently on the disk. This is overkill in normal + * scenarios but is useful whenever the metadata blocks + * are required to be consistent on-disk instead of + * just the journalbeing consistent; like before live + * verification and live volume resizing. The update of the + * metadata doesn't include a barrier of track cache flush. + * + * - HFS_FLUSH_FULL + * HFS_FLUSH_JOURNAL + force a track cache flush to media + * + * - HFS_FLUSH_CACHE + * Force a track cache flush to media. 
+ * + * - HFS_FLUSH_BARRIER + * Barrier-only flush to ensure write order + * + */ +errno_t hfs_flush(struct hfsmount *hfsmp, hfs_flush_mode_t mode) { + errno_t error = 0; + int options = 0; + dk_synchronize_t sync_req = { .options = DK_SYNCHRONIZE_OPTION_BARRIER }; + + switch (mode) { + case HFS_FLUSH_JOURNAL_META: + // wait for journal, metadata blocks and previous async flush to finish + SET(options, JOURNAL_WAIT_FOR_IO); + + // no break + + case HFS_FLUSH_JOURNAL: + case HFS_FLUSH_JOURNAL_BARRIER: + case HFS_FLUSH_FULL: + + if (mode == HFS_FLUSH_JOURNAL_BARRIER && + !(hfsmp->hfs_flags & HFS_FEATURE_BARRIER)) + mode = HFS_FLUSH_FULL; + + if (mode == HFS_FLUSH_FULL) + SET(options, JOURNAL_FLUSH_FULL); + + /* Only peek at hfsmp->jnl while holding the global lock */ + hfs_lock_global (hfsmp, HFS_SHARED_LOCK); + + if (hfsmp->jnl) { + ExtendedVCB * vcb = HFSTOVCB(hfsmp); + if (!(vcb->vcbAtrb & kHFSVolumeUnmountedMask)) { + // Set kHFSVolumeUnmountedMask + hfs_flushvolumeheader(hfsmp, HFS_FVH_MARK_UNMOUNT); + } + error = journal_flush(hfsmp->jnl, options); + } + + hfs_unlock_global (hfsmp); + + /* + * This may result in a double barrier as + * journal_flush may have issued a barrier itself + */ + if (mode == HFS_FLUSH_JOURNAL_BARRIER) + error = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_req); + break; + + case HFS_FLUSH_CACHE: + // Do a full sync + sync_req.options = 0; + + // no break + + case HFS_FLUSH_BARRIER: + // If barrier only flush doesn't support, fall back to use full flush. 
+ if (!(hfsmp->hfs_flags & HFS_FEATURE_BARRIER)) + sync_req.options = 0; + + error = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCSYNCHRONIZE, (caddr_t)&sync_req); + break; + + default: + error = EINVAL; + } + + return error; +} + + +#define MALLOC_TRACER 0 + +#if MALLOC_TRACER +#define MALLOC_TRACER_SIZE 100000 +typedef struct { + void *pv; + size_t uSize; +} MallocTracer_S; +MallocTracer_S gpsMallocTracer[MALLOC_TRACER_SIZE]; +MallocTracer_S gpsFreeTracer[MALLOC_TRACER_SIZE]; +uint32_t guIndex = 0, guOutdex = 0, guSize=0, guTotal = 0; +uint64_t guTotalConsumption = 0; +#endif + +void* +hfs_malloc(size_t size) +{ + if (!size) { + panic("Malloc size is 0"); + } + void *pv = malloc(size); + +#if MALLOC_TRACER + gpsMallocTracer[guIndex].pv = pv; + gpsMallocTracer[guIndex].uSize = (uint32_t)size; + guIndex = (guIndex+1) % MALLOC_TRACER_SIZE; + guTotal++; + guSize++; + guTotalConsumption += size; +#endif + return pv; +} + +void +hfs_free(void *ptr) +{ + if (!ptr) + return; + + free(ptr); + +#if MALLOC_TRACER + gpsFreeTracer[guOutdex].pv = ptr; + bool bCont = true; + uint32_t u=guIndex; + do { + u = (u)?(u-1):(MALLOC_TRACER_SIZE-1); + if (gpsMallocTracer[u].pv == ptr) { + break; + } + bCont = (guTotalhfs_mutex)); +} + +/* + * Unlock the HFS mount lock + * + * Note: this is a mutex, not a rw lock! + */ +void hfs_unlock_mount (struct hfsmount *hfsmp) +{ + lf_lck_mtx_unlock (&(hfsmp->hfs_mutex)); +} + +/* + * ReleaseMetaFileVNode + * + * vp L - - + */ +static void ReleaseMetaFileVNode(struct vnode *vp) +{ + struct filefork *fp; + + if (vp && (fp = VTOF(vp))) + { + if (fp->fcbBTCBPtr != NULL) + { + (void)hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + (void) BTClosePath(fp); + hfs_unlock(VTOC(vp)); + } + + /* release the node even if BTClosePath fails */ + hfs_vnop_reclaim(vp); + } +} + +/************************************************************* + * + * Unmounts a hfs volume. 
+ * At this point vflush() has been called (to dump all non-metadata files) + * + *************************************************************/ + +int +hfsUnmount( register struct hfsmount *hfsmp) +{ + + /* Get rid of our attribute data vnode (if any). This is done + * after the vflush() during mount, so we don't need to worry + * about any locks. + */ + if (hfsmp->hfs_attrdata_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_attrdata_vp); + hfsmp->hfs_attrdata_vp = NULL; + } + + if (hfsmp->hfs_startup_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_startup_vp); + hfsmp->hfs_startup_cp = NULL; + hfsmp->hfs_startup_vp = NULL; + } + + if (hfsmp->hfs_attribute_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_attribute_vp); + hfsmp->hfs_attribute_cp = NULL; + hfsmp->hfs_attribute_vp = NULL; + } + + if (hfsmp->hfs_catalog_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_catalog_vp); + hfsmp->hfs_catalog_cp = NULL; + hfsmp->hfs_catalog_vp = NULL; + } + + if (hfsmp->hfs_extents_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_extents_vp); + hfsmp->hfs_extents_cp = NULL; + hfsmp->hfs_extents_vp = NULL; + } + + if (hfsmp->hfs_allocation_vp) { + ReleaseMetaFileVNode(hfsmp->hfs_allocation_vp); + hfsmp->hfs_allocation_cp = NULL; + hfsmp->hfs_allocation_vp = NULL; + } + return (0); +} + +/* + * RequireFileLock + * + * Check to see if a vnode is locked in the current context + * This is to be used for debugging purposes only!! + */ +void RequireFileLock(FileReference vp, int shareable) +{ + int locked; + + /* The extents btree and allocation bitmap are always exclusive. */ + if (VTOC(vp)->c_fileid == kHFSExtentsFileID || + VTOC(vp)->c_fileid == kHFSAllocationFileID) { + shareable = 0; + } + + locked = VTOC(vp)->c_lockowner == pthread_self(); + + if (!locked && !shareable) + { + switch (VTOC(vp)->c_fileid) { + case kHFSExtentsFileID: + LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: extents btree not locked! 
v: 0x%08X\n #\n", (u_int)vp); + break; + case kHFSCatalogFileID: + LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: catalog btree not locked! v: 0x%08X\n #\n", (u_int)vp); + break; + case kHFSAllocationFileID: + /* The allocation file can hide behind the jornal lock. */ + if (VTOHFS(vp)->jnl == NULL) + { + LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: allocation file not locked! v: 0x%08X\n #\n", (u_int)vp); + } + return; + case kHFSStartupFileID: + LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: startup file not locked! v: 0x%08X\n #\n", (u_int)vp); + break; + case kHFSAttributesFileID: + LFHFS_LOG(LEVEL_ERROR, "RequireFileLock: attributes btree not locked! v: 0x%08X\n #\n", (u_int)vp); + break; + default: + return; + } + hfs_assert(0); + } +} + +/* + * Test if fork has overflow extents. + * + * Returns: + * non-zero - overflow extents exist + * zero - overflow extents do not exist + */ +bool overflow_extents(struct filefork *fp) +{ + u_int32_t blocks; + + if (fp->ff_extents[7].blockCount == 0) + return false; + + blocks = fp->ff_extents[0].blockCount + + fp->ff_extents[1].blockCount + + fp->ff_extents[2].blockCount + + fp->ff_extents[3].blockCount + + fp->ff_extents[4].blockCount + + fp->ff_extents[5].blockCount + + fp->ff_extents[6].blockCount + + fp->ff_extents[7].blockCount; + + return fp->ff_blocks > blocks; +} + + +/* + * Lock HFS system file(s). + * + * This function accepts a @flags parameter which indicates which + * system file locks are required. The value it returns should be + * used in a subsequent call to hfs_systemfile_unlock. The caller + * should treat this value as opaque; it may or may not have a + * relation to the @flags field that is passed in. The *only* + * guarantee that we make is that a value of zero means that no locks + * were taken and that there is no need to call hfs_systemfile_unlock + * (although it is harmless to do so). Recursion is supported but + * care must still be taken to ensure correct lock ordering. 
Note + * that requests for certain locks may cause other locks to also be + * taken, including locks that are not possible to ask for via the + * @flags parameter. + */ +int +hfs_systemfile_lock(struct hfsmount *hfsmp, int flags, enum hfs_locktype locktype) +{ + pthread_t thread = pthread_self(); + + /* + * Locking order is Catalog file, Attributes file, Startup file, Bitmap file, Extents file + */ + if (flags & SFL_CATALOG) { + if (hfsmp->hfs_catalog_cp + && hfsmp->hfs_catalog_cp->c_lockowner != thread) { +#ifdef HFS_CHECK_LOCK_ORDER + if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Attributes before Catalog)"); + hfs_assert(0); + } + if (hfsmp->hfs_startup_cp && hfsmp->hfs_startup_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Startup before Catalog)"); + hfs_assert(0); + } + if (hfsmp-> hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Catalog)"); + hfs_assert(0); + } +#endif /* HFS_CHECK_LOCK_ORDER */ + + (void) hfs_lock(hfsmp->hfs_catalog_cp, locktype, HFS_LOCK_DEFAULT); + /* + * When the catalog file has overflow extents then + * also acquire the extents b-tree lock if its not + * already requested. 
+ */ + if (((flags & SFL_EXTENTS) == 0) && + (hfsmp->hfs_catalog_vp != NULL) && + (overflow_extents(VTOF(hfsmp->hfs_catalog_vp)))) { + flags |= SFL_EXTENTS; + } + } else { + flags &= ~SFL_CATALOG; + } + } + + if (flags & SFL_ATTRIBUTE) { + if (hfsmp->hfs_attribute_cp + && hfsmp->hfs_attribute_cp->c_lockowner != thread) { +#ifdef HFS_CHECK_LOCK_ORDER + if (hfsmp->hfs_startup_cp && hfsmp->hfs_startup_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Startup before Attributes)"); + hfs_assert(0); + } + if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Attributes)"); + hfs_assert(0); + } +#endif /* HFS_CHECK_LOCK_ORDER */ + + (void) hfs_lock(hfsmp->hfs_attribute_cp, locktype, HFS_LOCK_DEFAULT); + /* + * When the attribute file has overflow extents then + * also acquire the extents b-tree lock if its not + * already requested. + */ + if (((flags & SFL_EXTENTS) == 0) && + (hfsmp->hfs_attribute_vp != NULL) && + (overflow_extents(VTOF(hfsmp->hfs_attribute_vp)))) { + flags |= SFL_EXTENTS; + } + } else { + flags &= ~SFL_ATTRIBUTE; + } + } + + if (flags & SFL_STARTUP) { + if (hfsmp->hfs_startup_cp + && hfsmp->hfs_startup_cp->c_lockowner != thread) { +#ifdef HFS_CHECK_LOCK_ORDER + if (hfsmp-> hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == current_thread()) { + LFHFS_LOG(LEVEL_ERROR, "hfs_systemfile_lock: bad lock order (Extents before Startup)"); + hfs_assert(0); + } +#endif /* HFS_CHECK_LOCK_ORDER */ + + (void) hfs_lock(hfsmp->hfs_startup_cp, locktype, HFS_LOCK_DEFAULT); + /* + * When the startup file has overflow extents then + * also acquire the extents b-tree lock if its not + * already requested. 
+ */ + if (((flags & SFL_EXTENTS) == 0) && + (hfsmp->hfs_startup_vp != NULL) && + (overflow_extents(VTOF(hfsmp->hfs_startup_vp)))) { + flags |= SFL_EXTENTS; + } + } else { + flags &= ~SFL_STARTUP; + } + } + + /* + * To prevent locks being taken in the wrong order, the extent lock + * gets a bitmap lock as well. + */ + if (flags & (SFL_BITMAP | SFL_EXTENTS)) { + if (hfsmp->hfs_allocation_cp) { + (void) hfs_lock(hfsmp->hfs_allocation_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + /* + * The bitmap lock is also grabbed when only extent lock + * was requested. Set the bitmap lock bit in the lock + * flags which callers will use during unlock. + */ + flags |= SFL_BITMAP; + + } else { + flags &= ~SFL_BITMAP; + } + } + + if (flags & SFL_EXTENTS) { + /* + * Since the extents btree lock is recursive we always + * need exclusive access. + */ + if (hfsmp->hfs_extents_cp) { + (void) hfs_lock(hfsmp->hfs_extents_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + } else { + flags &= ~SFL_EXTENTS; + } + } + + return (flags); +} + +/* + * unlock HFS system file(s). + */ +void +hfs_systemfile_unlock(struct hfsmount *hfsmp, int flags) +{ + if (!flags) + return; + + if (flags & SFL_STARTUP && hfsmp->hfs_startup_cp) { + hfs_unlock(hfsmp->hfs_startup_cp); + } + if (flags & SFL_ATTRIBUTE && hfsmp->hfs_attribute_cp) { + hfs_unlock(hfsmp->hfs_attribute_cp); + } + if (flags & SFL_CATALOG && hfsmp->hfs_catalog_cp) { + hfs_unlock(hfsmp->hfs_catalog_cp); + } + if (flags & SFL_BITMAP && hfsmp->hfs_allocation_cp) { + hfs_unlock(hfsmp->hfs_allocation_cp); + } + if (flags & SFL_EXTENTS && hfsmp->hfs_extents_cp) { + hfs_unlock(hfsmp->hfs_extents_cp); + } +} + +u_int32_t +hfs_freeblks(struct hfsmount * hfsmp, int wantreserve) +{ + u_int32_t freeblks; + u_int32_t rsrvblks; + u_int32_t loanblks; + + /* + * We don't bother taking the mount lock + * to look at these values since the values + * themselves are each updated atomically + * on aligned addresses. 
+ */ + freeblks = hfsmp->freeBlocks; + rsrvblks = hfsmp->reserveBlocks; + loanblks = hfsmp->loanedBlocks + hfsmp->lockedBlocks; + if (wantreserve) { + if (freeblks > rsrvblks) + freeblks -= rsrvblks; + else + freeblks = 0; + } + if (freeblks > loanblks) + freeblks -= loanblks; + else + freeblks = 0; + + return (freeblks); +} + +/* + * Map HFS Common errors (negative) to BSD error codes (positive). + * Positive errors (ie BSD errors) are passed through unchanged. + */ +short MacToVFSError(OSErr err) +{ + if (err >= 0) + return err; + + /* BSD/VFS internal errnos */ + switch (err) { + case HFS_ERESERVEDNAME: /* -8 */ + return err; + } + + switch (err) { + case dskFulErr: /* -34 */ + case btNoSpaceAvail: /* -32733 */ + return ENOSPC; + case fxOvFlErr: /* -32750 */ + return EOVERFLOW; + + case btBadNode: /* -32731 */ + return EIO; + + case memFullErr: /* -108 */ + return ENOMEM; /* +12 */ + + case cmExists: /* -32718 */ + case btExists: /* -32734 */ + return EEXIST; /* +17 */ + + case cmNotFound: /* -32719 */ + case btNotFound: /* -32735 */ + return ENOENT; /* 28 */ + + case cmNotEmpty: /* -32717 */ + return ENOTEMPTY; /* 66 */ + + case cmFThdDirErr: /* -32714 */ + return EISDIR; /* 21 */ + + case fxRangeErr: /* -32751 */ + return ERANGE; + + case bdNamErr: /* -37 */ + return ENAMETOOLONG; /* 63 */ + + case paramErr: /* -50 */ + case fileBoundsErr: /* -1309 */ + return EINVAL; /* +22 */ + + case fsBTBadNodeSize: + return ENXIO; + + default: + return EIO; /* +5 */ + } +} + +/* + * Find the current thread's directory hint for a given index. + * + * Requires an exclusive lock on directory cnode. + * + * Use detach if the cnode lock must be dropped while the hint is still active. + */ +directoryhint_t* +hfs_getdirhint(struct cnode *dcp, int index, int detach) +{ + + directoryhint_t *hint; + boolean_t need_remove, need_init; + const u_int8_t* name; + struct timeval tv; + microtime(&tv); + + /* + * Look for an existing hint first. 
If not found, create a new one (when + * the list is not full) or recycle the oldest hint. Since new hints are + * always added to the head of the list, the last hint is always the + * oldest. + */ + TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link) + { + if (hint->dh_index == index) + break; + } + if (hint != NULL) + { /* found an existing hint */ + need_init = false; + need_remove = true; + } + else + { /* cannot find an existing hint */ + need_init = true; + if (dcp->c_dirhintcnt < HFS_MAXDIRHINTS) + { /* we don't need recycling */ + /* Create a default directory hint */ + hint = hfs_malloc(sizeof(struct directoryhint)); + ++dcp->c_dirhintcnt; + need_remove = false; + } + else + { + /* recycle the last (i.e., the oldest) hint */ + hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead); + if ((hint->dh_desc.cd_flags & CD_HASBUF) && (name = hint->dh_desc.cd_nameptr)) + { + hint->dh_desc.cd_nameptr = NULL; + hint->dh_desc.cd_namelen = 0; + hint->dh_desc.cd_flags &= ~CD_HASBUF; + hfs_free((void*)name); + } + need_remove = true; + } + } + + if (need_remove) + TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link); + + if (detach) + --dcp->c_dirhintcnt; + else + TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link); + + if (need_init) + { + hint->dh_index = index; + hint->dh_desc.cd_flags = 0; + hint->dh_desc.cd_encoding = 0; + hint->dh_desc.cd_namelen = 0; + hint->dh_desc.cd_nameptr = NULL; + hint->dh_desc.cd_parentcnid = dcp->c_fileid; + hint->dh_desc.cd_hint = dcp->c_childhint; + hint->dh_desc.cd_cnid = 0; + } + hint->dh_time = (uint32_t) tv.tv_sec; + return (hint); +} + +/* + * Insert a detached directory hint back into the list of dirhints. + * + * Requires an exclusive lock on directory cnode. 
 */
void
hfs_insertdirhint(struct cnode *dcp, directoryhint_t * hint)
{
    directoryhint_t *test;

    /*
     * Sanity check: a detached hint must not already be linked into the
     * directory's hint list; inserting it twice would corrupt the TAILQ.
     */
    TAILQ_FOREACH(test, &dcp->c_hintlist, dh_link)
    {
        if (test == hint)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_insertdirhint: hint %p already on list!", hint);
            hfs_assert(0);
        }
    }

    /* New hints go at the head, so the tail is always the oldest hint. */
    TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link);
    ++dcp->c_dirhintcnt;
}

/*
 * Release a single directory hint.
 *
 * Frees the hint's name buffer (if it owns one) and the hint itself.
 * Safe to call on a detached hint: the list scan below simply finds no
 * match and only the storage is released.
 *
 * Requires an exclusive lock on directory cnode.
 */
void
hfs_reldirhint(struct cnode *dcp, directoryhint_t * relhint)
{
    const u_int8_t * name;
    directoryhint_t *hint;

    /* Check if item is on list (could be detached) */
    TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link)
    {
        if (hint == relhint)
        {
            TAILQ_REMOVE(&dcp->c_hintlist, relhint, dh_link);
            --dcp->c_dirhintcnt;
            break;
        }
    }
    /* Release the catalog-descriptor name buffer only if the hint owns it. */
    name = relhint->dh_desc.cd_nameptr;
    if ((relhint->dh_desc.cd_flags & CD_HASBUF) && (name != NULL))
    {
        relhint->dh_desc.cd_nameptr = NULL;
        relhint->dh_desc.cd_namelen = 0;
        relhint->dh_desc.cd_flags &= ~CD_HASBUF;
        hfs_free((void*)name);
    }
    hfs_free(relhint);
}

/*
 * Perform a case-insensitive compare of two UTF-8 filenames.
 *
 * Returns 0 if the strings match.
+ */ +int +hfs_namecmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2) +{ + u_int16_t *ustr1, *ustr2; + size_t ulen1, ulen2; + size_t maxbytes; + int cmp = -1; + + if (len1 != len2) + return (cmp); + + maxbytes = kHFSPlusMaxFileNameChars << 1; + ustr1 = hfs_malloc(maxbytes << 1); + ustr2 = ustr1 + (maxbytes >> 1); + + if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0) + goto out; + if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0) + goto out; + + ulen1 = ulen1 / sizeof(UniChar); + ulen2 = ulen2 / sizeof(UniChar); + cmp = FastUnicodeCompare(ustr1, ulen1, ustr2, ulen2); +out: + hfs_free(ustr1); + return (cmp); +} + +/* + * Perform a case-insensitive apendix cmp of two UTF-8 filenames. + * + * Returns 0 if the str2 is the same as the end of str1. + */ +int +hfs_apendixcmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2) +{ + u_int16_t *ustr1, *ustr2, *original_allocation; + size_t ulen1, ulen2; + size_t maxbytes; + int cmp = -1; + + maxbytes = kHFSPlusMaxFileNameChars << 1; + ustr1 = hfs_malloc(maxbytes << 1); + ustr2 = ustr1 + (maxbytes >> 1); + original_allocation = ustr1; + + if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0) + goto out; + if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0) + goto out; + + ulen1 = ulen1 / sizeof(UniChar); + ulen2 = ulen2 / sizeof(UniChar); + ustr1+= ulen1 - ulen2; + cmp = FastUnicodeCompare(ustr1, ulen2, ustr2, ulen2); +out: + hfs_free(original_allocation); + return (cmp); +} + +/* + * Perform a case-insensitive strstr of two UTF-8 filenames. + * + * Returns 0 if the str2 in str1 match. 
 */
int
hfs_strstr(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2)
{
    u_int16_t *ustr1, *ustr2, *original_allocation;
    size_t ulen1, ulen2;
    size_t maxbytes;
    int cmp = 0;

    /* One allocation split into two maxbytes-sized UniChar buffers. */
    maxbytes = kHFSPlusMaxFileNameChars << 1;
    ustr1 = hfs_malloc(maxbytes << 1);
    ustr2 = ustr1 + (maxbytes >> 1);
    original_allocation = ustr1;
    /*
     * NOTE(review): a decode failure falls through with cmp == 0,
     * i.e. it reports "found" — looks intentional best-effort here,
     * but verify against callers.
     */
    if (utf8_decodestr(str1, len1, ustr1, &ulen1, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
    {
        goto out;
    }
    if (utf8_decodestr(str2, len2, ustr2, &ulen2, maxbytes, ':', UTF_DECOMPOSED | UTF_ESCAPE_ILLEGAL) != 0)
    {
        goto out;
    }

    /* utf8_decodestr reports byte counts; convert to UniChar counts. */
    ulen1 = ulen1 / sizeof(UniChar);
    ulen2 = ulen2 / sizeof(UniChar);

    /*
     * Slide a ulen2-wide window across ustr1.  ulen1 is post-decremented
     * each pass, so the window is compared only while at least ulen2
     * characters remain; running off the end sets cmp = 1 (not found).
     */
    do {
        if (ulen1-- < ulen2)
        {
            cmp = 1;
            break;
        }
    } while (FastUnicodeCompare(ustr1++, ulen2, ustr2, ulen2) != 0);

out:
    hfs_free(original_allocation);
    return cmp;
}

/*
 * Release directory hints for given directory
 *
 * If stale_hints_only is set, only hints older than HFS_DIRHINT_TTL
 * seconds are released; otherwise every hint is released.
 *
 * Requires an exclusive lock on directory cnode.
 */
void
hfs_reldirhints(struct cnode *dcp, int stale_hints_only)
{
    struct timeval tv;
    directoryhint_t *hint, *prev;
    const u_int8_t * name;

    if (stale_hints_only)
        microuptime(&tv);

    /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */
    TAILQ_FOREACH_REVERSE_SAFE(hint, &dcp->c_hintlist, hfs_hinthead, dh_link, prev) {
        if (stale_hints_only && (tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL)
            break; /* stop here if this entry is too new */
        /* Release the name buffer only if the hint owns it. */
        name = hint->dh_desc.cd_nameptr;
        if ((hint->dh_desc.cd_flags & CD_HASBUF) && (name != NULL)) {
            hint->dh_desc.cd_nameptr = NULL;
            hint->dh_desc.cd_namelen = 0;
            hint->dh_desc.cd_flags &= ~CD_HASBUF;
            hfs_free((void *)name);
        }
        TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link);
        hfs_free(hint);
        --dcp->c_dirhintcnt;
    }
}

/* hfs_erase_unused_nodes
 *
 * Check whether a volume may suffer from unused Catalog B-tree nodes that
 * are not zeroed (due to a historical newfs_hfs bug; the original bug
 * reference was lost in this copy — TODO confirm). If so, just write
 * zeroes to the unused nodes.
+ * + * How do we detect when a volume needs this repair? We can't always be + * certain. If a volume was created after a certain date, then it may have + * been created with the faulty newfs_hfs. Since newfs_hfs only created one + * clump, we can assume that if a Catalog B-tree is larger than its clump size, + * that means that the entire first clump must have been written to, which means + * there shouldn't be unused and unwritten nodes in that first clump, and this + * repair is not needed. + * + * We have defined a bit in the Volume Header's attributes to indicate when the + * unused nodes have been repaired. A newer newfs_hfs will set this bit. + * As will fsck_hfs when it repairs the unused nodes. + */ +int hfs_erase_unused_nodes(struct hfsmount *hfsmp) +{ + int result; + struct filefork *catalog; + int lockflags; + + if (hfsmp->vcbAtrb & kHFSUnusedNodeFixMask) + { + /* This volume has already been checked and repaired. */ + return 0; + } + + if ((hfsmp->localCreateDate < kHFSUnusedNodesFixDate)) + { + /* This volume is too old to have had the problem. */ + hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask; + return 0; + } + + catalog = hfsmp->hfs_catalog_cp->c_datafork; + if (catalog->ff_size > catalog->ff_clumpsize) + { + /* The entire first clump must have been in use at some point. */ + hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask; + return 0; + } + + /* + * If we get here, we need to zero out those unused nodes. + * + * We start a transaction and lock the catalog since we're going to be + * making on-disk changes. But note that BTZeroUnusedNodes doens't actually + * do its writing via the journal, because that would be too much I/O + * to fit in a transaction, and it's a pain to break it up into multiple + * transactions. (It behaves more like growing a B-tree would.) 
+ */ + LFHFS_LOG(LEVEL_DEBUG, "hfs_erase_unused_nodes: updating volume %s.\n", hfsmp->vcbVN); + result = hfs_start_transaction(hfsmp); + if (result) + goto done; + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + result = BTZeroUnusedNodes(catalog); +// vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_erase_unused_nodes"); + hfs_systemfile_unlock(hfsmp, lockflags); + hfs_end_transaction(hfsmp); + if (result == 0) + hfsmp->vcbAtrb |= kHFSUnusedNodeFixMask; + + LFHFS_LOG(LEVEL_DEBUG, "hfs_erase_unused_nodes: done updating volume %s.\n", hfsmp->vcbVN); + +done: + return result; +} + +/* + * On HFS Plus Volumes, there can be orphaned files or directories + * These are files or directories that were unlinked while busy. + * If the volume was not cleanly unmounted then some of these may + * have persisted and need to be removed. + */ +void +hfs_remove_orphans(struct hfsmount * hfsmp) +{ + BTreeIterator * iterator = NULL; + FSBufferDescriptor btdata; + struct HFSPlusCatalogFile filerec; + struct HFSPlusCatalogKey * keyp; + FCB *fcb; + ExtendedVCB *vcb; + char filename[32]; + char tempname[32]; + size_t namelen; + cat_cookie_t cookie; + int catlock = 0; + int catreserve = 0; + bool started_tr = false; + int lockflags; + int result; + int orphaned_files = 0; + int orphaned_dirs = 0; + + bzero(&cookie, sizeof(cookie)); + + if (hfsmp->hfs_flags & HFS_CLEANED_ORPHANS) + return; + + vcb = HFSTOVCB(hfsmp); + fcb = VTOF(hfsmp->hfs_catalog_vp); + + btdata.bufferAddress = &filerec; + btdata.itemSize = sizeof(filerec); + btdata.itemCount = 1; + + iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) + return; + + /* Build a key to "temp" */ + keyp = (HFSPlusCatalogKey*)&iterator->key; + keyp->parentID = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + keyp->nodeName.length = 4; /* "temp" */ + keyp->keyLength = kHFSPlusCatalogKeyMinimumLength + keyp->nodeName.length * 2; + keyp->nodeName.unicode[0] = 't'; + 
keyp->nodeName.unicode[1] = 'e'; + keyp->nodeName.unicode[2] = 'm'; + keyp->nodeName.unicode[3] = 'p'; + + /* + * Position the iterator just before the first real temp file/dir. + */ + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + (void) BTSearchRecord(fcb, iterator, NULL, NULL, iterator); + hfs_systemfile_unlock(hfsmp, lockflags); + + /* Visit all the temp files/dirs in the HFS+ private directory. */ + for (;;) { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + result = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + if (result) + break; + if (keyp->parentID != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) + break; + + (void) utf8_encodestr(keyp->nodeName.unicode, keyp->nodeName.length * 2, + (u_int8_t *)filename, &namelen, sizeof(filename), 0, UTF_ADD_NULL_TERM); + + (void) snprintf(tempname, sizeof(tempname), "%s%d", HFS_DELETE_PREFIX, filerec.fileID); + + /* + * Delete all files (and directories) named "tempxxx", + * where xxx is the file's cnid in decimal. + * + */ + if (bcmp(tempname, filename, namelen + 1) != 0) + continue; + + struct filefork dfork; + struct filefork rfork; + struct cnode cnode; + int mode = 0; + + bzero(&dfork, sizeof(dfork)); + bzero(&rfork, sizeof(rfork)); + bzero(&cnode, sizeof(cnode)); + + if (hfs_start_transaction(hfsmp) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: failed to start transaction\n"); + goto exit; + } + started_tr = true; + + /* + * Reserve some space in the Catalog file. 
+ */ + if (cat_preflight(hfsmp, CAT_DELETE, &cookie) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: cat_preflight failed\n"); + goto exit; + } + catreserve = 1; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + catlock = 1; + + /* Build a fake cnode */ + cat_convertattr(hfsmp, (CatalogRecord *)&filerec, &cnode.c_attr, &dfork.ff_data, &rfork.ff_data); + cnode.c_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; + cnode.c_desc.cd_nameptr = (const u_int8_t *)filename; + cnode.c_desc.cd_namelen = namelen; + cnode.c_desc.cd_cnid = cnode.c_attr.ca_fileid; + cnode.c_blocks = dfork.ff_blocks + rfork.ff_blocks; + + /* Position iterator at previous entry */ + if (BTIterateRecord(fcb, kBTreePrevRecord, iterator, + NULL, NULL) != 0) { + break; + } + + /* Truncate the file to zero (both forks) */ + if (dfork.ff_blocks > 0) { + u_int64_t fsize; + + dfork.ff_cp = &cnode; + cnode.c_datafork = &dfork; + cnode.c_rsrcfork = NULL; + fsize = (u_int64_t)dfork.ff_blocks * (u_int64_t)HFSTOVCB(hfsmp)->blockSize; + while (fsize > 0) { + if (fsize > HFS_BIGFILE_SIZE) { + fsize -= HFS_BIGFILE_SIZE; + } else { + fsize = 0; + } + + if (TruncateFileC(vcb, (FCB*)&dfork, fsize, 1, 0, cnode.c_attr.ca_fileid, false) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error truncating data fork!\n"); + break; + } + + // + // if we're iteratively truncating this file down, + // then end the transaction and start a new one so + // that no one transaction gets too big. + // + if (fsize > 0) { + /* Drop system file locks before starting + * another transaction to preserve lock order. 
+ */ + hfs_systemfile_unlock(hfsmp, lockflags); + catlock = 0; + hfs_end_transaction(hfsmp); + + if (hfs_start_transaction(hfsmp) != 0) { + started_tr = false; + goto exit; + } + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + catlock = 1; + } + } + } + + if (rfork.ff_blocks > 0) { + rfork.ff_cp = &cnode; + cnode.c_datafork = NULL; + cnode.c_rsrcfork = &rfork; + if (TruncateFileC(vcb, (FCB*)&rfork, 0, 1, 1, cnode.c_attr.ca_fileid, false) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error truncating rsrc fork!\n"); + break; + } + } + + // Deal with extended attributes + if (ISSET(cnode.c_attr.ca_recflags, kHFSHasAttributesMask)) { + // hfs_removeallattr uses its own transactions + hfs_systemfile_unlock(hfsmp, lockflags); + catlock = false; + hfs_end_transaction(hfsmp); + + hfs_removeallattr(hfsmp, cnode.c_attr.ca_fileid, &started_tr); + + if (!started_tr) { + if (hfs_start_transaction(hfsmp) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans:: failed to start transaction\n"); + goto exit; + } + started_tr = true; + } + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + } + + /* Remove the file or folder record from the Catalog */ + if (cat_delete(hfsmp, &cnode.c_desc, &cnode.c_attr) != 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: error deleting cat rec for id %d!\n", cnode.c_desc.cd_cnid); + hfs_systemfile_unlock(hfsmp, lockflags); + catlock = 0; + hfs_volupdate(hfsmp, VOL_UPDATE, 0); + break; + } + + mode = cnode.c_attr.ca_mode & S_IFMT; + + if (mode == S_IFDIR) { + orphaned_dirs++; + } + else { + orphaned_files++; + } + + /* Update parent and volume counts */ + hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--; + if (mode == S_IFDIR) { + DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]); + } + + (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS], + 
&hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL); + + /* Drop locks and end the transaction */ + hfs_systemfile_unlock(hfsmp, lockflags); + cat_postflight(hfsmp, &cookie); + catlock = catreserve = 0; + + /* + Now that Catalog is unlocked, update the volume info, making + sure to differentiate between files and directories + */ + if (mode == S_IFDIR) { + hfs_volupdate(hfsmp, VOL_RMDIR, 0); + } + else{ + hfs_volupdate(hfsmp, VOL_RMFILE, 0); + } + + hfs_end_transaction(hfsmp); + started_tr = false; + } /* end for */ + +exit: + + if (orphaned_files > 0 || orphaned_dirs > 0) + LFHFS_LOG(LEVEL_ERROR, "hfs_remove_orphans: Removed %d orphaned / unlinked files and %d directories \n", orphaned_files, orphaned_dirs); + + if (catlock) { + hfs_systemfile_unlock(hfsmp, lockflags); + } + if (catreserve) { + cat_postflight(hfsmp, &cookie); + } + if (started_tr) { + hfs_end_transaction(hfsmp); + } + + hfs_free(iterator); + hfsmp->hfs_flags |= HFS_CLEANED_ORPHANS; +} + + +u_int32_t GetFileInfo(ExtendedVCB *vcb, const char *name, + struct cat_attr *fattr, struct cat_fork *forkinfo) { + + struct hfsmount * hfsmp; + struct cat_desc jdesc; + int lockflags; + int error; + + if (vcb->vcbSigWord != kHFSPlusSigWord) + return (0); + + hfsmp = VCBTOHFS(vcb); + + memset(&jdesc, 0, sizeof(struct cat_desc)); + jdesc.cd_parentcnid = kRootDirID; + jdesc.cd_nameptr = (const u_int8_t *)name; + jdesc.cd_namelen = strlen(name); + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + error = cat_lookup(hfsmp, &jdesc, 0, NULL, fattr, forkinfo, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (error == 0) { + return (fattr->ca_fileid); + } else if (hfsmp->hfs_flags & HFS_READ_ONLY) { + return (0); + } + + return (0); /* XXX what callers expect on an error */ +} + + +int hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, + void *_args, off_t embeddedOffset, daddr64_t mdb_offset, + HFSMasterDirectoryBlock *mdbp) { + + JournalInfoBlock *jibp; + void *bp 
= NULL; + void *jinfo_bp = NULL; + int sectors_per_fsblock, arg_flags=0, arg_tbufsz=0; + int retval = 0; + uint32_t blksize = hfsmp->hfs_logical_block_size; + struct vnode *devvp; + struct hfs_mount_args *args = _args; + u_int32_t jib_flags; + u_int64_t jib_offset; + u_int64_t jib_size; + + devvp = hfsmp->hfs_devvp; + + if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) { + arg_flags = args->journal_flags; + arg_tbufsz = args->journal_tbuffer_size; + } + + sectors_per_fsblock = SWAP_BE32(vhp->blockSize) / blksize; + + // Read Journal Info + jinfo_bp = hfs_malloc(hfsmp->hfs_physical_block_size); + if (!jinfo_bp) { + goto cleanup_dev_name; + } + + uint32_t ujournalInfoBlock = SWAP_BE32(vhp->journalInfoBlock); + uint64_t u64JournalOffset = + (daddr64_t)((embeddedOffset/blksize) + ((u_int64_t)ujournalInfoBlock*sectors_per_fsblock)); + retval = raw_readwrite_read_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, + jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL); + + if (retval) { + goto cleanup_dev_name; + } + + jibp = jinfo_bp; + jib_flags = SWAP_BE32(jibp->flags); + jib_size = SWAP_BE64(jibp->size); + + if (!(jib_flags & kJIJournalInFSMask)) { + goto cleanup_dev_name; + } + + hfsmp->jvp = hfsmp->hfs_devvp; + jib_offset = SWAP_BE64(jibp->offset); + + // save this off for the hack-y check in hfs_remove() + hfsmp->jnl_start = jib_offset / SWAP_BE32(vhp->blockSize); + hfsmp->jnl_size = jib_size; + + if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (hfsmp->hfs_mp->mnt_flag & MNT_ROOTFS) == 0) { + // if the file system is read-only, check if the journal is empty. + // if it is, then we can allow the mount. otherwise we have to + // return failure. 
+ retval = journal_is_clean(hfsmp->jvp, + jib_offset + embeddedOffset, + jib_size, + devvp, + hfsmp->hfs_logical_block_size, + hfsmp->hfs_mp); + + hfsmp->jnl = NULL; + + hfs_free(jinfo_bp); + jinfo_bp = NULL; + + if (retval) { + LFHFS_LOG(LEVEL_ERROR, "hfs: early journal init: the volume is read-only and journal is dirty. Can not mount volume.\n"); + } + + goto cleanup_dev_name; + } + + if (jib_flags & kJIJournalNeedInitMask) { + LFHFS_LOG(LEVEL_ERROR, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", + jib_offset + embeddedOffset, jib_size); + hfsmp->jnl = journal_create(hfsmp->jvp, + jib_offset + embeddedOffset, + jib_size, + devvp, + blksize, + arg_flags, + arg_tbufsz, + NULL, + hfsmp->hfs_mp, + hfsmp->hfs_mp); + + // no need to start a transaction here... if this were to fail + // we'd just re-init it on the next mount. + jib_flags &= ~kJIJournalNeedInitMask; + jibp->flags = SWAP_BE32(jib_flags); + raw_readwrite_write_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, + jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL); + jinfo_bp = NULL; + jibp = NULL; + } else { + LFHFS_LOG(LEVEL_DEFAULT, "hfs: Opening the journal (jib_offset 0x%llx size 0x%llx vhp_blksize %d)...\n", + jib_offset + embeddedOffset, + jib_size, SWAP_BE32(vhp->blockSize)); + + hfsmp->jnl = journal_open(hfsmp->jvp, + jib_offset + embeddedOffset, + jib_size, + devvp, + blksize, + arg_flags, + arg_tbufsz, + NULL, + hfsmp->hfs_mp, + hfsmp->hfs_mp); + + if (hfsmp->jnl && mdbp) { + // reload the mdb because it could have changed + // if the journal had to be replayed. 
+ if (mdb_offset == 0) { + mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize)); + } + + bp = hfs_malloc(hfsmp->hfs_physical_block_size); + if (!bp) { + goto cleanup_dev_name; + } + + uint64_t u64MDBOffset = HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys); + retval = raw_readwrite_read_mount(devvp, u64MDBOffset, hfsmp->hfs_physical_block_size, bp, hfsmp->hfs_physical_block_size, NULL, NULL); + + if (retval) { + LFHFS_LOG(LEVEL_ERROR, "hfs: failed to reload the mdb after opening the journal (retval %d)!\n", retval); + goto cleanup_dev_name; + } + + bcopy(bp + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size), mdbp, 512); + } + } + + // if we expected the journal to be there and we couldn't + // create it or open it then we have to bail out. + if (hfsmp->jnl == NULL) { + LFHFS_LOG(LEVEL_ERROR, "hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval); + retval = EINVAL; + goto cleanup_dev_name; + } + +cleanup_dev_name: + if (bp) + hfs_free(bp); + + if (jinfo_bp) + hfs_free(jinfo_bp); + + return retval; +} + +// +// This function will go and re-locate the .journal_info_block and +// the .journal files in case they moved (which can happen if you +// run Norton SpeedDisk). If we fail to find either file we just +// disable journaling for this volume and return. We turn off the +// journaling bit in the vcb and assume it will get written to disk +// later (if it doesn't on the next mount we'd do the same thing +// again which is harmless). If we disable journaling we don't +// return an error so that the volume is still mountable. +// +// If the info we find for the .journal_info_block and .journal files +// isn't what we had stored, we re-set our cached info and proceed +// with opening the journal normally. 
+// +static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args) { + JournalInfoBlock *jibp; + void *jinfo_bp = NULL; + int sectors_per_fsblock, arg_flags=0, arg_tbufsz=0; + int retval, write_jibp = 0, recreate_journal = 0; + struct vnode *devvp; + struct cat_attr jib_attr, jattr; + struct cat_fork jib_fork, jfork; + ExtendedVCB *vcb; + u_int32_t fid; + struct hfs_mount_args *args = _args; + u_int32_t jib_flags; + u_int64_t jib_offset; + u_int64_t jib_size; + + devvp = hfsmp->hfs_devvp; + vcb = HFSTOVCB(hfsmp); + + if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) { + if (args->journal_disable) { + return 0; + } + + arg_flags = args->journal_flags; + arg_tbufsz = args->journal_tbuffer_size; + } + + fid = GetFileInfo(vcb, ".journal_info_block", &jib_attr, &jib_fork); + if (fid == 0 || jib_fork.cf_extents[0].startBlock == 0 || jib_fork.cf_size == 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs: can't find the .journal_info_block! disabling journaling (start: %d).\n", + fid ? jib_fork.cf_extents[0].startBlock : 0); + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + return 0; + } + hfsmp->hfs_jnlinfoblkid = fid; + + // make sure the journal_info_block begins where we think it should. + if (SWAP_BE32(vhp->journalInfoBlock) != jib_fork.cf_extents[0].startBlock) { + LFHFS_LOG(LEVEL_ERROR, "hfs: The journal_info_block moved (was: %d; is: %d). 
Fixing up\n", + SWAP_BE32(vhp->journalInfoBlock), jib_fork.cf_extents[0].startBlock); + + vcb->vcbJinfoBlock = jib_fork.cf_extents[0].startBlock; + vhp->journalInfoBlock = SWAP_BE32(jib_fork.cf_extents[0].startBlock); + recreate_journal = 1; + } + + + sectors_per_fsblock = SWAP_BE32(vhp->blockSize) / hfsmp->hfs_logical_block_size; + + // Read journal info + jinfo_bp = hfs_malloc(hfsmp->hfs_physical_block_size); + if (!jinfo_bp) { + LFHFS_LOG(LEVEL_ERROR, "hfs: can't alloc memory.\n"); + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + return 0; + } + + uint64_t u64JournalOffset = + (vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size + + ((u_int64_t)SWAP_BE32(vhp->journalInfoBlock)*sectors_per_fsblock)); + + retval = raw_readwrite_read_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL); + + if (retval) { + if (jinfo_bp) { + hfs_free(jinfo_bp); + } + LFHFS_LOG(LEVEL_ERROR, "hfs: can't read journal info block. disabling journaling.\n"); + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + return 0; + } + + jibp = jinfo_bp; + jib_flags = SWAP_BE32(jibp->flags); + jib_offset = SWAP_BE64(jibp->offset); + jib_size = SWAP_BE64(jibp->size); + + fid = GetFileInfo(vcb, ".journal", &jattr, &jfork); + if (fid == 0 || jfork.cf_extents[0].startBlock == 0 || jfork.cf_size == 0) { + LFHFS_LOG(LEVEL_ERROR, "hfs: can't find the journal file! disabling journaling (start: %d)\n", + fid ? jfork.cf_extents[0].startBlock : 0); + hfs_free(jinfo_bp); + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + return 0; + } + hfsmp->hfs_jnlfileid = fid; + + // make sure the journal file begins where we think it should. + if ((jib_flags & kJIJournalInFSMask) && (jib_offset / (u_int64_t)vcb->blockSize) != jfork.cf_extents[0].startBlock) { + LFHFS_LOG(LEVEL_ERROR, "hfs: The journal file moved (was: %lld; is: %d). 
Fixing up\n", + (jib_offset / (u_int64_t)vcb->blockSize), jfork.cf_extents[0].startBlock); + + jib_offset = (u_int64_t)jfork.cf_extents[0].startBlock * (u_int64_t)vcb->blockSize; + write_jibp = 1; + recreate_journal = 1; + } + + // check the size of the journal file. + if (jib_size != (u_int64_t)jfork.cf_extents[0].blockCount*vcb->blockSize) { + LFHFS_LOG(LEVEL_ERROR, "hfs: The journal file changed size! (was %lld; is %lld). Fixing up.\n", + jib_size, (u_int64_t)jfork.cf_extents[0].blockCount*vcb->blockSize); + + jib_size = (u_int64_t)jfork.cf_extents[0].blockCount * vcb->blockSize; + write_jibp = 1; + recreate_journal = 1; + } + + if (!(jib_flags & kJIJournalInFSMask)) { + LFHFS_LOG(LEVEL_ERROR, "hfs: No support for journal on a different volume\n"); + hfs_free(jinfo_bp); + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + return 0; + } + + hfsmp->jvp = hfsmp->hfs_devvp; + jib_offset += (off_t)vcb->hfsPlusIOPosOffset; + + // save this off for the hack-y check in hfs_remove() + hfsmp->jnl_start = jib_offset / SWAP_BE32(vhp->blockSize); + hfsmp->jnl_size = jib_size; + + if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (hfsmp->hfs_mp->mnt_flag & MNT_ROOTFS) == 0) { + // if the file system is read-only, check if the journal is empty. + // if it is, then we can allow the mount. otherwise we have to + // return failure. + retval = journal_is_clean(hfsmp->jvp, + jib_offset, + jib_size, + devvp, + hfsmp->hfs_logical_block_size, + hfsmp->hfs_mp); + + hfsmp->jnl = NULL; + + hfs_free(jinfo_bp); + + if (retval) { + LFHFS_LOG(LEVEL_ERROR, "hfs_late_journal_init: volume on is read-only and journal is dirty. 
Can not mount volume.\n"); + } + + return retval; + } + + if ((jib_flags & kJIJournalNeedInitMask) || recreate_journal) { + LFHFS_LOG(LEVEL_ERROR, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", + jib_offset, jib_size); + hfsmp->jnl = journal_create(hfsmp->jvp, + jib_offset, + jib_size, + devvp, + hfsmp->hfs_logical_block_size, + arg_flags, + arg_tbufsz, + NULL, + hfsmp->hfs_mp, + hfsmp->hfs_mp); + + // no need to start a transaction here... if this were to fail + // we'd just re-init it on the next mount. + jib_flags &= ~kJIJournalNeedInitMask; + write_jibp = 1; + + } else { + // + // if we weren't the last person to mount this volume + // then we need to throw away the journal because it + // is likely that someone else mucked with the disk. + // if the journal is empty this is no big deal. if the + // disk is dirty this prevents us from replaying the + // journal over top of changes that someone else made. + // + arg_flags |= JOURNAL_RESET; + + //printf("hfs: Opening the journal (joffset 0x%llx sz 0x%llx vhp_blksize %d)...\n", + // jib_offset, + // jib_size, SWAP_BE32(vhp->blockSize)); + + hfsmp->jnl = journal_open(hfsmp->jvp, + jib_offset, + jib_size, + devvp, + hfsmp->hfs_logical_block_size, + arg_flags, + arg_tbufsz, + NULL, + hfsmp->hfs_mp, + hfsmp->hfs_mp); + } + + + if (write_jibp) { + jibp->flags = SWAP_BE32(jib_flags); + jibp->offset = SWAP_BE64(jib_offset); + jibp->size = SWAP_BE64(jib_size); + + uint64_t uActualWrite = 0; + retval = raw_readwrite_write_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size, jinfo_bp, hfsmp->hfs_physical_block_size, &uActualWrite, NULL); + } + + if (jinfo_bp) { + hfs_free(jinfo_bp); + } + + // if we expected the journal to be there and we couldn't + // create it or open it then we have to bail out. 
+ if (hfsmp->jnl == NULL) { + LFHFS_LOG(LEVEL_ERROR, "hfs: late jnl init: failed to open/create the journal (retval %d).\n", retval); + return EINVAL; + } + + return 0; +} + diff --git a/livefiles_hfs_plugin/lf_hfs_vfsutils.h b/livefiles_hfs_plugin/lf_hfs_vfsutils.h new file mode 100644 index 0000000..2eec034 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vfsutils.h @@ -0,0 +1,56 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vfsutils.h + * livefiles_hfs + * + * Created by Yakov Ben Zaken on 20/03/2018. + */ + +#ifndef lf_hfs_vfsutils_h +#define lf_hfs_vfsutils_h + +#include "lf_hfs.h" + +u_int32_t BestBlockSizeFit(u_int32_t allocationBlockSize, u_int32_t blockSizeLimit, u_int32_t baseMultiple); +int hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, u_int64_t disksize, bool bFailForDirty); +int hfs_CollectBtreeStats(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, void *args); +int hfs_ValidateHFSPlusVolumeHeader(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp); +int hfs_start_transaction(struct hfsmount *hfsmp); +int hfs_end_transaction(struct hfsmount *hfsmp); +void* hfs_malloc( size_t size ); +void hfs_free( void* ptr ); +void* hfs_mallocz( size_t size); +int hfsUnmount( register struct hfsmount *hfsmp); +void hfs_lock_mount(struct hfsmount *hfsmp); +void hfs_unlock_mount(struct hfsmount *hfsmp); +int hfs_systemfile_lock(struct hfsmount *hfsmp, int flags, enum hfs_locktype locktype); +void hfs_systemfile_unlock(struct hfsmount *hfsmp, int flags); +u_int32_t hfs_freeblks(struct hfsmount * hfsmp, int wantreserve); +short MacToVFSError(OSErr err); + +void hfs_reldirhint(struct cnode *dcp, directoryhint_t * relhint); +void hfs_insertdirhint(struct cnode *dcp, directoryhint_t * hint); +void hfs_reldirhints(struct cnode *dcp, int stale_hints_only); + +directoryhint_t* hfs_getdirhint(struct cnode *dcp, int index, int detach); + +int hfs_systemfile_lock(struct hfsmount 
*hfsmp, int flags, enum hfs_locktype locktype); +void hfs_systemfile_unlock(struct hfsmount *hfsmp, int flags); +bool overflow_extents(struct filefork *fp); +int hfs_namecmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2); +int hfs_strstr(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2); +int hfs_apendixcmp(const u_int8_t *str1, size_t len1, const u_int8_t *str2, size_t len2); +void hfs_remove_orphans(struct hfsmount * hfsmp); +int hfs_erase_unused_nodes(struct hfsmount *hfsmp); + +int hfs_lock_global (struct hfsmount *hfsmp, enum hfs_locktype locktype); +void hfs_unlock_global (struct hfsmount *hfsmp); + + +// Journaing +int hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, + void *_args, off_t embeddedOffset, daddr64_t mdb_offset, + HFSMasterDirectoryBlock *mdbp); +errno_t hfs_flush(struct hfsmount *hfsmp, hfs_flush_mode_t mode); + +#endif /* lf_hfs_vfsutils_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_vnode.c b/livefiles_hfs_plugin/lf_hfs_vnode.c new file mode 100644 index 0000000..292a7a2 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vnode.c @@ -0,0 +1,173 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vnode.c + * livefiles_hfs + * + * Created by Or Haimovich on 20/3/18. 
+ */ + +#include "lf_hfs_vnode.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_fileops_handler.h" + +int VTtoUVFS_tab[16] = +{ + VNON, + /* 1 - 5 */ + UVFS_FA_TYPE_FILE, UVFS_FA_TYPE_DIR, VNON, VNON, UVFS_FA_TYPE_SYMLINK, + /* 6 - 10 */ + VNON, VNON, VNON, VNON, VNON +}; + +enum vtype iftovt_tab[16] = +{ + VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, + VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, +}; + +int uvfsToVtype_tab[4] = +{ + VNON,VREG,VDIR,VLNK, +}; + +mode_t vttoif_tab[9] = +{ + 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, + S_IFSOCK, S_IFIFO, S_IFMT, +}; + +errno_t vnode_initialize(uint32_t size, void *data, vnode_t *vpp) +{ + memset(*vpp,0,sizeof(struct vnode)); + (*vpp)->cnode = NULL; + assert(size == sizeof((*vpp)->sFSParams)); + memcpy((void *) &(*vpp)->sFSParams,data,size); + + if ((*vpp)->sFSParams.vnfs_vtype == VDIR) + { + (*vpp)->sExtraData.sDirData.uDirVersion = 1; + } + return 0; +} + +errno_t vnode_create(uint32_t size, void *data, vnode_t *vpp) +{ + *vpp = hfs_malloc(sizeof(struct vnode)); + if (*vpp == NULL) + { + return ENOMEM; + } + + return (vnode_initialize(size, data, vpp)); +} + +void vnode_rele(vnode_t vp) +{ + if (vp) { + lf_hfs_generic_buf_cache_LockBufCache(); + lf_hfs_generic_buf_cache_remove_vnode(vp); + lf_hfs_generic_buf_cache_UnLockBufCache(); + hfs_free(vp); + } + vp = NULL; +} + +mount_t vnode_mount(vnode_t vp) +{ + return (vp->sFSParams.vnfs_mp); +} + +int vnode_issystem(vnode_t vp) +{ + return (vp->sFSParams.vnfs_marksystem); +} + +int vnode_isreg(vnode_t vp) +{ + return (vp->sFSParams.vnfs_vtype == VREG); +} + +int vnode_isdir(vnode_t vp) +{ + return (vp->sFSParams.vnfs_vtype == VDIR); +} + +int vnode_islnk(vnode_t vp) +{ + return (vp->sFSParams.vnfs_vtype == VLNK); +} + +/*! + @function vnode_update_identity + case: + VNODE_UPDATE_PARENT: set parent. + VNODE_UPDATE_NAME: set name. 
+ VNODE_UPDATE_CACHE: flush cache entries for hard links associated with this file. + + + */ +void vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags) +{ + if (flags & VNODE_UPDATE_PARENT) + { + vp->sFSParams.vnfs_dvp = dvp; + } + + if (flags & VNODE_UPDATE_NAME) + { + if (!vp->sFSParams.vnfs_cnp) { + vp->sFSParams.vnfs_cnp = hfs_malloc(sizeof(struct componentname)); + if (vp->sFSParams.vnfs_cnp == NULL) { + LFHFS_LOG(LEVEL_ERROR, "vnode_update_identity: failed to malloc vnfs_cnp\n"); + assert(0); + } + } + vp->sFSParams.vnfs_cnp->cn_namelen = name_len; + if (vp->sFSParams.vnfs_cnp->cn_nameptr) { + hfs_free(vp->sFSParams.vnfs_cnp->cn_nameptr); + } + vp->sFSParams.vnfs_cnp->cn_nameptr = lf_hfs_utils_allocate_and_copy_string( (char*) name, name_len ); + vp->sFSParams.vnfs_cnp->cn_hash = name_hashval; + } +} + +void vnode_GetAttrInternal (vnode_t vp, UVFSFileAttributes *psOutAttr ) +{ + struct cnode *cp = VTOC(vp); + enum vtype v_type; + + memset( psOutAttr, 0, sizeof(UVFSFileAttributes) ); + + psOutAttr->fa_validmask = VALID_OUT_ATTR_MASK; + + psOutAttr->fa_gid = cp->c_gid; + psOutAttr->fa_uid = cp->c_uid; + psOutAttr->fa_mode = cp->c_mode & ALL_UVFS_MODES; + + v_type = vp->sFSParams.vnfs_vtype; + psOutAttr->fa_type = VTOUVFS(v_type); + + psOutAttr->fa_atime.tv_sec = cp->c_atime; + psOutAttr->fa_ctime.tv_sec = cp->c_ctime; + psOutAttr->fa_mtime.tv_sec = cp->c_mtime; + psOutAttr->fa_birthtime.tv_sec = cp->c_btime; + + psOutAttr->fa_fileid = cp->c_fileid; + psOutAttr->fa_parentid = cp->c_parentcnid; + psOutAttr->fa_bsd_flags = cp->c_bsdflags; + + if (v_type == VDIR) + { + psOutAttr->fa_allocsize = 0; + psOutAttr->fa_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE; + psOutAttr->fa_nlink = cp->c_entries + 2; + } + else + { + psOutAttr->fa_allocsize = VCTOF(vp, cp)->ff_blocks * VTOHFS(vp)->blockSize; + psOutAttr->fa_size = VCTOF(vp, cp)->ff_size; + psOutAttr->fa_nlink = (cp->c_flag & C_HARDLINK)? 
cp->c_linkcount : 1; + } +} diff --git a/livefiles_hfs_plugin/lf_hfs_vnode.h b/livefiles_hfs_plugin/lf_hfs_vnode.h new file mode 100644 index 0000000..ece2e07 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vnode.h @@ -0,0 +1,246 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vnode.h + * livefiles_hfs + * + * Created by Or Haimovich on 18/3/18. + */ + +#ifndef lf_hfs_vnode_h +#define lf_hfs_vnode_h + +#include + +#include "lf_hfs_common.h" + +/* + * Vnode types. VNON means no type. + */ +enum vtype { + /* 0 */ + VNON, + /* 1 - 5 */ + VREG, VDIR, VBLK, VCHR, VLNK, + /* 6 - 10 */ + VSOCK, VFIFO, VBAD, VSTR, VCPLX +}; + +extern int VTtoUVFS_tab[]; + +#define VTTOUVFS(vt) (VTtoUVFS_tab[vt]) +#define IS_ROOT(vp) (vp->sFSParams.vnfs_markroot) +#define IS_DIR(vp) (vp->sFSParams.vnfs_vtype == VDIR) +#define IS_LNK(vp) (vp->sFSParams.vnfs_vtype == VLNK) + + +/* + * Convert between vnode types and inode formats (since POSIX.1 + * defines mode word of stat structure in terms of inode formats). + */ +struct componentname { + /* + * Arguments to lookup. + */ + uint32_t cn_nameiop; /* lookup operation */ + uint32_t cn_flags; /* flags (see below) */ + + /* + * Shared between lookup and commit routines. 
+ */ + char *cn_pnbuf; /* pathname buffer */ + int cn_pnlen; /* length of allocated buffer */ + char *cn_nameptr; /* pointer to looked up name */ + int cn_namelen; /* length of looked up component */ + uint32_t cn_hash; /* hash value of looked up name */ + uint32_t cn_consume; /* chars to consume in lookup() */ +}; + +/* The following structure specifies a vnode for creation */ +struct vnode_fsparam { + struct mount *vnfs_mp; /* mount point to which this vnode_t is part of */ + enum vtype vnfs_vtype; /* vnode type */ + const char *vnfs_str; /* File system Debug aid */ + struct vnode *vnfs_dvp; /* The parent vnode */ + void *vnfs_fsnode; /* inode */ +// int (**vnfs_vops)(void *); /* vnode dispatch table */ + int vnfs_markroot; /* is this a root vnode in FS (not a system wide one) */ + int vnfs_marksystem; /* is a system vnode */ + dev_t vnfs_rdev; /* dev_t for block or char vnodes */ + off_t vnfs_filesize; /* that way no need for getattr in UBC */ + struct componentname *vnfs_cnp; /* component name to add to namecache */ + uint32_t vnfs_flags; /* flags */ +}; + +typedef struct +{ + uint64_t uDirVersion; +} DirData_s; + +typedef struct vnode +{ + uint32_t uValidNodeMagic1; + + struct hfsmount *mount; + bool is_rsrc; + struct cnode *cnode; + struct vnode_fsparam sFSParams; + FileSystemRecord_s* psFSRecord; + bool bIsMountVnode; + + union + { + DirData_s sDirData; + } sExtraData; + + uint32_t uValidNodeMagic2; + +} *vnode_t; + +typedef struct mount +{ + struct hfsmount* psHfsmount; + int mnt_flag; +} *mount_t; + +struct vnode_attr { + /* bitfields */ + uint64_t va_supported; + uint64_t va_active; + /* + * Control flags. The low 16 bits are reserved for the + * ioflags being passed for truncation operations. 
+ */ + int va_vaflags; + + /* traditional stat(2) parameter fields */ + dev_t va_rdev; /* device id (device nodes only) */ + uint64_t va_nlink; /* number of references to this file */ + uint64_t va_total_size; /* size in bytes of all forks */ + uint64_t va_total_alloc; /* disk space used by all forks */ + uint64_t va_data_size; /* size in bytes of the fork managed by current vnode */ + uint64_t va_data_alloc; /* disk space used by the fork managed by current vnode */ + uint32_t va_iosize; /* optimal I/O blocksize */ + /* file security information */ + uid_t va_uid; /* owner UID */ + gid_t va_gid; /* owner GID */ + mode_t va_mode; /* posix permissions */ + uint32_t va_flags; /* file flags */ + struct kauth_acl *va_acl; /* access control list */ + /* timestamps */ + struct timespec va_create_time; /* time of creation */ + struct timespec va_access_time; /* time of last access */ + struct timespec va_modify_time; /* time of last data modification */ + struct timespec va_change_time; /* time of last metadata change */ + struct timespec va_backup_time; /* time of last backup */ + + /* file parameters */ + uint64_t va_fileid; /* file unique ID in filesystem */ + uint64_t va_linkid; /* file link unique ID */ + uint64_t va_parentid; /* parent ID */ + uint32_t va_fsid; /* filesystem ID */ + uint64_t va_filerev; /* file revision counter */ /* XXX */ + uint32_t va_gen; /* file generation count */ /* XXX - relationship of + * these two? */ + /* misc parameters */ + uint32_t va_encoding; /* filename encoding script */ + enum vtype va_type; /* file type */ + char * va_name; /* Name for ATTR_CMN_NAME; MAXPATHLEN bytes */ + guid_t va_uuuid; /* file owner UUID */ + guid_t va_guuid; /* file group UUID */ + + /* Meaningful for directories only */ + uint64_t va_nchildren; /* Number of items in a directory */ + uint64_t va_dirlinkcount; /* Real references to dir (i.e. excluding "." and ".." 
refs) */ + + struct kauth_acl *va_base_acl; + + struct timespec va_addedtime; /* timestamp when item was added to parent directory */ + + /* Data Protection fields */ + uint32_t va_dataprotect_class; /* class specified for this file if it didn't exist */ + uint32_t va_dataprotect_flags; /* flags from NP open(2) to the filesystem */ + /* Document revision tracking */ + uint32_t va_document_id; + /* Fields for Bulk args */ + uint32_t va_devid; /* devid of filesystem */ + uint32_t va_objtype; /* type of object */ + uint32_t va_objtag; /* vnode tag of filesystem */ + uint32_t va_user_access; /* access for user */ + uint8_t va_finderinfo[32]; /* Finder Info */ + uint64_t va_rsrc_length; /* Resource Fork length */ + uint64_t va_rsrc_alloc; /* Resource Fork allocation size */ + fsid_t va_fsid64; /* fsid, of the correct type */ + uint32_t va_write_gencount; /* counter that increments each time the file changes */ + uint64_t va_private_size; /* If the file were deleted, how many bytes would be freed immediately */ + /* add new fields here only */ +}; + +/* + * Convert between vnode types and inode formats (since POSIX.1 + * defines mode word of stat structure in terms of inode formats). + */ +extern enum vtype iftovt_tab[]; +#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12]) + +extern int VTtoUVFS_tab[]; +extern int uvfsToVtype_tab[]; +extern mode_t vttoif_tab[]; + +#define VTOUVFS(type) (VTtoUVFS_tab[type]) +#define UVFSTOV(type) (uvfsToVtype_tab[type]) +#define MAKEIMODE(indx) (vttoif_tab[indx]) + +#define VNODE_UPDATE_PARENT 0x01 +#define VNODE_UPDATE_NAME 0x02 +#define VNODE_UPDATE_CACHE 0x04 + +#define VNODE_REMOVE_NODELETEBUSY 0x0001 /* Don't delete busy files (Carbon) */ +#define VNODE_REMOVE_SKIP_NAMESPACE_EVENT 0x0002 /* Do not upcall to userland handlers */ +#define VNODE_REMOVE_NO_AUDIT_PATH 0x0004 /* Do not audit the path */ + +#define VNOVAL (-1) + +/* + * Flags for ioflag. 
+ */ +#define IO_UNIT 0x0001 /* do I/O as atomic unit */ +#define IO_APPEND 0x0002 /* append write to end */ +#define IO_SYNC 0x0004 /* do I/O synchronously */ +#define IO_NODELOCKED 0x0008 /* underlying node already locked */ +#define IO_NDELAY 0x0010 /* FNDELAY flag set in file table */ +#define IO_NOZEROFILL 0x0020 /* F_SETSIZE fcntl uses to prevent zero filling */ +//#ifdef XNU_KERNEL_PRIVATE +#define IO_REVOKE IO_NOZEROFILL /* revoked close for tty, will Not be used in conjunction */ +//#endif /* XNU_KERNEL_PRIVATE */ +#define IO_TAILZEROFILL 0x0040 /* zero fills at the tail of write */ +#define IO_HEADZEROFILL 0x0080 /* zero fills at the head of write */ +#define IO_NOZEROVALID 0x0100 /* do not zero fill if valid page */ +#define IO_NOZERODIRTY 0x0200 /* do not zero fill if page is dirty */ +#define IO_CLOSE 0x0400 /* I/O issued from close path */ +#define IO_NOCACHE 0x0800 /* same effect as VNOCACHE_DATA, but only for this 1 I/O */ +#define IO_RAOFF 0x1000 /* same effect as VRAOFF, but only for this 1 I/O */ +#define IO_DEFWRITE 0x2000 /* defer write if vfs.defwrite is set */ +#define IO_PASSIVE 0x4000 /* this I/O is marked as background I/O so it won't throttle Throttleable I/O */ +#define IO_BACKGROUND IO_PASSIVE /* used for backward compatibility. to be removed after IO_BACKGROUND is no longer +* used by DiskImages in-kernel mode */ +#define IO_NOAUTH 0x8000 /* No authorization checks. */ +#define IO_NODIRECT 0x10000 /* don't use direct synchronous writes if IO_NOCACHE is specified */ +#define IO_ENCRYPTED 0x20000 /* Retrieve encrypted blocks from the filesystem */ +#define IO_RETURN_ON_THROTTLE 0x40000 +#define IO_SINGLE_WRITER 0x80000 +#define IO_SYSCALL_DISPATCH 0x100000 /* I/O was originated from a file table syscall */ +#define IO_SWAP_DISPATCH 0x200000 /* I/O was originated from the swap layer */ +#define IO_SKIP_ENCRYPTION 0x400000 /* Skips en(de)cryption on the IO. 
Must be initiated from kernel */ +#define IO_EVTONLY 0x800000 /* the i/o is being done on an fd that's marked O_EVTONLY */ + +errno_t vnode_create(uint32_t size, void *data, vnode_t *vpp); +errno_t vnode_initialize(uint32_t size, void *data, vnode_t *vpp); +void vnode_rele(vnode_t vp); +mount_t vnode_mount(vnode_t vp); +int vnode_issystem(vnode_t vp); +int vnode_isreg(vnode_t vp); +int vnode_isdir(vnode_t vp); +int vnode_islnk(vnode_t vp); +void vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags); +void vnode_GetAttrInternal (vnode_t vp, UVFSFileAttributes *psOutAttr ); +#endif /* lf_hfs_vnode_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_vnops.c b/livefiles_hfs_plugin/lf_hfs_vnops.c new file mode 100644 index 0000000..d040e97 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vnops.c @@ -0,0 +1,3173 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vnops.c + * livefiles_hfs + * + * Created by Or Haimovich on 20/3/18. 
+ */ + +#include +#include + +#include "lf_hfs_vnops.h" +#include "lf_hfs.h" +#include "lf_hfs_catalog.h" +#include "lf_hfs_dirops_handler.h" +#include "lf_hfs_fileops_handler.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_attrlist.h" +#include "lf_hfs_btree.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_readwrite_ops.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_endian.h" +#include +#include +#include "lf_hfs_link.h" +#include "lf_hfs_journal.h" +#include "lf_hfs_chash.h" + +#define DOT_DIR_SIZE (UVFS_DIRENTRY_RECLEN(1)) +#define DOT_X2_DIR_SIZE (UVFS_DIRENTRY_RECLEN(2)) + + +/* Options for hfs_removedir and hfs_removefile */ +#define HFSRM_SKIP_RESERVE 0x01 +#define _PATH_RSRCFORKSPEC "/..namedfork/rsrc" + +void +replace_desc(struct cnode *cp, struct cat_desc *cdp) +{ + // fixes 4348457 and 4463138 + if (&cp->c_desc == cdp) { + return; + } + + /* First release allocated name buffer */ + if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) { + const u_int8_t *name = cp->c_desc.cd_nameptr; + + cp->c_desc.cd_nameptr = 0; + cp->c_desc.cd_namelen = 0; + cp->c_desc.cd_flags &= ~CD_HASBUF; + hfs_free((void*)name); + } + bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc)); + + /* Cnode now owns the name buffer */ + cdp->cd_nameptr = NULL; + cdp->cd_namelen = 0; + cdp->cd_flags &= ~CD_HASBUF; +} + +static void SynthesizeDotAndDotX2(u_int64_t uCnid, void* puBuff, bool bIsDot, bool bIsLastEntry) +{ + UVFSDirEntry* psDotEntry = (UVFSDirEntry*)puBuff; + uint8_t uNameLen = bIsDot? 1: 2; + memset( psDotEntry, 0, UVFS_DIRENTRY_RECLEN(uNameLen)); + + psDotEntry->de_fileid = uCnid; + psDotEntry->de_filetype = UVFS_FA_TYPE_DIR; + psDotEntry->de_reclen = bIsLastEntry ? 
0 : UVFS_DIRENTRY_RECLEN(uNameLen); + psDotEntry->de_nextcookie = uNameLen; + psDotEntry->de_namelen = uNameLen; + uint8_t* puNameBuf = (uint8_t*)psDotEntry->de_name; + puNameBuf[0] = '.'; + if ( bIsDot ) + { + puNameBuf[1] = '\0'; + } + else + { + puNameBuf[1] = '.'; + puNameBuf[2] = '\0'; + } +} + +static int SyntisizeEntries(uint64_t* puOffset, ReadDirBuff_s* psReadDirBuffer, int iIsExtended, u_int64_t uCnid, u_int64_t uParentCnid, UVFSDirEntry** ppsDotDotEntry) +{ + int iError = 0; + void* pvBuff = NULL; + if (!iIsExtended) + { + //Curently not supporting nonextended ReadDir + return ENOTSUP; + } + + if (DOT_DIR_SIZE > psReadDirBuffer->uBufferResid) + { + goto exit; + } + + pvBuff = hfs_malloc(DOT_DIR_SIZE); + if (pvBuff == NULL) + { + LFHFS_LOG(LEVEL_ERROR, "SyntisizeEntries: Failed to allocate buffer for DOT entry\n"); + return ENOMEM; + } + + if (*puOffset == 0) + { + bool bIsEnoughRoomForAll = (DOT_DIR_SIZE + DOT_X2_DIR_SIZE > psReadDirBuffer->uBufferResid); + SynthesizeDotAndDotX2(uCnid, pvBuff, true, bIsEnoughRoomForAll); + memcpy(psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(psReadDirBuffer) , pvBuff, DOT_DIR_SIZE); + (*puOffset)++; + psReadDirBuffer->uBufferResid -= DOT_DIR_SIZE; + } + + if (DOT_X2_DIR_SIZE > psReadDirBuffer->uBufferResid) + { + goto exit; + } + + hfs_free(pvBuff); + pvBuff = hfs_malloc(DOT_X2_DIR_SIZE); + if (pvBuff == NULL) + { + LFHFS_LOG(LEVEL_ERROR, "SyntisizeEntries: Failed to allocate buffer for DOTx2 entry\n"); + return ENOMEM; + } + + if (*puOffset == 1) + { + SynthesizeDotAndDotX2(uParentCnid, pvBuff, false, false); + memcpy(psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(psReadDirBuffer), pvBuff, DOT_X2_DIR_SIZE); + *ppsDotDotEntry = (UVFSDirEntry*) (psReadDirBuffer->pvBuffer + READDIR_BUF_OFFSET(psReadDirBuffer)); + (*puOffset)++; + psReadDirBuffer->uBufferResid -= DOT_X2_DIR_SIZE; + } + +exit: + if (pvBuff) + hfs_free(pvBuff); + return iError; +} + +/* + * hfs_vnop_readdir reads directory entries into the buffer pointed + * 
to by uio, in a filesystem independent format. Up to uio_resid + * bytes of data can be transferred. The data in the buffer is a + * series of packed dirent structures where each one contains the + * following entries: + * + * u_int32_t d_fileno; // file number of entry + * u_int16_t d_reclen; // length of this record + * u_int8_t d_type; // file type + * u_int8_t d_namlen; // length of string in d_name + * char d_name[MAXNAMELEN+1]; // null terminated file name + * + * The current position (uio_offset) refers to the next block of + * entries. The offset can only be set to a value previously + * returned by hfs_vnop_readdir or zero. This offset does not have + * to match the number of bytes returned (in uio_resid). + * + * In fact, the offset used by HFS is essentially an index (26 bits) + * with a tag (6 bits). The tag is for associating the next request + * with the current request. This enables us to have multiple threads + * reading the directory while the directory is also being modified. + * + * Each tag/index pair is tied to a unique directory hint. The hint + * contains information (filename) needed to build the catalog b-tree + * key for finding the next set of entries. + * + * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED), + * do NOT synthesize entries for "." and "..". 
/*
 * hfs_vnop_readdir - fill psReadDirBuffer with packed UVFSDirEntry records
 * for the directory vnode vp, starting at cookie puCookie.
 *
 * The cookie is an index (HFS_INDEX_MASK bits) combined with a per-directory
 * tag in the high bits; with VNODE_READDIR_REQSEEKOFF the top 32 bits carry
 * an NFS cnid hint.  "." and ".." are synthesized unless the directory is
 * marked C_DELETED.  On return *numdirent holds the number of entries
 * emitted and *eofflag (if non-NULL) whether the end was reached.
 * Requires nothing locked on entry; takes the cnode lock exclusive (the
 * dirhint calls require it).
 */
int
hfs_vnop_readdir(vnode_t vp, int *eofflag, int *numdirent, ReadDirBuff_s* psReadDirBuffer, uint64_t puCookie, int flags)
{
    struct cnode *cp = NULL;
    struct hfsmount *hfsmp = VTOHFS(vp);
    directoryhint_t *dirhint = NULL;
    directoryhint_t localhint;
    bool bLocalEOFflag = false;
    int error = 0;
    uint64_t offset;
    user_size_t user_original_resid = psReadDirBuffer->uBufferResid;
    int items = 0;
    cnid_t cnid_hint = 0;
    int bump_valence = 0;
    *numdirent = 0;
    uint64_t startoffset = offset = puCookie;
    bool extended = (flags & VNODE_READDIR_EXTENDED);
    bool nfs_cookies = extended && (flags & VNODE_READDIR_REQSEEKOFF);

    if (psReadDirBuffer->pvBuffer == NULL || psReadDirBuffer->uBufferResid < sizeof(UVFSDirEntry))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readdir: readDir input is not valid\n");
        return EINVAL;
    }

    /* Note that the dirhint calls require an exclusive lock. */
    if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readdir: Failed to lock vnode\n");
        return error;
    }
    cp = VTOC(vp);

    /* Pick up cnid hint (if any): high 32 bits of the NFS cookie. */
    if (nfs_cookies)
    {
        cnid_hint = (cnid_t)(offset >> 32);
        offset &= 0x00000000ffffffffLL;
        if (cnid_hint == INT_MAX)
        { /* searched past the last item */
            bLocalEOFflag = true;
            goto out;
        }
    }

    /*
     * Synthesize entries for "." and "..", unless the directory has
     * been deleted, but not closed yet (lazy delete in progress).
     */
    UVFSDirEntry* psDotDotEntry = NULL;
    if (!(cp->c_flag & C_DELETED))
    {
        if ( (error = SyntisizeEntries(&offset, psReadDirBuffer, extended, cp->c_cnid, cp->c_parentcnid, &psDotDotEntry)) != 0 )
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readdir: Failed to syntisize dot/dotdot entries\n");
            goto out;
        }
    }

    /* Convert offset into a catalog directory index (offsets 0/1 were "."/".."). */
    int index = (offset & HFS_INDEX_MASK) - 2;
    unsigned int tag = (unsigned int) (offset & ~HFS_INDEX_MASK);

    /* Lock catalog during cat_findname and cat_getdirentries. */
    int lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    /* When called from NFS, try and resolve a cnid hint. */
    if (nfs_cookies && cnid_hint != 0)
    {
        if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0)
        {
            if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid)
            {
                localhint.dh_index = index - 1;
                localhint.dh_time = 0;
                bzero(&localhint.dh_link, sizeof(localhint.dh_link));
                dirhint = &localhint; /* don't forget to release the descriptor */
            }
            else
            {
                cat_releasedesc(&localhint.dh_desc);
            }
        }
    }

    /* Get a directory hint (cnode must be locked exclusive) */
    if (dirhint == NULL)
    {
        dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);

        /* Hide tag from catalog layer. */
        dirhint->dh_index &= HFS_INDEX_MASK;
        if (dirhint->dh_index == HFS_INDEX_MASK)
        {
            dirhint->dh_index = -1;
        }
    }

    if (index == 0)
    {
        dirhint->dh_threadhint = cp->c_dirthreadhint;
    }
    else
    {
        /*
         * If we have a non-zero index, there is a possibility that during the last
         * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
         * then we don't want to return any new entries for the caller. Just return 0
         * items, mark the eofflag, and bail out. Because we won't have done any work, the
         * code at the end of the function will release the dirhint for us.
         *
         * Don't forget to unlock the catalog lock on the way out, too.
         */
        if (dirhint->dh_desc.cd_flags & CD_EOF)
        {
            error = 0;
            bLocalEOFflag = true;
            offset = startoffset;
            if (user_original_resid > 0) {
                psReadDirBuffer->uBufferResid = user_original_resid;
            }
            hfs_systemfile_unlock (hfsmp, lockflags);

            goto seekoffcalc;
        }
    }

    /* Pack the buffer with dirent entries. */
    error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, psReadDirBuffer, flags, &items, &bLocalEOFflag, psDotDotEntry);

    if (index == 0 && error == 0)
    {
        cp->c_dirthreadhint = dirhint->dh_threadhint;
    }

    hfs_systemfile_unlock(hfsmp, lockflags);

    if (error != 0)
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readdir: Failed to get dir entries\n");
        goto out;
    }

    /* Get index to the next item */
    index += items;

    if (items >= (int)cp->c_entries)
    {
        bLocalEOFflag = true;
    }

    /*
     * Detect valence FS corruption.
     *
     * We are holding the cnode lock exclusive, so there should not be
     * anybody modifying the valence field of this cnode. If we enter
     * this block, that means we observed filesystem corruption, because
     * this directory reported a valence of 0, yet we found at least one
     * item. In this case, we need to minimally self-heal this
     * directory to prevent userland from tripping over a directory
     * that appears empty (getattr of valence reports 0), but actually
     * has contents.
     *
     * We'll force the cnode update at the end of the function after
     * completing all of the normal getdirentries steps.
     */
    if ((cp->c_entries == 0) && (items > 0))
    {
        /* disk corruption */
        cp->c_entries++;
        /* Mark the cnode as dirty. */
        cp->c_flag |= C_MODIFIED;
        LFHFS_LOG(LEVEL_DEBUG, "hfs_vnop_readdir: repairing valence to non-zero! \n");
        bump_valence++;
    }


    /* Convert catalog directory index back into an offset (re-adding the tag). */
    while (tag == 0)
        tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
    offset = ((index + 2) | tag);
    dirhint->dh_index |= tag;

seekoffcalc:
    cp->c_touch_acctime = TRUE;

    if (numdirent)
    {
        /* Account for the synthesized "." / ".." entries when we started there. */
        if (startoffset == 0)
            items += 2;
        else if (startoffset == 1)
            items += 1;

        *numdirent = items;
    }

out:
    /* If we didn't do anything then go ahead and dump the hint. */
    if ((dirhint != NULL) && (dirhint != &localhint) && (offset == startoffset))
    {
        hfs_reldirhint(cp, dirhint);
        bLocalEOFflag = true;
    }

    if (eofflag)
    {
        *eofflag = bLocalEOFflag;
    }

    if (dirhint == &localhint)
    {
        cat_releasedesc(&localhint.dh_desc);
    }

    if (bump_valence)
    {
        /* force the update before dropping the cnode lock*/
        hfs_update(vp, 0);
    }

    hfs_unlock(cp);

    return (error);
}

/*
 * readdirattr operation will return attributes for the items in the
 * directory specified.
 *
 * It does not do . and .. entries. The problem is if you are at the root of the
 * hfs directory and go to .. you could be crossing a mountpoint into a
 * different (ufs) file system. The attributes that apply for it may not
 * apply for the file system you are doing the readdirattr on. To make life
 * simpler, this call will only return entries in its directory, hfs like.
 */
int
hfs_vnop_readdirattr(vnode_t vp, int *eofflag, int *numdirent, ReadDirBuff_s* psReadDirBuffer, uint64_t puCookie)
{
    int error;
    uint32_t newstate;   /* updated by hfs_readdirattr_internal; not consumed here */
    /* Upper bound on entries: buffer size over the minimum (zero-name) record length. */
    uint32_t uMaxCount = (uint32_t) psReadDirBuffer->uBufferResid / _UVFS_DIRENTRYATTR_RECLEN(UVFS_DIRENTRYATTR_NAMEOFF,0);

    if (psReadDirBuffer->pvBuffer == NULL || psReadDirBuffer->uBufferResid < sizeof(UVFSDirEntry))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readdirattr: buffer input is invalid\n");
        return EINVAL;
    }

    error = hfs_readdirattr_internal(vp, psReadDirBuffer, uMaxCount, &newstate, eofflag, numdirent, puCookie);

    return (error);
}
/*
 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
 * without a journal. Note that the volume bitmap does not get written;
 * we rely on fsck_hfs to fix that up (which it can do without any loss
 * of data).
 */
static int
hfs_metasync_all(struct hfsmount *hfsmp)
{
    int lockflags;

    /* Lock all of the B-trees so we get a mutually consistent state */
    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

#if LF_HFS_FULL_VNODE_SUPPORT
    //Currently we don't keep any cache for btree buffers.
    //When we will have a cache we will have to flush it out here.
    /* Sync each of the B-trees */
    if (hfsmp->hfs_catalog_vp)
        hfs_btsync(hfsmp->hfs_catalog_vp, 0);
    if (hfsmp->hfs_extents_vp)
        hfs_btsync(hfsmp->hfs_extents_vp, 0);
    if (hfsmp->hfs_attribute_vp)
        hfs_btsync(hfsmp->hfs_attribute_vp, 0);
#endif
    hfs_systemfile_unlock(hfsmp, lockflags);

    return 0;
}

/*
 * hfs_fsync - flush a vnode's data and/or metadata to stable storage.
 *
 * waitfor:   MNT_WAIT requests synchronous behavior.
 * fsyncmode: HFS_FSYNC for the default (data-integrity) sync; other modes
 *            (e.g. HFS_FSYNC_FULL) force all metadata out via the journal
 *            or hfs_metasync_all on journal-less volumes.
 *
 * cnode must be locked on entry; returns 0 or an errno from hfs_update /
 * hfs_metasync_all.
 */
int
hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode)
{
    struct cnode *cp = VTOC(vp);
    struct filefork *fp = NULL;
    int retval = 0;
    struct timeval tv;
    int took_trunc_lock = 0;
    int fsync_default = 1;

    /*
     * Applications which only care about data integrity rather than full
     * file integrity may opt out of (delay) expensive metadata update
     * operations as a performance optimization.
     */
    int wait = (waitfor == MNT_WAIT); /* attributes necessary for data retrieval */
    if (fsyncmode != HFS_FSYNC)
        fsync_default = 0;

    /* HFS directories don't have any data blocks. */
    if (vnode_isdir(vp))
        goto metasync;
    fp = VTOF(vp);

    /*
     * For system files flush the B-tree header and
     * for regular files write out any clusters
     */
    if (vnode_issystem(vp))
    {
        if (VTOF(vp)->fcbBTCBPtr != NULL)
        {
            // XXXdbg
            if (VTOHFS(vp)->jnl == NULL)
            {
                BTFlushPath(VTOF(vp));
            }
        }
    }
    else
    {
//TBD- Since we always flush the data for every file when it is being updated
// we don't need to do that here.
//        hfs_unlock(cp);
//        hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
//        took_trunc_lock = 1;
//
//        if (fp->ff_unallocblocks != 0)
//        {
//            hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
//
//            hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
//        }
//
//#if LF_HFS_FULL_VNODE_SUPPORT
//        /* Don't hold cnode lock when calling into cluster layer. */
//        (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
//#endif
//
//        hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
    }

    /*
     * When MNT_WAIT is requested and the zero fill timeout
     * has expired then we must explicitly zero out any areas
     * that are currently marked invalid (holes).
     *
     * Files with NODUMP can bypass zero filling here.
     */
    if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
        ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
        ((cp->c_bsdflags & UF_NODUMP) == 0) &&
        (vnode_issystem(vp) ==0) &&
        cp->c_zftimeout != 0)))
    {
        microtime(&tv);
        if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && fsync_default && tv.tv_sec < (long)cp->c_zftimeout)
        {
            /* Remember that a force sync was requested. */
            cp->c_flag |= C_ZFWANTSYNC;
            goto datasync;
        }
        if (!TAILQ_EMPTY(&fp->ff_invalidranges))
        {
            /* Upgrade to an exclusive truncate lock before touching invalid ranges. */
            if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER))
            {
                hfs_unlock(cp);
                if (took_trunc_lock) {
                    hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
                }
                hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
                hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
                took_trunc_lock = 1;
            }

#if LF_HFS_FULL_VNODE_SUPPORT
            hfs_flush_invalid_ranges(vp);
            hfs_unlock(cp);
            (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
            hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
#endif
        }
    }

datasync:
    if (took_trunc_lock)
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

// TBD - symlink can't be dirty since we always write the data fully to the device
//    else if (fsync_default && vnode_islnk(vp) && vnode_hasdirtyblks(vp) && vnode_isrecycled(vp))
//    {
//        /*
//         * If it's a symlink that's dirty and is about to be recycled,
//         * we need to flush the journal.
//         */
//        fsync_default = 0;
//    }

metasync:
    if (vnode_isreg(vp) && vnode_issystem(vp))
    {
        /* System files: just record the last-sync time on the B-tree. */
        if (VTOF(vp)->fcbBTCBPtr != NULL)
        {
            microtime(&tv);
            BTSetLastSync(VTOF(vp), (u_int32_t) tv.tv_sec);
        }
        cp->c_touch_acctime = FALSE;
        cp->c_touch_chgtime = FALSE;
        cp->c_touch_modtime = FALSE;
    }
    else
    {
        retval = hfs_update(vp, HFS_UPDATE_FORCE);
        /*
         * When MNT_WAIT is requested push out the catalog record for
         * this file. If they asked for a full fsync, we can skip this
         * because the journal_flush or hfs_metasync_all will push out
         * all of the metadata changes.
         */
#if 0
        /*
         * As we are not supporting any write buf caches / delay writes,
         * this is not needed.
         */
        if ((retval == 0) && wait && fsync_default && cp->c_hint &&
            !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
            hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint);
        }
#endif
        /*
         * If this was a full fsync, make sure all metadata
         * changes get to stable storage.
         */
        if (!fsync_default)
        {
            if (VTOHFS(vp)->jnl) {
                if (fsyncmode == HFS_FSYNC_FULL)
                    hfs_flush(VTOHFS(vp), HFS_FLUSH_FULL);
                else
                    hfs_flush(VTOHFS(vp), HFS_FLUSH_JOURNAL_BARRIER);
            }
            else
            {
                /* No journal: sync every B-tree, then flush the device cache. */
                retval = hfs_metasync_all(VTOHFS(vp));
                hfs_flush(VTOHFS(vp), HFS_FLUSH_CACHE);
            }
        }
    }

#if LF_HFS_FULL_VNODE_SUPPORT
    if (!hfs_is_dirty(cp) && !ISSET(cp->c_flag, C_DELETED))
        vnode_cleardirty(vp);
#endif

    return (retval);
}
 * This function may be used to remove directories if they have
 * lots of EA's -- note the 'allow_dirs' argument.
 *
 * This function is able to delete blocks & fork data for the resource
 * fork even if it does not exist in core (and have a backing vnode).
 * It should infer the correct behavior based on the number of blocks
 * in the cnode and whether or not the resource fork pointer exists or
 * not. As a result, one only need pass in the 'vp' corresponding to the
 * data fork of this file (or main vnode in the case of a directory).
 * Passing in a resource fork will result in an error.
 *
 * Because we do not create any vnodes in this function, we are not at
 * risk of deadlocking against ourselves by double-locking.
 *
 * Requires cnode and truncate locks to be held.
 */
/*
 * (review) Parameters:
 *   dvp/vp       - parent and target vnodes (target is the data fork, or the dir vnode)
 *   cnp          - component name used to build the catalog key (rename-race safe)
 *   flags        - VNODE_REMOVE_* flags from the caller
 *   skip_reserve - non-zero skips the cat_preflight space reservation
 *   allow_dirs   - permit directories (EA-heavy dirs / dir hardlinks)
 *   only_unlink  - only move to the hidden directory; defer storage release
 * Returns 0 or errno; on error C_DELETED is cleared again at 'out:'.
 */
int
hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
               int flags, int skip_reserve, int allow_dirs, int only_unlink)
{
    struct cnode *cp;
    struct cnode *dcp;
    struct vnode *rsrc_vp = NULL;
    struct hfsmount *hfsmp;
    struct cat_desc desc;
    int dataforkbusy = 0;
    int rsrcforkbusy = 0;
    int lockflags;
    int error = 0;
    int started_tr = 0;
    int isbigfile = 0, defer_remove=0;
    bool isdir= false;
    int update_vh = 0;

    cp = VTOC(vp);
    dcp = VTOC(dvp);
    hfsmp = VTOHFS(vp);

    /* Check if we lost a race post lookup. */
    if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
        return (EINVAL);
    }

    if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error))
    {
        return error;
    }

    /* Make sure a remove is permitted */
    /* Don't allow deleting the journal or journal_info_block. */
    /* NOTE(review): "premited" below is a typo ("permitted") in a runtime log string;
     * left untouched here since this edit changes comments only. */
    if (VNODE_IS_RSRC(vp) || vnode_issystem(vp) || IsEntryAJnlFile(hfsmp, cp->c_fileid))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_removefile: Removing %s file is not premited\n", VNODE_IS_RSRC(vp) ? "Resource" : (vnode_issystem(vp)? "System" : "Journal"));
        return (EPERM);
    }
    else
    {
        /*
         * We know it's a data fork.
         * Probe the cnode to see if we have a valid resource fork
         * in hand or not.
         */
        rsrc_vp = cp->c_rsrc_vp;
    }

    /*
     * Hard links require special handling.
     */
    if (cp->c_flag & C_HARDLINK)
    {
        /* A directory hard link with a link count of one is
         * treated as a regular directory.  Therefore it should
         * only be removed using rmdir().
         */
        if (IS_DIR(vp) && (cp->c_linkcount == 1) && (allow_dirs == 0))
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_removefile: Trying to remove an hardlink directory\n");
            return (EPERM);
        }

        return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
    }

    /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
    if (IS_DIR(vp))
    {
        if (!allow_dirs)
        {
            return (EPERM);    /* POSIX */
        }
        isdir = true;
    }

    /* Sanity check the parent ids. */
    if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
        (cp->c_parentcnid != dcp->c_fileid))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_removefile: Parent ID's are wrong\n");
        return (EINVAL);
    }

    dcp->c_flag |= C_DIR_MODIFICATION;

    // this guy is going away so mark him as such
    cp->c_flag |= C_DELETED;

    /*
     * If the caller was operating on a file (as opposed to a
     * directory with EAs), then we need to figure out
     * whether or not it has a valid resource fork vnode.
     *
     * If there was a valid resource fork vnode, then we need
     * to use hfs_truncate to eliminate its data.  If there is
     * no vnode, then we hold the cnode lock which would
     * prevent it from being created.  As a result,
     * we can use the data deletion functions which do not
     * require that a cnode/vnode pair exist.
     */

    /* Check if this file is being used. */
    if ( !isdir )
    {
        /* NOTE(review): vnode_isinuse() checks are stubbed out in this port,
         * so both fork-busy flags are always 0 here. */
        dataforkbusy = 0; /*vnode_isinuse(vp, 0);*/
        /*
         * At this point, we know that 'vp' points to the
         * a data fork because we checked it up front. And if
         * there is no rsrc fork, rsrc_vp will be NULL.
         */
        if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks))
        {
            rsrcforkbusy = 0; /*vnode_isinuse(rsrc_vp, 0);*/
        }

        /* Check if we have to break the deletion into multiple pieces. */
        isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
    }

    /* Check if the file has xattrs.  If it does we'll have to delete them in
       individual transactions in case there are too many */
    if ((hfsmp->hfs_attribute_vp != NULL) && (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)
    {
        defer_remove = 1;
    }

    /* If we are explicitly told to only unlink item and move to hidden dir, then do it */
    if (only_unlink)
    {
        defer_remove = 1;
    }

    /*
     * Carbon semantics prohibit deleting busy files.
     * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
     */
    if (dataforkbusy || rsrcforkbusy)
    {
        if ((flags & VNODE_REMOVE_NODELETEBUSY) || (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0))
        {
            error = EBUSY;
            goto out;
        }
    }

    /*
     * Do a ubc_setsize to indicate we need to wipe contents if:
     *  1) item is a regular file.
     *  2) Neither fork is busy AND we are not told to unlink this.
     *
     * We need to check for the defer_remove since it can be set without
     * having a busy data or rsrc fork
     */
    if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0))
    {
        /*
         * A ubc_setsize can cause a pagein so defer it
         * until after the cnode lock is dropped.  The
         * cnode lock cannot be dropped/reacquired here
         * since we might already hold the journal lock.
         */
        if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile)
        {
            cp->c_flag |= C_NEED_DATA_SETSIZE;
        }
        if (!rsrcforkbusy && rsrc_vp)
        {
            cp->c_flag |= C_NEED_RSRC_SETSIZE;
        }
    }

    if ((error = hfs_start_transaction(hfsmp)) != 0)
    {
        goto out;
    }
    started_tr = 1;

    /*
     * Prepare to truncate any non-busy forks.  Busy forks will
     * get truncated when their vnode goes inactive.
     * Note that we will only enter this region if we
     * can avoid creating an open-unlinked file.  If
     * either region is busy, we will have to create an open
     * unlinked file.
     *
     * Since we are deleting the file, we need to stagger the runtime
     * modifications to do things in such a way that a crash won't
     * result in us getting overlapped extents or any other
     * bad inconsistencies.  As such, we call prepare_release_storage
     * which updates the UBC, updates quota information, and releases
     * any loaned blocks that belong to this file.  No actual
     * truncation or bitmap manipulation is done until *AFTER*
     * the catalog record is removed.
     */
    if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0))
    {
        if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0)
        {
            error = hfs_prepare_release_storage (hfsmp, vp);
            if (error)
            {
                goto out;
            }
            update_vh = 1;
        }

        /*
         * If the resource fork vnode does not exist, we can skip this step.
         */
        if (!rsrcforkbusy && rsrc_vp)
        {
            error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
            if (error)
            {
                goto out;
            }
            update_vh = 1;
        }
    }

    /*
     * Protect against a race with rename by using the component
     * name passed in and parent id from dvp (instead of using
     * the cp->c_desc which may have changed).  Also, be aware that
     * because we allow directories to be passed in, we need to special case
     * this temporary descriptor in case we were handed a directory.
     */
    if (isdir)
    {
        desc.cd_flags = CD_ISDIR;
    }
    else
    {
        desc.cd_flags = 0;
    }
    desc.cd_encoding = cp->c_desc.cd_encoding;
    desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
    desc.cd_namelen = cnp->cn_namelen;
    desc.cd_parentcnid = dcp->c_fileid;
    desc.cd_hint = cp->c_desc.cd_hint;
    desc.cd_cnid = cp->c_cnid;
    struct timeval tv;
    microtime(&tv);

    /*
     * There are two cases to consider:
     *  1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
     *  2. File is not in use ==> remove the file
     *
     * We can get a directory in case 1 because it may have had lots of attributes,
     * which need to get removed here.
     */
    if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove)
    {
        char delname[32];
        struct cat_desc to_desc;
        struct cat_desc todir_desc;

        /*
         * Orphan this file or directory (move to hidden directory).
         * Again, we need to take care that we treat directories as directories,
         * and files as files.  Because directories with attributes can be passed in
         * check to make sure that we have a directory or a file before filling in the
         * temporary descriptor's flags.  We keep orphaned directories AND files in
         * the FILE_HARDLINKS private directory since we're generalizing over all
         * orphaned filesystem objects.
         */
        bzero(&todir_desc, sizeof(todir_desc));
        todir_desc.cd_parentcnid = 2;

        MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
        bzero(&to_desc, sizeof(to_desc));
        to_desc.cd_nameptr = (const u_int8_t *)delname;
        to_desc.cd_namelen = strlen(delname);
        to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
        if (isdir)
        {
            to_desc.cd_flags = CD_ISDIR;
        }
        else
        {
            to_desc.cd_flags = 0;
        }
        to_desc.cd_cnid = cp->c_cnid;

        lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
        if (!skip_reserve)
        {
            if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL)))
            {
                hfs_systemfile_unlock(hfsmp, lockflags);
                goto out;
            }
        }

        error = cat_rename(hfsmp, &desc, &todir_desc, &to_desc, (struct cat_desc *)NULL);

        if (error == 0)
        {
            hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
            if (isdir == 1)
            {
                INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
            }
            (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS], &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);

            /* Update the parent directory */
            if (dcp->c_entries > 0)
                dcp->c_entries--;
            if (isdir == 1)
            {
                DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
            }
            dcp->c_dirchangecnt++;
            hfs_incr_gencount(dcp);

            dcp->c_ctime = tv.tv_sec;
            dcp->c_mtime = tv.tv_sec;
            (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);

            /* Update the file or directory's state */
            cp->c_flag |= C_DELETED;
            cp->c_ctime = tv.tv_sec;
            --cp->c_linkcount;
            (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
        }

        hfs_systemfile_unlock(hfsmp, lockflags);
        if (error)
            goto out;
    }
    else
    {
        /*
         * Nobody is using this item; we can safely remove everything.
         */

        struct filefork *temp_rsrc_fork = NULL;
        u_int32_t fileid = cp->c_fileid;

        /*
         * Figure out if we need to read the resource fork data into
         * core before wiping out the catalog record.
         *
         * 1) Must not be a directory
         * 2) cnode's c_rsrcfork ptr must be NULL.
         * 3) rsrc fork must have actual blocks
         */
        if ((isdir == 0) && (cp->c_rsrcfork == NULL) && (cp->c_blocks - VTOF(vp)->ff_blocks))
        {
            /*
             * The resource fork vnode & filefork did not exist.
             * Create a temporary one for use in this function only.
             */
            temp_rsrc_fork = hfs_mallocz(sizeof(struct filefork));
            temp_rsrc_fork->ff_cp = cp;
            rl_init(&temp_rsrc_fork->ff_invalidranges);
        }

        lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

        /* Look up the resource fork first, if necessary */
        if (temp_rsrc_fork)
        {
            error = cat_lookup (hfsmp, &desc, 1, (struct cat_desc*) NULL, (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
            if (error)
            {
                hfs_free(temp_rsrc_fork);
                hfs_systemfile_unlock (hfsmp, lockflags);
                goto out;
            }
        }

        if (!skip_reserve)
        {
            if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL)))
            {
                if (temp_rsrc_fork)
                {
                    hfs_free(temp_rsrc_fork);
                }
                hfs_systemfile_unlock(hfsmp, lockflags);
                goto out;
            }
        }

        error = cat_delete(hfsmp, &desc, &cp->c_attr);

        if (error && error != ENXIO && error != ENOENT)
        {
            LFHFS_LOG(LEVEL_ERROR, "hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
                      cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
        }

        if (error == 0)
        {
            /* Update the parent directory */
            if (dcp->c_entries > 0)
            {
                dcp->c_entries--;
            }
            dcp->c_dirchangecnt++;
            hfs_incr_gencount(dcp);

            dcp->c_ctime = tv.tv_sec;
            dcp->c_mtime = tv.tv_sec;
            (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
        }

        hfs_systemfile_unlock(hfsmp, lockflags);

        if (error)
        {
            if (temp_rsrc_fork)
            {
                hfs_free(temp_rsrc_fork);
            }
            goto out;
        }

        /*
         * Now that we've wiped out the catalog record, the file effectively doesn't
         * exist anymore. So update the quota records to reflect the loss of the
         * data fork and the resource fork.
         */

        if (IS_LNK(vp) && cp->c_datafork->ff_symlinkptr)
        {
            hfs_free(cp->c_datafork->ff_symlinkptr);
            cp->c_datafork->ff_symlinkptr = NULL;
        }

        /*
         * If we didn't get any errors deleting the catalog entry, then go ahead
         * and release the backing store now.  The filefork pointers are still valid.
         */
        if (temp_rsrc_fork)
        {
            error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
        }
        else
        {
            /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
            error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
        }
        if (error)
        {
            /*
             * If we encountered an error updating the extents and bitmap,
             * mark the volume inconsistent.  At this point, the catalog record has
             * already been deleted, so we can't recover it at this point. We need
             * to proceed and update the volume header and mark the cnode C_NOEXISTS.
             * The subsequent fsck should be able to recover the free space for us.
             */
            hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
        }
        else
        {
            /* reset update_vh to 0, since hfs_release_storage should have done it for us */
            update_vh = 0;
        }

        /* Get rid of the temporary rsrc fork */
        if (temp_rsrc_fork)
        {
            hfs_free(temp_rsrc_fork);
        }

        cp->c_flag |= C_NOEXISTS;
        cp->c_flag &= ~C_DELETED;

        cp->c_touch_chgtime = TRUE;
        --cp->c_linkcount;

        /*
         * We must never get a directory if we're in this else block.  We could
         * accidentally drop the number of files in the volume header if we did.
         */
        hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
    }

    /*
     * All done with this cnode's descriptor...
     *
     * Note: all future catalog calls for this cnode must be by
     * fileid only.  This is OK for HFS (which doesn't have file
     * thread records) since HFS doesn't support the removal of
     * busy files.
     */
    cat_releasedesc(&cp->c_desc);

out:
    if (error)
    {
        cp->c_flag &= ~C_DELETED;
    }

    if (update_vh)
    {
        /*
         * If we bailed out earlier, we may need to update the volume header
         * to deal with the borrowed blocks accounting.
         */
        hfs_volupdate (hfsmp, VOL_UPDATE, 0);
    }

    if (started_tr)
    {
        hfs_end_transaction(hfsmp);
    }

    dcp->c_flag &= ~C_DIR_MODIFICATION;
    //TBD - We have wakeup here but can't see anyone who's msleeping on c_flag...
    //wakeup((caddr_t)&dcp->c_flag);

    return (error);
}

/*
 * Remove a file or link.
 */
/*
 * (review) Thin wrapper over hfs_removefile: takes the truncate lock and the
 * dcp/cp lock pair, restarts once ('relock') if a resource-fork vnode is
 * discovered, checks for a rmdir race on the parent, then delegates.
 * NOTE(review): rvp aliases cp->c_rsrc_vp and is passed to hfs_free() on exit;
 * freeing a live resource-fork vnode pointer here looks suspicious — confirm
 * ownership semantics in this port (upstream used vnode_put, not free).
 */
int
hfs_vnop_remove(struct vnode* psParentDir,struct vnode *psFileToRemove, struct componentname* psCN, int iFlags)
{
    struct cnode *dcp = VTOC(psParentDir);
    struct cnode *cp = VTOC(psFileToRemove);
    struct vnode *rvp = NULL;
    int error = 0;

    if (psParentDir == psFileToRemove)
    {
        return (EINVAL);
    }

relock:

    hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

    if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
    {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        if (rvp)
        {
            hfs_free(rvp);
        }
        return (error);
    }

    /*
     * Lazily respond to determining if there is a valid resource fork
     * vnode attached to 'cp' if it is a regular file or symlink.
     * If the vnode does not exist, then we may proceed without having to
     * create it.
     *
     * If, however, it does exist, then we need to acquire an iocount on the
     * vnode after acquiring its vid.  This ensures that if we have to do I/O
     * against it, it can't get recycled from underneath us in the middle
     * of this call.
     *
     * Note: this function may be invoked for directory hardlinks, so just skip these
     * steps if 'vp' is a directory.
     */
    enum vtype vtype = psFileToRemove->sFSParams.vnfs_vtype;
    if ((vtype == VLNK) || (vtype == VREG))
    {
        /* Only restarts once: after 'goto relock' rvp is non-NULL. */
        if ((cp->c_rsrc_vp) && (rvp == NULL))
        {
            /* We need to acquire the rsrc vnode */
            rvp = cp->c_rsrc_vp;

            /* Unlock everything to acquire iocount on the rsrc vnode */
            hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
            hfs_unlockpair (dcp, cp);

            goto relock;
        }
    }

    /*
     * Check to see if we raced rmdir for the parent directory
     * hfs_removefile already checks for a race on vp/cp
     */
    if (dcp->c_flag & (C_DELETED | C_NOEXISTS))
    {
        error = ENOENT;
        goto rm_done;
    }

    error = hfs_removefile(psParentDir, psFileToRemove, psCN, iFlags, 0, 0, 0);

    /*
     * Drop the truncate lock before unlocking the cnode
     * (which can potentially perform a vnode_put and
     * recycle the vnode which in turn might require the
     * truncate lock)
     */
rm_done:
    //Update Directory version
    psParentDir->sExtraData.sDirData.uDirVersion++;

    hfs_unlockpair(dcp, cp);
    hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

    if (rvp)
        hfs_free(rvp);

    return (error);
}

/*
 * Remove a directory.
 */
/*
 * (review) UVFS entry point: validates the target is a non-root directory,
 * takes the dcp/cp lock pair, re-checks for a parent rmdir race, then
 * delegates to hfs_removedir. Locks are always released before return.
 */
int
hfs_vnop_rmdir(struct vnode *dvp, struct vnode *vp, struct componentname* psCN)
{
    int error = 0;
    struct cnode *dcp = VTOC(dvp);
    struct cnode *cp = VTOC(vp);

    if (!S_ISDIR(cp->c_mode))
    {
        return (ENOTDIR);
    }
    if (dvp == vp)
    {
        return (EINVAL);
    }

    if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
    {
        return (error);
    }

    /* Check for a race with rmdir on the parent directory */
    if (dcp->c_flag & (C_DELETED | C_NOEXISTS))
    {
        hfs_unlockpair (dcp, cp);
        return ENOENT;
    }

    error = hfs_removedir(dvp, vp, psCN, 0, 0);

    hfs_unlockpair(dcp, cp);

    return (error);
}

/*
 * Remove a directory
 *
 * Both dvp and vp cnodes are locked
 */
int
hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int skip_reserve, int only_unlink)
{
    struct cnode *cp;
    struct cnode *dcp;
    struct hfsmount * hfsmp;
    struct cat_desc desc;
    int lockflags;
    int error = 0, started_tr = 0;

    cp = VTOC(vp);
    dcp = VTOC(dvp);
    hfsmp = VTOHFS(vp);

    if (cp->c_flag & (C_NOEXISTS | C_DELETED)){
        return (EINVAL);
    }

    if (cp->c_entries != 0){
        return (ENOTEMPTY);
    }

    /* Deal with directory hardlinks */
    if (cp->c_flag & C_HARDLINK)
    {
        /*
         * Note that if we have a directory which was a hardlink at any point,
         * its actual directory data is stored in the directory inode in the hidden
         * directory rather than the leaf element(s) present in the namespace.
         *
         * If there are still other hardlinks to this directory,
         * then we'll just eliminate this particular link and the vnode will still exist.
         * If this is the last link to an empty directory, then we'll open-unlink the
         * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
         *
         * We could also return EBUSY here.
         */

        return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
    }

    /*
     * In a few cases, we may want to allow the directory to persist in an
     * open-unlinked state.  If the directory is being open-unlinked (still has usecount
     * references), or if it has EAs, or if it was being deleted as part of a rename,
     * then we go ahead and move it to the hidden directory.
     *
     * If the directory is being open-unlinked, then we want to keep the catalog entry
     * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
     *
     * If the directory had EAs, then we want to use the open-unlink trick so that the
     * EA removal is not done in one giant transaction.  Otherwise, it could cause a panic
     * due to overflowing the journal.
     *
     * Finally, if it was deleted as part of a rename, we move it to the hidden directory
     * in order to maintain rename atomicity.
     *
     * Note that the allow_dirs argument to hfs_removefile specifies that it is
     * supposed to handle directories for this case.
     */

    if (((hfsmp->hfs_attribute_vp != NULL) && ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) || (only_unlink != 0))
    {

        int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, only_unlink);
// Will be released in the layer above where it was created
//        vnode_recycle(vp);
        return ret;
    }

    dcp->c_flag |= C_DIR_MODIFICATION;

    if ((error = hfs_start_transaction(hfsmp)) != 0)
    {
        goto out;
    }
    started_tr = 1;

    /*
     * Verify the directory is empty (and valid).
     * (Rmdir ".." won't be valid since
     * ".." will contain a reference to
     * the current directory and thus be
     * non-empty.)
     */
    if ((dcp->c_bsdflags & (UF_APPEND | SF_APPEND)) || (cp->c_bsdflags & ((UF_IMMUTABLE | SF_IMMUTABLE | UF_APPEND | SF_APPEND))))
    {
        error = EPERM;
        goto out;
    }

    /*
     * Protect against a race with rename by using the component
     * name passed in and parent id from dvp (instead of using
     * the cp->c_desc which may have changed).
     */
    desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
    desc.cd_namelen = cnp->cn_namelen;
    desc.cd_parentcnid = dcp->c_fileid;
    desc.cd_cnid = cp->c_cnid;
    desc.cd_flags = CD_ISDIR;
    desc.cd_encoding = cp->c_encoding;
    desc.cd_hint = 0;

    /* NOTE(review): a stale cnode is treated as success (error forced to 0). */
    if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error))
    {
        error = 0;
        goto out;
    }

    /* Remove entry from catalog */
    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

    if (!skip_reserve)
    {
        /*
         * Reserve some space in the Catalog file.
         */
        if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL)))
        {
            hfs_systemfile_unlock(hfsmp, lockflags);
            goto out;
        }
    }

    error = cat_delete(hfsmp, &desc, &cp->c_attr);

    if (!error)
    {
        /* The parent lost a child */
        if (dcp->c_entries > 0)
            dcp->c_entries--;
        DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
        dcp->c_dirchangecnt++;
        hfs_incr_gencount(dcp);

        dcp->c_touch_chgtime = TRUE;
        dcp->c_touch_modtime = TRUE;
        dcp->c_flag |= C_MODIFIED;

        hfs_update(dcp->c_vp, 0);
    }

    hfs_systemfile_unlock(hfsmp, lockflags);

    if (error)
        goto out;

    hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

    /* Mark C_NOEXISTS since the catalog entry is now gone */
    cp->c_flag |= C_NOEXISTS;

out:
    dvp->sExtraData.sDirData.uDirVersion++;

    dcp->c_flag &= ~C_DIR_MODIFICATION;
//TBD - We have wakeup here but can't see anyone who's msleeping on c_flag...
//    wakeup((caddr_t)&dcp->c_flag);

    if (started_tr)
    {
        hfs_end_transaction(hfsmp);
    }

    return (error);
}

/*
 * (review) UVFS setattr: applies size, ownership, mode, BSD flags, and
 * timestamps from 'attr' (gated by fa_validmask), then flushes via
 * hfs_update. Size changes are handled first under the truncate lock,
 * before the cnode lock is taken for the metadata updates.
 */
int hfs_vnop_setattr( vnode_t vp, const UVFSFileAttributes *attr )
{
    int err = 0;
    if ( attr->fa_validmask == 0 )
    {
        return 0;
    }

    if ( ( attr->fa_validmask & READ_ONLY_FA_FIELDS )
        /*|| ( attr->fa_validmask & ~VALID_IN_ATTR_MASK )*/)
    {
        return EINVAL;
    }

    struct cnode *cp = NULL;

    /* Don't allow modification of the journal. */
    struct hfsmount *hfsmp = VTOHFS(vp);
    if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
        return (EPERM);
    }

    /*
     * File size change request.
     * We are guaranteed that this is not a directory, and that
     * the filesystem object is writeable.
     */

    if ( attr->fa_validmask & UVFS_FA_VALID_SIZE )
    {
        if (!vnode_isreg(vp))
        {
            if (vnode_isdir(vp) || vnode_islnk(vp))
            {
                return EPERM;
            }
            //otherwise return EINVAL
            return EINVAL;
        }

        // Take truncate lock
        hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

        // hfs_truncate will deal with the cnode lock
        err = hfs_truncate(vp, attr->fa_size, 0, 0);

        hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
        if (err)
            return err;
    }


    if ((err = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
        return (err);
    cp = VTOC(vp);


    if ( attr->fa_validmask & UVFS_FA_VALID_UID )
    {
        cp->c_flag |= C_MODIFIED;
        cp->c_touch_chgtime = TRUE;
        cp->c_uid = attr->fa_uid;
    }

    if ( attr->fa_validmask & UVFS_FA_VALID_GID )
    {
        cp->c_flag |= C_MODIFIED;
        cp->c_touch_chgtime = TRUE;
        cp->c_gid = attr->fa_gid;
    }

    if ( attr->fa_validmask & UVFS_FA_VALID_MODE )
    {
        /* Only permission bits may change; file-type bits are preserved. */
        mode_t new_mode = (cp->c_mode & ~ALLPERMS) | (attr->fa_mode & ALLPERMS);
        if (new_mode != cp->c_mode) {
            cp->c_mode = new_mode;
            cp->c_flag |= C_MINOR_MOD;
        }
    }

    if ( attr->fa_validmask & UVFS_FA_VALID_BSD_FLAGS )
    {
        cp->c_bsdflags = attr->fa_bsd_flags;
    }

    /*
     * Timestamp updates.
     */
    if ( attr->fa_validmask & UVFS_FA_VALID_ATIME )
    {
        cp->c_atime = attr->fa_atime.tv_sec;
        cp->c_touch_acctime = FALSE;
    }

    if ( attr->fa_validmask & UVFS_FA_VALID_BIRTHTIME )
    {
        /* NOTE(review): stores the birthtime into c_ctime; HFS+ create time is
         * normally c_itime — confirm this is intentional in this port. */
        cp->c_ctime = attr->fa_birthtime.tv_sec;
    }

    if ( attr->fa_validmask & UVFS_FA_VALID_MTIME )
    {
        cp->c_mtime = attr->fa_mtime.tv_sec;
        cp->c_touch_modtime = FALSE;
        cp->c_touch_chgtime = TRUE;

        hfs_clear_might_be_dirty_flag(cp);
    }

    err = hfs_update(vp, 0);

    /* Purge origin cache for cnode, since caller now has correct link ID for it
     * We purge it here since it was acquired for us during lookup, and we no longer need it.
     */
    if ((cp->c_flag & C_HARDLINK) && (!IS_DIR(vp))){
        hfs_relorigin(cp, 0);
    }

    hfs_unlock(cp);

    return err;
}

/*
 * Update a cnode's on-disk metadata.
 *
 * The cnode must be locked exclusive.  See declaration for possible
 * options.
 */
int
hfs_update(struct vnode *vp, int options)
{
/* NOTE(review): 'options' IS read below (HFS_UPDATE_FORCE check); the
 * unused-pragma is stale — confirm before relying on it. */
#pragma unused (options)

    struct cnode *cp = VTOC(vp);
    const struct cat_fork *dataforkp = NULL;
    const struct cat_fork *rsrcforkp = NULL;
    struct cat_fork datafork;
    struct cat_fork rsrcfork;
    struct hfsmount *hfsmp;
    int lockflags;
    int error = 0;

    if (ISSET(cp->c_flag, C_NOEXISTS))
        return 0;

    hfsmp = VTOHFS(vp);

    if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
        hfsmp->hfs_catalog_vp == NULL){
        return (0);
    }

    /* Read-only volume or a dead cnode: drop all pending dirty state. */
    if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
        CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED);
        cp->c_touch_acctime = 0;
        cp->c_touch_chgtime = 0;
        cp->c_touch_modtime = 0;
        return (0);
    }

    hfs_touchtimes(hfsmp, cp);

    if (!ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD)
        && !hfs_should_save_atime(cp)) {
        // Nothing to update
        return 0;
    }

    bool check_txn = false;
    if (!ISSET(options, HFS_UPDATE_FORCE) && !ISSET(cp->c_flag, C_MODIFIED)) {
        /*
         * This must be a minor modification.  If the current
         * transaction already has an update for this node, then we
         * bundle in the modification.
         */
        if (hfsmp->jnl
            && journal_current_txn(hfsmp->jnl) == cp->c_update_txn) {
            check_txn = true;
        }
        else
        {
            error = 0;
            goto exit;
        }
    }

    error = hfs_start_transaction(hfsmp);
    if ( error != 0 )
    {
        goto exit;
    }

    /* Transaction changed between the check above and here: drop out. */
    if (check_txn
        && journal_current_txn(hfsmp->jnl) != cp->c_update_txn) {
        hfs_end_transaction(hfsmp);
        error = 0;
        goto exit;
    }

    /*
     * Modify the values passed to cat_update based on whether or not
     * the file has invalid ranges or borrowed blocks.
     */
    dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, NULL, &datafork, hfsmp->blockSize);
    rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, NULL, &rsrcfork, hfsmp->blockSize);

    /*
     * Lock the Catalog b-tree file.
     */
    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

    error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

    if (hfsmp->jnl)
        cp->c_update_txn = journal_current_txn(hfsmp->jnl);

    hfs_systemfile_unlock(hfsmp, lockflags);

    CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD);

    hfs_end_transaction(hfsmp);

exit:

    return error;
}

/*
 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
 * are no bigger than the valid data on disk thus reducing the chance
 * of exposing uninitialised data in the event of a non clean unmount.
 * fork_buf is where to put the temporary copy if required.  (It can
 * be inside pfork.)
 */
/*
 * (review) Returns 'cf' unchanged when nothing needs trimming, otherwise a
 * clamped copy in 'cf_buf'. Both call sites in this file pass cf == NULL and
 * a stack cf_buf, so cf defaults to &ff->ff_data.
 */
const struct cat_fork *
hfs_prepare_fork_for_update(filefork_t *ff, const struct cat_fork *cf, struct cat_fork *cf_buf, uint32_t block_size)
{
    if (!ff)
        return NULL;

    if (!cf)
        cf = &ff->ff_data;
    if (!cf_buf)
        cf_buf = &ff->ff_data;

    /* NOTE(review): max_size is set straight from ff_size, so the
     * 'ff_size <= max_size' test below is always true and the fast path is
     * taken whenever there are no unallocated blocks.  Upstream xnu derives
     * max_size from the first entry of ff_invalidranges, which also makes
     * the "trim cf_size to first invalid range" clamp at the bottom
     * meaningful; here it is effectively dead — confirm whether the
     * invalid-range handling was intentionally dropped in this port. */
    off_t max_size = ff->ff_size;

    if (!ff->ff_unallocblocks && ff->ff_size <= max_size)
        return cf;        // Nothing to do

    if (ff->ff_blocks < ff->ff_unallocblocks) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_prepare_fork_for_update: ff_blocks %d is less than unalloc blocks %d\n",
                  ff->ff_blocks, ff->ff_unallocblocks);
        hfs_assert(0);
    }

    struct cat_fork *out = cf_buf;

    if (out != cf)
        bcopy(cf, out, sizeof(*cf));

    // Adjust cf_blocks for cf_vblocks
    out->cf_blocks -= out->cf_vblocks;

    /*
     * Here we trim the size with the updated cf_blocks.  This is
     * probably unnecessary now because the invalid ranges should
     * catch this (but that wasn't always the case).
     */
    off_t alloc_bytes = blk_to_bytes(out->cf_blocks, block_size);
    if (out->cf_size > alloc_bytes)
        out->cf_size = alloc_bytes;

    // Trim cf_size to first invalid range
    if (out->cf_size > max_size)
        out->cf_size = max_size;

    return out;
}

/*
 * Read contents of a symbolic link.
 */
/*
 * (review) Copies the target path into 'data' and NUL-terminates it;
 * *actuallyRead is set to ff_size + 1 (payload plus terminator).  The link
 * payload is cached in fp->ff_symlinkptr on first read so subsequent calls
 * avoid the buffer-cache round trip.  Caller must supply
 * dataSize >= ff_size + 1 (checked below), which keeps the terminator write
 * at data[ff_size] in bounds.
 */
int
hfs_vnop_readlink( struct vnode *vp, void* data, size_t dataSize, size_t *actuallyRead )
{
    struct cnode *cp;
    struct filefork *fp;
    int error;

    if (!vnode_islnk(vp))
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readlink: Received node is not a symlink\n");
        return (EINVAL);
    }

    if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
        return (error);
    cp = VTOC(vp);
    fp = VTOF(vp);

    /* Zero length sym links are not allowed */
    if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readlink: Symlink is with invalid content length\n");
        error = EINVAL;
        goto exit;
    }

    if ( dataSize < (size_t)fp->ff_size+1 )
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_readlink: Received buffer size is too small\n");
        error = ENOBUFS;
        goto exit;
    }

    /* Cache the path so we don't waste buffer cache resources */
    if (fp->ff_symlinkptr == NULL) {
        GenericLFBufPtr bp = NULL;

        fp->ff_symlinkptr = hfs_mallocz(fp->ff_size);
        if ( fp->ff_symlinkptr == NULL )
        {
            error = ENOMEM;
            goto exit;
        }

        /* Read a whole physical-block multiple; only ff_size bytes are kept. */
        bp = lf_hfs_generic_buf_allocate( vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size), 0);
        error = lf_hfs_generic_buf_read(bp);
        if (error) {
            lf_hfs_generic_buf_release(bp);
            /* Drop the half-built cache entry so a later call retries cleanly. */
            if (fp->ff_symlinkptr) {
                hfs_free(fp->ff_symlinkptr);
                fp->ff_symlinkptr = NULL;
            }
            goto exit;
        }
        bcopy(bp->pvData, fp->ff_symlinkptr, (size_t)fp->ff_size);
        lf_hfs_generic_buf_release(bp);
    }

    memcpy(data, fp->ff_symlinkptr, fp->ff_size);
    ((uint8_t*)data)[fp->ff_size] = 0;
    *actuallyRead = fp->ff_size+1;

exit:
    hfs_unlock(cp);
    return (error);
}

/*
 * Make a directory.
 */
int
hfs_vnop_mkdir(vnode_t a_dvp, vnode_t *a_vpp, struct componentname *a_cnp, UVFSFileAttributes* a_vap)
{
    int iErr = 0;

    /***** HACK ALERT ********/
    a_cnp->cn_flags |= MAKEENTRY;
    a_vap->fa_type = UVFS_FA_TYPE_DIR;

    iErr = hfs_makenode(a_dvp, a_vpp, a_cnp, a_vap);

#if HFS_CRASH_TEST
    CRASH_ABORT(CRASH_ABORT_MAKE_DIR, a_dvp->mount, NULL);
#endif

    return(iErr);
}

/*
 * Create a regular file.
 */
int
hfs_vnop_create(vnode_t a_dvp, vnode_t *a_vpp, struct componentname *a_cnp, UVFSFileAttributes* a_vap)
{
    a_vap->fa_type = UVFS_FA_TYPE_FILE;
    return hfs_makenode(a_dvp, a_vpp, a_cnp, a_vap);
}

/*
 * Allocate a new node
 */
/*
 * (review) Common creation path for files, directories, and symlinks:
 * validates the requested attributes, builds a cat_attr/cat_desc, creates
 * the catalog record inside a transaction (Catalog + Attribute B-trees
 * locked), then — after ending the transaction, see XXXdbg note below —
 * materialises the vnode with hfs_getnewvnode.  On success *vpp holds the
 * new vnode and both cnodes are unlocked before return.
 */
int
hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, UVFSFileAttributes *psGivenAttr)
{
    struct hfsmount *hfsmp = VTOHFS(dvp);
    struct cnode *dcp = NULL;
    struct cnode *cp = NULL;
    struct vnode *tvp = NULL;
    enum vtype vnodetype = UVFSTOV(psGivenAttr->fa_type);
    mode_t mode = MAKEIMODE(vnodetype);
    struct cat_attr attr = {0};
    int lockflags;
    int error, started_tr = 0;

    int newvnode_flags = 0;
    u_int32_t gnv_flags = 0;
    /* NOTE(review): nocache is never set in this function, so GNV_NOCACHE
     * below is dead — kept for parity with the upstream code. */
    int nocache = 0;
    struct cat_desc out_desc = {0};
    out_desc.cd_flags = 0;
    out_desc.cd_nameptr = NULL;

    if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
        return (error);
    dcp = VTOC(dvp);

    /* Don't allow creation of new entries in open-unlinked directories */
    if (dcp->c_flag & (C_DELETED | C_NOEXISTS))
    {
        error = ENOENT;
        goto exit;
    }

    if ( !(psGivenAttr->fa_validmask & UVFS_FA_VALID_MODE) && (vnodetype != VDIR) )
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_makenode: Invalid mode or type[%#llx, %d]",
                  (unsigned long long)psGivenAttr->fa_validmask, psGivenAttr->fa_type);
        error = EINVAL;
        goto exit;
    }

    if ( ( psGivenAttr->fa_validmask & READ_ONLY_FA_FIELDS ) /*|| ( psGivenAttr->fa_validmask & ~VALID_IN_ATTR_MASK )*/ )
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_makenode: Setting readonly fields or invalid mask[%#llx, %#llx]", (unsigned long long)psGivenAttr->fa_validmask, (unsigned long long)READ_ONLY_FA_FIELDS);
        error = EINVAL;
        goto exit;
    }

    dcp->c_flag |= C_DIR_MODIFICATION;

    *vpp = NULL;

    /* Check if were out of usable disk space. */
    if (hfs_freeblks(hfsmp, 1) == 0)
    {
        error = ENOSPC;
        goto exit;
    }

    struct timeval tv;
    microtime(&tv);

    /* Setup the default attributes */
    if ( psGivenAttr->fa_validmask & UVFS_FA_VALID_MODE )
    {
        mode = (mode & ~ALLPERMS) | (psGivenAttr->fa_mode & ALLPERMS);
    }

    attr.ca_mode = mode;
    attr.ca_linkcount = 1;
    /* All four timestamps start at creation time. */
    attr.ca_itime = tv.tv_sec;
    attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
    attr.ca_atimeondisk = attr.ca_atime;

    /*
     * HFS+ only: all files get ThreadExists
     */
    if (vnodetype == VDIR)
    {
        if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
        {
            attr.ca_recflags = kHFSHasFolderCountMask;
        }
    }
    else
    {
        attr.ca_recflags = kHFSThreadExistsMask;
    }

    /*
     * Add the date added to the item. See above, as
     * all of the dates are set to the itime.
     */
    hfs_write_dateadded (&attr, attr.ca_atime);

    /* Initialize the gen counter to 1 */
    hfs_write_gencount(&attr, (uint32_t)1);

    if ( psGivenAttr->fa_validmask & UVFS_FA_VALID_UID )
    {
        attr.ca_uid = psGivenAttr->fa_uid;
    }

    if ( psGivenAttr->fa_validmask & UVFS_FA_VALID_GID )
    {
        attr.ca_gid = psGivenAttr->fa_gid;
    }

    /* Tag symlinks with a type and creator. */
    if (vnodetype == VLNK)
    {
        struct FndrFileInfo *fip;

        fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
        fip->fdType = SWAP_BE32(kSymLinkFileType);
        fip->fdCreator = SWAP_BE32(kSymLinkCreator);
    }

    /* Setup the descriptor */
    struct cat_desc in_desc ={0};
    in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
    in_desc.cd_namelen = cnp->cn_namelen;
    in_desc.cd_parentcnid = dcp->c_fileid;
    in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
    in_desc.cd_hint = dcp->c_childhint;
    in_desc.cd_encoding = 0;

    if ((error = hfs_start_transaction(hfsmp)) != 0)
    {
        goto exit;
    }
    started_tr = 1;

    // have to also lock the attribute file because cat_create() needs
    // to check that any fileID it wants to use does not have orphaned
    // attributes in it.
    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
    cnid_t new_id = 0;

    /* Reserve some space in the Catalog file. */
    error = cat_preflight(hfsmp, CAT_CREATE, NULL);
    if (error != 0)
    {
        hfs_systemfile_unlock(hfsmp, lockflags);
        goto exit;
    }

    error = cat_acquire_cnid(hfsmp, &new_id);
    if (error != 0)
    {
        hfs_systemfile_unlock (hfsmp, lockflags);
        goto exit;
    }

    error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
    if (error == 0) {
        /* Update the parent directory */
        dcp->c_childhint = out_desc.cd_hint;    /* Cache directory's location */
        dcp->c_entries++;

        if (vnodetype == VDIR)
        {
            INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
        }
        dcp->c_dirchangecnt++;
        hfs_incr_gencount(dcp);

        dcp->c_touch_chgtime = dcp->c_touch_modtime = true;
        dcp->c_flag |= C_MODIFIED;

        hfs_update(dcp->c_vp, 0);
    }
    hfs_systemfile_unlock(hfsmp, lockflags);
    if (error)
        goto exit;

    /* Remember the txn the record was created in, for the new cnode below. */
    uint32_t txn = hfsmp->jnl ? journal_current_txn(hfsmp->jnl) : 0;

    hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE, (dcp->c_cnid == kHFSRootFolderID));

    // XXXdbg
    // have to end the transaction here before we call hfs_getnewvnode()
    // because that can cause us to try and reclaim a vnode on a different
    // file system which could cause us to start a transaction which can
    // deadlock with someone on that other file system (since we could be
    // holding two transaction locks as well as various vnodes and we did
    // not obtain the locks on them in the proper order).
    //
    // NOTE: this means that if the quota check fails or we have to update
    // the change time on a block-special device that those changes
    // will happen as part of independent transactions.
    //
    if (started_tr)
    {
        hfs_end_transaction(hfsmp);
        started_tr = 0;
    }

    gnv_flags |= GNV_CREATE;
    if (nocache)
    {
        gnv_flags |= GNV_NOCACHE;
    }

    /*
     * Create a vnode for the object just created.
     *
     * NOTE: Maintaining the cnode lock on the parent directory is important,
     * as it prevents race conditions where other threads want to look up entries
     * in the directory and/or add things as we are in the process of creating
     * the vnode below.  However, this has the potential for causing a
     * double lock panic when dealing with shadow files on a HFS boot partition.
     * The panic could occur if we are not cleaning up after ourselves properly
     * when done with a shadow file or in the error cases.  The error would occur if we
     * try to create a new vnode, and then end up reclaiming another shadow vnode to
     * create the new one.  However, if everything is working properly, this should
     * be a non-issue as we would never enter that reclaim codepath.
     *
     * The cnode is locked on successful return.
     */
    error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
                            NULL, &tvp, &newvnode_flags);
    if (error)
        goto exit;

    cp = VTOC(tvp);

    cp->c_update_txn = txn;

    *vpp = tvp;

exit:
    cat_releasedesc(&out_desc);

    //Update Directory version
    dvp->sExtraData.sDirData.uDirVersion++;

    /*
     * Make sure we release cnode lock on dcp.
     */
    if (dcp)
    {
        dcp->c_flag &= ~C_DIR_MODIFICATION;

        //TBD - We have wakeup here but can't see anyone who's msleeping on c_flag...
        //wakeup((caddr_t)&dcp->c_flag);
        hfs_unlock(dcp);
    }

    if (cp != NULL) {
        hfs_unlock(cp);
    }
    if (started_tr) {
        hfs_end_transaction(hfsmp);
    }

    return (error);
}

/*
 * Create a symbolic link.
+ */ +int +hfs_vnop_symlink(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, char* symlink_content, UVFSFileAttributes *attrp) +{ + struct vnode *vp = NULL; + struct cnode *cp = NULL; + struct hfsmount *hfsmp; + struct filefork *fp; + GenericLFBufPtr bp = NULL; + char *datap; + int started_tr = 0; + uint64_t len; + int error; + + hfsmp = VTOHFS(dvp); + + len = strlen(symlink_content); + if (len > MAXPATHLEN) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_symlink: Received symlink content too long\n"); + return (ENAMETOOLONG); + } + + if (len == 0 ) + { + LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_symlink: Received zero length symlink content\n"); + return (EINVAL); + } + + /* Check for free space */ + if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) { + return (ENOSPC); + } + + attrp->fa_type = UVFS_FA_TYPE_SYMLINK; + attrp->fa_mode |= S_IFLNK; + attrp->fa_validmask |= UVFS_FA_VALID_MODE; + + /* Create the vnode */ + if ((error = hfs_makenode(dvp, vpp, cnp, attrp))) { + goto out; + } + vp = *vpp; + if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { + goto out; + } + cp = VTOC(vp); + fp = VTOF(vp); + + if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { + goto out; + } + +#if QUOTA + (void)hfs_getinoquota(cp); +#endif /* QUOTA */ + + if ((error = hfs_start_transaction(hfsmp)) != 0) { + goto out; + } + started_tr = 1; + + /* + * Allocate space for the link. + * + * Since we're already inside a transaction, + * + * Don't need truncate lock since a symlink is treated as a system file. + */ + error = hfs_truncate(vp, len, IO_NOZEROFILL, 0); + + /* On errors, remove the symlink file */ + if (error) { + /* + * End the transaction so we don't re-take the cnode lock + * below while inside a transaction (lock order violation). 
+ */ + hfs_end_transaction(hfsmp); + /* hfs_removefile() requires holding the truncate lock */ + hfs_unlock(cp); + hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS); + + if (hfs_start_transaction(hfsmp) != 0) { + started_tr = 0; + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + goto out; + } + + (void) hfs_removefile(dvp, vp, cnp, 0, 0, 0, 0); + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + goto out; + } + + /* Write the sym-link to disk */ + bp = lf_hfs_generic_buf_allocate( vp, 0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size), 0); + error = lf_hfs_generic_buf_read( bp ); + if ( error != 0 ) + { + goto out; + } + + if (hfsmp->jnl) + { + journal_modify_block_start(hfsmp->jnl, bp); + } + datap = bp->pvData; + assert(bp->uDataSize >= len); + bzero(datap, bp->uDataSize); + bcopy(symlink_content, datap, len); + if (hfsmp->jnl) + { + journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); + bp = NULL; // block will be released by the journal + } + else + { + error = lf_hfs_generic_buf_write(bp); + if ( error != 0 ) + { + goto out; + } + } +out: + if (started_tr) + hfs_end_transaction(hfsmp); + + if ((cp != NULL) && (vp != NULL)) { + hfs_unlock(cp); + } + if (error) { + if (vp) { + // vnode_put(vp); + } + *vpp = NULL; + } + + if ( bp ) { + lf_hfs_generic_buf_release(bp); + } + + hfs_flush(hfsmp, HFS_FLUSH_FULL); + + return (error); +} + +/* + * Rename a cnode. + * + * The VFS layer guarantees that: + * - source and destination will either both be directories, or + * both not be directories. + * - all the vnodes are from the same file system + * + * When the target is a directory, HFS must ensure that its empty. + * + * Note that this function requires up to 6 vnodes in order to work properly + * if it is operating on files (and not on directories). 
This is because only + * files can have resource forks, and we now require iocounts to be held on the + * vnodes corresponding to the resource forks (if applicable) as well as + * the files or directories undergoing rename. The problem with not holding + * iocounts on the resource fork vnodes is that it can lead to a deadlock + * situation: The rsrc fork of the source file may be recycled and reclaimed + * in order to provide a vnode for the destination file's rsrc fork. Since + * data and rsrc forks share the same cnode, we'd eventually try to lock the + * source file's cnode in order to sync its rsrc fork to disk, but it's already + * been locked. By taking the rsrc fork vnodes up front we ensure that they + * cannot be recycled, and that the situation mentioned above cannot happen. + */ +int +hfs_vnop_renamex(struct vnode *fdvp,struct vnode *fvp, struct componentname *fcnp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp) +{ + + /* + * Note that we only need locals for the target/destination's + * resource fork vnode (and only if necessary). We don't care if the + * source has a resource fork vnode or not. + */ + struct vnode *tvp_rsrc = NULL; + struct cnode *tcp = NULL; + struct cnode *error_cnode; + struct cat_desc from_desc; + + struct hfsmount *hfsmp = VTOHFS(tdvp); + int tvp_deleted = 0; + int started_tr = 0, got_cookie = 0; + int took_trunc_lock = 0; + int lockflags; + int error; + + int rename_exclusive = 0; + +retry: + /* When tvp exists, take the truncate lock for hfs_removefile(). */ + if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) { + hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + took_trunc_lock = 1; + } + + if (tvp && VTOC(tvp) == NULL) + return (EINVAL); + + error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? 
VTOC(tvp) : NULL, HFS_EXCLUSIVE_LOCK, &error_cnode); + if (error) + { + if (took_trunc_lock) + { + hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT); + took_trunc_lock = 0; + } + + /* + * We hit an error path. If we were trying to re-acquire the locks + * after coming through here once, we might have already obtained + * an iocount on tvp's resource fork vnode. Drop that before dealing + * with the failure. Note this is safe -- since we are in an + * error handling path, we can't be holding the cnode locks. + */ + if (tvp_rsrc && tcp) + { + hfs_chash_lower_OpenLookupCounter(tcp); + hfs_free(tvp_rsrc); + tvp_rsrc = NULL; + } + + /* + * tvp might no longer exist. If the cause of the lock failure + * was tvp, then we can try again with tvp/tcp set to NULL. + * This is ok because the vfs syscall will vnode_put the vnodes + * after we return from hfs_vnop_rename. + */ + if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) { + tcp = NULL; + tvp = NULL; + goto retry; + } + + /* If we want to reintroduce notifications for failed renames, this + is the place to do it. */ + + return (error); + } + + struct cnode* fdcp = VTOC(fdvp); + struct cnode* fcp = VTOC(fvp); + struct cnode* tdcp = VTOC(tdvp); + tcp = tvp ? VTOC(tvp) : NULL; + + /* + * If caller requested an exclusive rename (VFS_RENAME_EXCL) and 'tcp' exists + * then we must fail the operation. + */ + if (tcp && rename_exclusive) + { + error = EEXIST; + goto out; + } + + /* + * Acquire iocounts on the destination's resource fork vnode + * if necessary. If dst/src are files and the dst has a resource + * fork vnode, then we need to try and acquire an iocount on the rsrc vnode. + * If it does not exist, then we don't care and can skip it. 
+ */ + if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) + { + if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) + { + tvp_rsrc = tcp->c_rsrc_vp; + hfs_chash_raise_OpenLookupCounter(tcp); + + /* Unlock everything to acquire iocount on this rsrc vnode */ + if (took_trunc_lock) + { + hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT); + took_trunc_lock = 0; + } + + + hfs_unlockfour(fdcp, fcp, tdcp, tcp); + + goto retry; + } + } + + /* Ensure we didn't race src or dst parent directories with rmdir. */ + if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) + { + error = ENOENT; + goto out; + } + + if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) + { + error = ENOENT; + goto out; + } + + + /* Check for a race against unlink. The hfs_valid_cnode checks validate + * the parent/child relationship with fdcp and tdcp, as well as the + * component name of the target cnodes. + */ + if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) + { + error = ENOENT; + goto out; + } + + if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) + { + // + // hmm, the destination vnode isn't valid any more. + // in this case we can just drop him and pretend he + // never existed in the first place. + // + if (took_trunc_lock) + { + hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT); + took_trunc_lock = 0; + } + error = 0; + + hfs_unlockfour(fdcp, fcp, tdcp, tcp); + + tcp = NULL; + tvp = NULL; + + // retry the locking with tvp null'ed out + goto retry; + } + + fdcp->c_flag |= C_DIR_MODIFICATION; + if (fdvp != tdvp) + { + tdcp->c_flag |= C_DIR_MODIFICATION; + } + + /* + * Disallow renaming of a directory hard link if the source and + * destination parent directories are different, or a directory whose + * descendant is a directory hard link and the one of the ancestors + * of the destination directory is a directory hard link. 
+ */ + if (vnode_isdir(fvp) && (fdvp != tdvp)) + { + if (fcp->c_flag & C_HARDLINK) { + error = EPERM; + goto out; + } + if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) + { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) + { + error = EPERM; + hfs_systemfile_unlock(hfsmp, lockflags); + goto out; + } + hfs_systemfile_unlock(hfsmp, lockflags); + } + } + + /* + * The following edge case is caught here: + * (to cannot be a descendent of from) + * + * o fdvp + * / + * / + * o fvp + * \ + * \ + * o tdvp + * / + * / + * o tvp + */ + if (tdcp->c_parentcnid == fcp->c_fileid) + { + error = EINVAL; + goto out; + } + + /* + * The following two edge cases are caught here: + * (note tvp is not empty) + * + * o tdvp o tdvp + * / / + * / / + * o tvp tvp o fdvp + * \ \ + * \ \ + * o fdvp o fvp + * / + * / + * o fvp + */ + if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) + { + error = ENOTEMPTY; + goto out; + } + + /* + * The following edge case is caught here: + * (the from child and parent are the same) + * + * o tdvp + * / + * / + * fdvp o fvp + */ + if (fdvp == fvp) + { + error = EINVAL; + goto out; + } + + /* + * Make sure "from" vnode and its parent are changeable. 
+ */ + if ((fcp->c_bsdflags & (SF_IMMUTABLE | UF_IMMUTABLE | UF_APPEND | SF_APPEND)) || (fdcp->c_bsdflags & (UF_APPEND | SF_APPEND))) + { + error = EPERM; + goto out; + } + + /* Don't allow modification of the journal or journal_info_block */ + if (hfs_is_journal_file(hfsmp, fcp) || (tcp && hfs_is_journal_file(hfsmp, tcp))) + { + error = EPERM; + goto out; + } + + struct cat_desc out_desc = {0}; + from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr; + from_desc.cd_namelen = fcnp->cn_namelen; + from_desc.cd_parentcnid = fdcp->c_fileid; + from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); + from_desc.cd_cnid = fcp->c_cnid; + + struct cat_desc to_desc = {0}; + to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr; + to_desc.cd_namelen = tcnp->cn_namelen; + to_desc.cd_parentcnid = tdcp->c_fileid; + to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); + to_desc.cd_cnid = fcp->c_cnid; + + if ((error = hfs_start_transaction(hfsmp)) != 0) + { + goto out; + } + started_tr = 1; + + /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask + * inside a journal transaction and without holding a cnode lock. + * As setting of this bit depends on being in journal transaction for + * concurrency, check this bit again after we start journal transaction for rename + * to ensure that this directory does not have any descendant that + * is a directory hard link. 
+ */ + if (vnode_isdir(fvp) && (fdvp != tdvp)) + { + if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) + { + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) { + error = EPERM; + hfs_systemfile_unlock(hfsmp, lockflags); + goto out; + } + hfs_systemfile_unlock(hfsmp, lockflags); + } + } + + // if it's a hardlink then re-lookup the name so + // that we get the correct cnid in from_desc (see + // the comment in hfs_removefile for more details) + if (fcp->c_flag & C_HARDLINK) + { + struct cat_desc tmpdesc; + cnid_t real_cnid; + + tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr; + tmpdesc.cd_namelen = fcnp->cn_namelen; + tmpdesc.cd_parentcnid = fdcp->c_fileid; + tmpdesc.cd_hint = fdcp->c_childhint; + tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR; + tmpdesc.cd_encoding = 0; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + + if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) + { + hfs_systemfile_unlock(hfsmp, lockflags); + goto out; + } + + // use the real cnid instead of whatever happened to be there + from_desc.cd_cnid = real_cnid; + hfs_systemfile_unlock(hfsmp, lockflags); + } + + /* + * Reserve some space in the Catalog file. + */ + cat_cookie_t cookie; + if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie))) + { + goto out; + } + got_cookie = 1; + + /* + * If the destination exists then it may need to be removed. + * + * Due to HFS's locking system, we should always move the + * existing 'tvp' element to the hidden directory in hfs_vnop_rename. + * Because the VNOP_LOOKUP call enters and exits the filesystem independently + * of the actual vnop that it was trying to do (stat, link, readlink), + * we must release the cnode lock of that element during the interim to + * do MAC checking, vnode authorization, and other calls. In that time, + * the item can be deleted (or renamed over). 
However, only in the rename + * case is it inappropriate to return ENOENT from any of those calls. Either + * the call should return information about the old element (stale), or get + * information about the newer element that we are about to write in its place. + * + * HFS lookup has been modified to detect a rename and re-drive its + * lookup internally. For other calls that have already succeeded in + * their lookup call and are waiting to acquire the cnode lock in order + * to proceed, that cnode lock will not fail due to the cnode being marked + * C_NOEXISTS, because it won't have been marked as such. It will only + * have C_DELETED. Thus, they will simply act on the stale open-unlinked + * element. All future callers will get the new element. + * + * To implement this behavior, we pass the "only_unlink" argument to + * hfs_removefile and hfs_removedir. This will result in the vnode acting + * as though it is open-unlinked. Additionally, when we are done moving the + * element to the hidden directory, we vnode_recycle the target so that it is + * reclaimed as soon as possible. Reclaim and inactive are both + * capable of clearing out unused blocks for an open-unlinked file or dir. + */ + if (tvp) + { + /* + * When fvp matches tvp they could be case variants + * or matching hard links. + */ + if (fvp == tvp) + { + if (!(fcp->c_flag & C_HARDLINK)) + { + /* + * If they're not hardlinks, then fvp == tvp must mean we + * are using case-insensitive HFS because case-sensitive would + * not use the same vnode for both. In this case we just update + * the catalog for: a -> A + */ + goto skip_rm; /* simple case variant */ + + } + /* For all cases below, we must be using hardlinks */ + else if ((fdvp != tdvp) || (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) + { + /* + * If the parent directories are not the same, AND the two items + * are hardlinks, posix says to do nothing: + * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob + * We just return 0 in this case. 
+ * + * If case sensitivity is on, and we are using hardlinks + * then renaming is supposed to do nothing. + * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED + */ + goto out; + + } + else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen, (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) + { + /* + * If we get here, then the following must be true: + * a) We are running case-insensitive HFS+. + * b) Both paths 'fvp' and 'tvp' are in the same parent directory. + * c) the two names are case-variants of each other. + * + * In this case, we are really only dealing with a single catalog record + * whose name is being updated. + * + * op is dir1/fred -> dir1/FRED + * + * We need to special case the name matching, because if + * dir1/fred <-> dir1/bob were the two links, and the + * op was dir1/fred -> dir1/bob + * That would fail/do nothing. + */ + goto skip_rm; /* case-variant hardlink in the same dir */ + } + else + { + goto out; /* matching hardlink, nothing to do */ + } + } + + + if (vnode_isdir(tvp)) + { + /* + * hfs_removedir will eventually call hfs_removefile on the directory + * we're working on, because only hfs_removefile does the renaming of the + * item to the hidden directory. The directory will stay around in the + * hidden directory with C_DELETED until it gets an inactive or a reclaim. + * That way, we can destroy all of the EAs as needed and allow new ones to be + * written. + */ + error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 0); + } + else + { + error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, 0); + + /* + * If the destination file had a resource fork vnode, then we need to get rid of + * its blocks when there are no more references to it. Because the call to + * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim + * on the resource fork vnode, in order to prevent block leaks. 
Otherwise, + * the resource fork vnode could prevent the data fork vnode from going out of scope + * because it holds a v_parent reference on it. So we mark it for termination + * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it + * can clean up the blocks of open-unlinked files and resource forks. + * + * We can safely call vnode_recycle on the resource fork because we took an iocount + * reference on it at the beginning of the function. + */ + + if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) + { + hfs_chash_lower_OpenLookupCounter(tcp); + hfs_free(tvp_rsrc); + } + } + + if (error) + { + goto out; + } + + tvp_deleted = 1; + + if ( ((VTOC(tvp)->c_flag & C_HARDLINK) == 0 ) || (VTOC(tvp)->c_linkcount == 0) ) + { + INVALIDATE_NODE(tvp); + } + + /* Mark 'tcp' as being deleted due to a rename */ + tcp->c_flag |= C_RENAMED; + + /* + * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks + * as quickly as possible. + */ + //TBD -- Need to see what we are doing with recycle +// vnode_recycle(tvp); + } + +skip_rm: + /* + * All done with tvp and fvp. + * + * We also jump to this point if there was no destination observed during lookup and namei. + * However, because only iocounts are held at the VFS layer, there is nothing preventing a + * competing thread from racing us and creating a file or dir at the destination of this rename + * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename + * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the + * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled + * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY. + * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno + * will be swallowed and it will restart the operation. 
+ */ + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); + error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (error) + { + if (error == EEXIST) + { + error = ERECYCLE; + } + goto out; + } + + /* Update cnode's catalog descriptor */ + replace_desc(fcp, &out_desc); + fcp->c_parentcnid = tdcp->c_fileid; + fcp->c_hint = 0; + + /* + * Now indicate this cnode needs to have date-added written to the + * finderinfo, but only if moving to a different directory, or if + * it doesn't already have it. + */ + if (fdvp != tdvp || !ISSET(fcp->c_attr.ca_recflags, kHFSHasDateAddedMask)) + fcp->c_flag |= C_NEEDS_DATEADDED; + + (void) hfs_update (fvp, 0); + + hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE, (fdcp->c_cnid == kHFSRootFolderID)); + hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE, (tdcp->c_cnid == kHFSRootFolderID)); + + /* Update both parent directories. */ + if (fdvp != tdvp) + { + if (vnode_isdir(fvp)) + { + /* If the source directory has directory hard link + * descendants, set the kHFSHasChildLinkBit in the + * destination parent hierarchy + */ + if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) && !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) + { + + tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask; + + error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid); + if (error) + { + LFHFS_LOG(LEVEL_DEBUG, "hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid); + error = 0; + } + } + INC_FOLDERCOUNT(hfsmp, tdcp->c_attr); + DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr); + } + tdcp->c_entries++; + tdcp->c_dirchangecnt++; + tdcp->c_flag |= C_MODIFIED; + hfs_incr_gencount(tdcp); + + if (fdcp->c_entries > 0) + fdcp->c_entries--; + fdcp->c_dirchangecnt++; + fdcp->c_flag |= C_MODIFIED; + fdcp->c_touch_chgtime = TRUE; + fdcp->c_touch_modtime = TRUE; + + if (ISSET(fcp->c_flag, C_HARDLINK)) + { + hfs_relorigin(fcp, fdcp->c_fileid); + if 
(fdcp->c_fileid != fdcp->c_cnid) + hfs_relorigin(fcp, fdcp->c_cnid); + } + + (void) hfs_update(fdvp, 0); + } + hfs_incr_gencount(fdcp); + + tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */ + tdcp->c_touch_chgtime = TRUE; + tdcp->c_touch_modtime = TRUE; + + (void) hfs_update(tdvp, 0); + + /* Update the vnode's name now that the rename has completed. */ + vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME)); + + /* + * At this point, we may have a resource fork vnode attached to the + * 'from' vnode. If it exists, we will want to update its name, because + * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc"). + * + * Note that the only thing we need to update here is the name attached to + * the vnode, since a resource fork vnode does not have a separate resource + * cnode -- it's still 'fcp'. + */ + if (fcp->c_rsrc_vp) + { + char* rsrc_path = NULL; + int len; + + /* Create a new temporary buffer that's going to hold the new name */ + rsrc_path = hfs_malloc(MAXPATHLEN); + len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC); + len = MIN(len, MAXPATHLEN); + + /* + * vnode_update_identity will do the following for us: + * 1) release reference on the existing rsrc vnode's name. + * 2) attach the new name to the resource vnode + * 3) update the vnode's vid + */ + vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE)); + + /* Free the memory associated with the resource fork's name */ + hfs_free(rsrc_path); + } +out: + if (got_cookie) + { + cat_postflight(hfsmp, &cookie); + } + if (started_tr) + { + hfs_end_transaction(hfsmp); + } + + fdvp->sExtraData.sDirData.uDirVersion++; + fdcp->c_flag &= ~C_DIR_MODIFICATION; + //TBD - We have wakeup here but can't see anyone who's msleeping on c_flag... 
+// wakeup((caddr_t)&fdcp->c_flag); + + if (fdvp != tdvp) + { + tdvp->sExtraData.sDirData.uDirVersion++; + tdcp->c_flag &= ~C_DIR_MODIFICATION; + //TBD - We have wakeup here but can't see anyone who's msleeping on c_flag... +// wakeup((caddr_t)&tdcp->c_flag); + + } + + /* Now vnode_put the resource forks vnodes if necessary */ + if (tvp_rsrc) + { + hfs_chash_lower_OpenLookupCounter(tcp); + hfs_free(tvp_rsrc); + tvp_rsrc = NULL; + } + + hfs_unlockfour(fdcp, fcp, tdcp, tcp); + + if (took_trunc_lock) + { + hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT); + } + + /* After tvp is removed the only acceptable error is EIO */ + if (error && tvp_deleted) + error = EIO; + + return (error); +} + +/* + * link vnode operation + * + * IN vnode_t a_vp; + * IN vnode_t a_tdvp; + * IN struct componentname *a_cnp; + * IN vfs_context_t a_context; + */ +int +hfs_vnop_link(vnode_t vp, vnode_t tdvp, struct componentname *cnp) +{ + struct hfsmount *hfsmp = VTOHFS(vp);; + struct cnode *cp = VTOC(vp);; + struct cnode *tdcp; + struct cnode *fdcp = NULL; + struct cat_desc todesc; + cnid_t parentcnid; + int lockflags = 0; + int intrans = 0; + enum vtype v_type = vp->sFSParams.vnfs_vtype; + int error, ret; + + /* + * For now, return ENOTSUP for a symlink target. This can happen + * for linkat(2) when called without AT_SYMLINK_FOLLOW. + */ + if (v_type == VLNK || v_type == VDIR) + return (EPERM ); + + /* Make sure our private directory exists. */ + if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0) { + return (ENOTSUP); + } + + if (hfs_freeblks(hfsmp, 0) == 0) { + return (ENOSPC); + } + + /* Lock the cnodes. 
*/ + if ((error = hfs_lockpair(VTOC(tdvp), VTOC(vp), HFS_EXCLUSIVE_LOCK))) { + return (error); + } + + tdcp = VTOC(tdvp); + /* grab the parent CNID from originlist after grabbing cnode locks */ + parentcnid = hfs_currentparent(cp, /* have_lock: */ true); + + if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) { + error = ENOENT; + goto out; + } + + /* Check the source for errors: + * too many links, immutable, race with unlink + */ + if (cp->c_linkcount >= HFS_LINK_MAX) { + error = EMLINK; + goto out; + } + if (cp->c_bsdflags & (UF_IMMUTABLE | SF_IMMUTABLE | UF_APPEND | SF_APPEND)) { + error = EPERM; + goto out; + } + if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { + error = ENOENT; + goto out; + } + + tdcp->c_flag |= C_DIR_MODIFICATION; + + if (hfs_start_transaction(hfsmp) != 0) { + error = EINVAL; + goto out; + } + intrans = 1; + + todesc.cd_flags = (v_type == VDIR) ? CD_ISDIR : 0; + todesc.cd_encoding = 0; + todesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; + todesc.cd_namelen = cnp->cn_namelen; + todesc.cd_parentcnid = tdcp->c_fileid; + todesc.cd_hint = 0; + todesc.cd_cnid = 0; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); + + /* If destination exists then we lost a race with create. */ + if (cat_lookup(hfsmp, &todesc, 0, NULL, NULL, NULL, NULL) == 0) { + error = EEXIST; + goto out; + } + if (cp->c_flag & C_HARDLINK) { + struct cat_attr cattr; + + /* If inode is missing then we lost a race with unlink. */ + if ((cat_idlookup(hfsmp, cp->c_fileid, 0, 0, NULL, &cattr, NULL) != 0) || + (cattr.ca_fileid != cp->c_fileid)) { + error = ENOENT; + goto out; + } + } else { + cnid_t fileid; + + /* If source is missing then we lost a race with unlink. */ + if ((cat_lookup(hfsmp, &cp->c_desc, 0, NULL, NULL, NULL, &fileid) != 0) || + (fileid != cp->c_fileid)) { + error = ENOENT; + goto out; + } + } + /* + * All directory links must reside in an non-ARCHIVED hierarchy. 
+ */ + if (v_type == VDIR) { + /* + * - Source parent and destination parent cannot match + * - A link is not permitted in the root directory + * - Parent of 'pointed at' directory is not the root directory + * - The 'pointed at' directory (source) is not an ancestor + * of the new directory hard link (destination). + * - No ancestor of the new directory hard link (destination) + * is a directory hard link. + */ + if ((parentcnid == tdcp->c_fileid) || + (tdcp->c_fileid == kHFSRootFolderID) || + (parentcnid == kHFSRootFolderID) || + cat_check_link_ancestry(hfsmp, tdcp->c_fileid, cp->c_fileid)) { + error = EPERM; /* abide by the rules, you did not */ + goto out; + } + } + hfs_systemfile_unlock(hfsmp, lockflags); + lockflags = 0; + + cp->c_linkcount++; + cp->c_flag |= C_MODIFIED; + cp->c_touch_chgtime = TRUE; + error = hfs_makelink(hfsmp, vp, cp, tdcp, cnp); + if (error) { + cp->c_linkcount--; + hfs_volupdate(hfsmp, VOL_UPDATE, 0); + } else { + /* Update the target directory and volume stats */ + tdcp->c_entries++; + if (v_type == VDIR) { + INC_FOLDERCOUNT(hfsmp, tdcp->c_attr); + tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask; + + /* Set kHFSHasChildLinkBit in the destination hierarchy */ + error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid); + if (error) { + LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_link: error updating destination parent chain for id=%u, vol=%s\n", tdcp->c_cnid, hfsmp->vcbVN); + } + } + tdcp->c_dirchangecnt++; + tdcp->c_flag |= C_MODIFIED; + hfs_incr_gencount(tdcp); + tdcp->c_touch_chgtime = TRUE; + tdcp->c_touch_modtime = TRUE; + + error = hfs_update(tdvp, 0); + if (error) { + if (error != EIO && error != ENXIO) { + LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_link: error %d updating tdvp %p\n", error, tdvp); + error = EIO; + } + hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE); + } + + hfs_volupdate(hfsmp, VOL_MKFILE, (tdcp->c_cnid == kHFSRootFolderID)); + } + + if (error == 0 && (ret = hfs_update(vp, 0)) != 0) { + if (ret != EIO && ret != ENXIO) + 
LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_link: error %d updating vp @ %p\n", ret, vp); + hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE); + } + +out: + if (lockflags) { + hfs_systemfile_unlock(hfsmp, lockflags); + } + if (intrans) { + hfs_end_transaction(hfsmp); + } + + tdcp->c_flag &= ~C_DIR_MODIFICATION; + //TBD - We have wakeup here but can't see anyone who's msleeping on c_flag... +// wakeup((caddr_t)&tdcp->c_flag); + + if (fdcp) { + hfs_unlockfour(tdcp, cp, fdcp, NULL); + } else { + hfs_unlockpair(tdcp, cp); + } + + return (error); +} + +int hfs_removefile_callback(GenericLFBuf *psBuff, void *pvArgs) { + + journal_kill_block(((struct hfsmount *)pvArgs)->jnl, psBuff); + + return (0); +} + diff --git a/livefiles_hfs_plugin/lf_hfs_vnops.h b/livefiles_hfs_plugin/lf_hfs_vnops.h new file mode 100644 index 0000000..6c1d1f6 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_vnops.h @@ -0,0 +1,51 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_vnops.h + * livefiles_hfs + * + * Created by Or Haimovich on 20/3/18. 
+ */ + +#ifndef lf_hfs_vnops_h +#define lf_hfs_vnops_h + +#include "lf_hfs_cnode.h" +#include "lf_hfs.h" +#include "lf_hfs_generic_buf.h" +#include + +/* VNOP_READDIR flags: */ +#define VNODE_READDIR_EXTENDED 0x0001 /* use extended directory entries */ +#define VNODE_READDIR_REQSEEKOFF 0x0002 /* requires seek offset (cookies) */ +#define VNODE_READDIR_SEEKOFF32 0x0004 /* seek offset values should fit in 32 bits */ +#define VNODE_READDIR_NAMEMAX 0x0008 /* For extended readdir, try to limit names to NAME_MAX bytes */ + +/* + * flags for VNOP_BLOCKMAP + */ +#define VNODE_READ 0x01 +#define VNODE_WRITE 0x02 +#define VNODE_BLOCKMAP_NO_TRACK 0x04 + +void replace_desc(struct cnode *cp, struct cat_desc *cdp); +int hfs_vnop_readdir(vnode_t vp, int *eofflag, int *numdirent, ReadDirBuff_s* psReadDirBuffer, uint64_t puCookie, int flags); +int hfs_vnop_readdirattr(vnode_t vp, int *eofflag, int *numdirent, ReadDirBuff_s* psReadDirBuffer, uint64_t puCookie); +int hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode); +int hfs_vnop_remove(struct vnode* psParentDir,struct vnode *psFileToRemove, struct componentname* psCN, int iFlags); +int hfs_vnop_rmdir(struct vnode *dvp, struct vnode *vp, struct componentname* psCN); +int hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int skip_reserve, int only_unlink); +int hfs_vnop_setattr(vnode_t vp, const UVFSFileAttributes *attr); +int hfs_update(struct vnode *vp, int options); +const struct cat_fork * hfs_prepare_fork_for_update(filefork_t *ff, const struct cat_fork *cf, struct cat_fork *cf_buf, uint32_t block_size); +int hfs_vnop_readlink(struct vnode *vp, void* data, size_t dataSize, size_t *actuallyRead); +int hfs_vnop_create(vnode_t a_dvp, vnode_t *a_vpp, struct componentname *a_cnp, UVFSFileAttributes* a_vap); +int hfs_vnop_mkdir(vnode_t a_dvp, vnode_t *a_vpp, struct componentname *a_cnp, UVFSFileAttributes* a_vap); +int hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct 
componentname *cnp, UVFSFileAttributes *psGivenAttr); +int hfs_vnop_symlink(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, char* symlink_content, UVFSFileAttributes *attrp); + +int hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int skip_reserve, int only_unlink); +int hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int flags, int skip_reserve, int allow_dirs, int only_unlink); +int hfs_vnop_renamex(struct vnode *fdvp,struct vnode *fvp, struct componentname *fcnp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp); +int hfs_vnop_link(vnode_t vp, vnode_t tdvp, struct componentname *cnp); +int hfs_removefile_callback(GenericLFBuf *psBuff, void *pvArgs); +#endif /* lf_hfs_vnops_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_volume_allocation.c b/livefiles_hfs_plugin/lf_hfs_volume_allocation.c new file mode 100644 index 0000000..c709205 --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_volume_allocation.c @@ -0,0 +1,4790 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. +* +* lf_hfs_volume_allocation.c +* livefiles_hfs +* +* Created by Or Haimovich on 22/3/18. +*/ + +#include + +#include "lf_hfs_volume_allocation.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_format.h" +#include "lf_hfs_locks.h" +#include "lf_hfs_rangelist.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_readwrite_ops.h" +#include "lf_hfs_utils.h" +#include "lf_hfs_journal.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_vfsops.h" +#include "lf_hfs_generic_buf.h" + +#pragma clang diagnostic ignored "-Waddress-of-packed-member" + +enum { + /* + * HFSDBG_ALLOC_ENABLED: Log calls to BlockAllocate and + * BlockDeallocate, including the internal BlockAllocateXxx + * routines so we can see how an allocation was satisfied. + * + * HFSDBG_EXT_CACHE_ENABLED: Log routines that read or write the + * free extent cache. 
+ * + * HFSDBG_UNMAP_ENABLED: Log events involving the trim list. + * + * HFSDBG_BITMAP_ENABLED: Log accesses to the volume bitmap (setting + * or clearing bits, scanning the bitmap). + */ + HFSDBG_ALLOC_ENABLED = 1, + HFSDBG_EXT_CACHE_ENABLED = 2, + HFSDBG_UNMAP_ENABLED = 4, + HFSDBG_BITMAP_ENABLED = 8 +}; + +enum { + kBytesPerWord = 4, + kBitsPerByte = 8, + kBitsPerWord = 32, + + kBitsWithinWordMask = kBitsPerWord-1 +}; + +#define kLowBitInWordMask 0x00000001ul +#define kHighBitInWordMask 0x80000000ul +#define kAllBitsSetInWord 0xFFFFFFFFul + +#define HFS_MIN_SUMMARY_BLOCKSIZE 4096 + +#define ALLOC_DEBUG 0 + +static OSErr ReadBitmapBlock( + ExtendedVCB *vcb, + u_int32_t bit, + u_int32_t **buffer, + GenericLFBufPtr *blockRef, + hfs_block_alloc_flags_t flags); + +static OSErr ReleaseBitmapBlock( + ExtendedVCB *vcb, + GenericLFBufPtr blockRef, + Boolean dirty); + +static OSErr hfs_block_alloc_int(hfsmount_t *hfsmp, + HFSPlusExtentDescriptor *extent, + hfs_block_alloc_flags_t flags, + hfs_alloc_extra_args_t *ap); + +static OSErr BlockFindAny( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t endingBlock, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + Boolean trustSummary, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks); + +static OSErr BlockFindAnyBitmap( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t endingBlock, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks); + +static OSErr BlockFindContig( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t minBlocks, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks); + +static OSErr BlockFindContiguous( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t endingBlock, + u_int32_t minBlocks, + u_int32_t maxBlocks, + Boolean useMetaZone, + Boolean trustSummary, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks, + 
hfs_block_alloc_flags_t flags); + +static OSErr BlockFindKnown( + ExtendedVCB *vcb, + u_int32_t maxBlocks, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks); + +static OSErr hfs_alloc_try_hard(hfsmount_t *hfsmp, + HFSPlusExtentDescriptor *extent, + uint32_t max_blocks, + hfs_block_alloc_flags_t flags); + +static OSErr BlockMarkAllocatedInternal ( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t numBlocks, + hfs_block_alloc_flags_t flags); + +static OSErr BlockMarkFreeInternal( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t numBlocks, + Boolean do_validate); + + +static OSErr ReadBitmapRange (struct hfsmount *hfsmp, uint32_t offset, uint32_t iosize, + uint32_t **buffer, GenericLFBuf **blockRef); + +static OSErr ReleaseScanBitmapRange( GenericLFBufPtr bp ); + +static int hfs_track_unmap_blocks (struct hfsmount *hfsmp, u_int32_t offset, + u_int32_t numBlocks, struct jnl_trim_list *list); + +static int hfs_alloc_scan_range(struct hfsmount *hfsmp, + u_int32_t startbit, + u_int32_t *bitToScan, + struct jnl_trim_list *list); + +static int hfs_scan_range_size (struct hfsmount* hfsmp, uint32_t start, uint32_t *iosize); +/* Bitmap Re-use Detection */ +static inline int extents_overlap (uint32_t start1, uint32_t len1, + uint32_t start2, uint32_t len2) { + return !( ((start1 + len1) <= start2) || ((start2 + len2) <= start1) ); +} + +/* Summary Table Functions */ +static int hfs_set_summary (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t inuse); +static int hfs_get_summary_index (struct hfsmount *hfsmp, uint32_t block, uint32_t *index); +static int hfs_find_summary_free (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock); +static int hfs_get_summary_allocblock (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t *alloc); +static int hfs_release_summary (struct hfsmount *hfsmp, uint32_t start, uint32_t length); +static int hfs_check_summary (struct hfsmount *hfsmp, uint32_t start, uint32_t *freeblocks); + +/* Used in external 
mount code to initialize the summary table */
+int hfs_init_summary (struct hfsmount *hfsmp);
+
+#if ALLOC_DEBUG
+void hfs_validate_summary (struct hfsmount *hfsmp);
+#endif
+
+
+/* Functions for manipulating free extent cache */
+static void remove_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount);
+static Boolean add_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount);
+static void sanity_check_free_ext(struct hfsmount *hfsmp, int check_allocated);
+
+/* Returns a tentative/locked reservation's blocks to the free pool. */
+static void hfs_release_reserved(hfsmount_t *hfsmp, struct rl_entry *range, int list);
+
+
+#if ALLOC_DEBUG
+/*
+ * Validation Routine to verify that the TRIM list maintained by the journal
+ * is in good shape relative to what we think the bitmap should have. We should
+ * never encounter allocated blocks in the TRIM list, so if we ever encounter them,
+ * we panic.
+ */
+int trim_validate_bitmap (struct hfsmount *hfsmp);
+int trim_validate_bitmap (struct hfsmount *hfsmp) {
+    u_int64_t blockno_offset;
+    u_int64_t numblocks;
+    int i;
+    int count;
+    u_int32_t startblk;
+    u_int32_t blks;
+    int err = 0;
+    uint32_t alloccount = 0;
+
+    /* Only journaled volumes with an active transaction carry a trim list. */
+    if (hfsmp->jnl) {
+        struct journal *jnl = (struct journal*)hfsmp->jnl;
+        if (jnl->active_tr) {
+            struct jnl_trim_list *trim = &(jnl->active_tr->trim);
+            count = trim->extent_count;
+            for (i = 0; i < count; i++) {
+                /* Convert the extent's byte offset/length back into allocation blocks. */
+                blockno_offset = trim->extents[i].offset;
+                blockno_offset = blockno_offset - (uint64_t)hfsmp->hfsPlusIOPosOffset;
+                blockno_offset = blockno_offset / hfsmp->blockSize;
+                numblocks = trim->extents[i].length / hfsmp->blockSize;
+
+                startblk = (u_int32_t)blockno_offset;
+                blks = (u_int32_t) numblocks;
+                /* Any allocated block inside a queued TRIM extent is fatal. */
+                err = hfs_count_allocated (hfsmp, startblk, blks, &alloccount);
+
+                if (err == 0 && alloccount != 0) {
+                    LFHFS_LOG(LEVEL_ERROR, "trim_validate_bitmap: %d blocks @ ABN %d are allocated!", alloccount, startblk);
+                    hfs_assert(0);
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+#endif
+
+/*
+ 
;________________________________________________________________________________
+ ;
+ ; Routine:   hfs_issue_unmap
+ ;
+ ; Function:  Issue a DKIOCUNMAP for all blocks currently tracked by the jnl_trim_list
+ ;
+ ; Input Arguments:
+ ;    hfsmp - The volume containing the allocation blocks.
+ ;    list  - The list of currently tracked trim ranges.
+ ;________________________________________________________________________________
+ */

+static int hfs_issue_unmap (struct hfsmount *hfsmp, struct jnl_trim_list *list)
+{
+    dk_unmap_t unmap;
+    int error = 0;
+
+    /* Nothing to issue if no extents have been queued. */
+    if (list->extent_count > 0 && list->extents != NULL) {
+        bzero(&unmap, sizeof(unmap));
+        unmap.extents = list->extents;
+        unmap.extentsCount = list->extent_count;
+
+        /* Issue a TRIM and flush them out */
+        error = ioctl(hfsmp->hfs_devvp->psFSRecord->iFD, DKIOCUNMAP, &unmap);
+
+        /* The queue is reset even if the ioctl failed; the error is returned. */
+        bzero (list->extents, (list->allocated_count * sizeof(dk_extent_t)));
+        bzero (&unmap, sizeof(unmap));
+        list->extent_count = 0;
+    }
+
+    return error;
+}
+
+/*
+ ;________________________________________________________________________________
+ ;
+ ; Routine:   hfs_track_unmap_blocks
+ ;
+ ; Function:  Make note of a range of allocation blocks that should be
+ ;            unmapped (trimmed). That is, the given range of blocks no
+ ;            longer have useful content, and the device can unmap the
+ ;            previous contents. For example, a solid state disk may reuse
+ ;            the underlying storage for other blocks.
+ ;
+ ;            This routine is only supported for journaled volumes.
+ ;
+ ;            *****NOTE*****:
+ ;            This function should *NOT* be used when the volume is fully
+ ;            mounted. This function is intended to support a bitmap iteration
+ ;            at mount time to fully inform the SSD driver of the state of all blocks
+ ;            at mount time, and assumes that there is no allocation/deallocation
+ ;            interference during its iteration.,
+ ;
+ ; Input Arguments:
+ ;    hfsmp  - The volume containing the allocation blocks.
+ ;    start  - The first allocation block of the extent being freed.
+ ;    numBlocks - The number of allocation blocks of the extent being freed.
+ ;    list      - The list of currently tracked trim ranges.
+ ;________________________________________________________________________________
+ */
+static int hfs_track_unmap_blocks (struct hfsmount *hfsmp, u_int32_t start, u_int32_t numBlocks, struct jnl_trim_list *list) {
+    u_int64_t offset;
+    u_int64_t length;
+    int error = 0;
+
+    /* Only meaningful on journaled volumes that advertise UNMAP support
+     * and whose trim list has an allocated extent buffer. */
+    if ((hfsmp->jnl != NULL))
+    {
+        if ((hfsmp->hfs_flags & HFS_UNMAP) && list->allocated_count && list->extents != NULL)
+        {
+
+            int extent_no = list->extent_count;
+            /* Convert allocation blocks to a device byte range. */
+            offset = (u_int64_t) start * hfsmp->blockSize + (u_int64_t) hfsmp->hfsPlusIOPosOffset;
+            length = (u_int64_t) numBlocks * hfsmp->blockSize;
+
+            list->extents[extent_no].offset = offset;
+            list->extents[extent_no].length = length;
+            list->extent_count++;
+            /* Flush when full; hfs_issue_unmap resets extent_count to 0,
+             * so extent_no never runs past allocated_count. */
+            if (list->extent_count == list->allocated_count) {
+                error = hfs_issue_unmap (hfsmp, list);
+            }
+        }
+    }
+
+    return error;
+}
+
+/*
+ ;________________________________________________________________________________
+ ;
+ ; Routine:   hfs_trim_callback
+ ;
+ ; Function:  This function is called when a transaction that freed extents
+ ;            (via hfs_unmap_free_extent/journal_trim_add_extent) has been
+ ;            written to the on-disk journal.  This routine will add those
+ ;            extents to the free extent cache so that they can be reused.
+ ;
+ ;            CAUTION: This routine is called while the journal's trim lock
+ ;            is held shared, so that no other thread can reuse any portion
+ ;            of those extents.  We must be very careful about which locks
+ ;            we take from within this callback, to avoid deadlock.  The
+ ;            call to add_free_extent_cache will end up taking the cache's
+ ;            lock (just long enough to add these extents to the cache).
+ ;
+ ;            CAUTION: If the journal becomes invalid (eg., due to an I/O
+ ;            error when trying to write to the journal), this callback
+ ;            will stop getting called, even if extents got freed before
+ ;            the journal became invalid!
+ ; + ; Input Arguments: + ; arg - The hfsmount of the volume containing the extents. + ; extent_count - The number of extents freed in the transaction. + ; extents - An array of extents (byte ranges) that were freed. + ;________________________________________________________________________________ + */ + +void +hfs_trim_callback(void *arg, uint32_t extent_count, const dk_extent_t *extents) +{ + uint32_t i; + uint32_t startBlock, numBlocks; + struct hfsmount *hfsmp = arg; + + for (i=0; ihfsPlusIOPosOffset) / hfsmp->blockSize); + numBlocks = (uint32_t)(extents[i].length / hfsmp->blockSize); + (void) add_free_extent_cache(hfsmp, startBlock, numBlocks); + } +} + + +/* + ;________________________________________________________________________________ + ; + ; Routine: ScanUnmapBlocks + ; + ; Function: Traverse the bitmap, and potentially issue DKIOCUNMAPs to the underlying + ; device as needed so that the underlying disk device is as + ; up-to-date as possible with which blocks are unmapped. + ; Additionally build up the summary table as needed. + ; + ; This function reads the bitmap in large block size + ; (up to 1MB) unlike the runtime which reads the bitmap + ; in 4K block size. So if this function is being called + ; after the volume is mounted and actively modified, the + ; caller needs to invalidate all of the existing buffers + ; associated with the bitmap vnode before calling this + ; function. If the buffers are not invalidated, it can + ; cause buf_t collision and potential data corruption. + ; + ; Input Arguments: + ; hfsmp - The volume containing the allocation blocks. 
+ ;________________________________________________________________________________
+ */
+
+u_int32_t ScanUnmapBlocks (struct hfsmount *hfsmp)
+{
+    u_int32_t blocks_scanned = 0;
+    int error = 0;
+    struct jnl_trim_list trimlist;
+
+    /*
+     *struct jnl_trim_list {
+     uint32_t    allocated_count;
+     uint32_t    extent_count;
+     dk_extent_t *extents;
+     };
+     */
+    bzero (&trimlist, sizeof(trimlist));
+
+    /*
+     * Any trim related work should be tied to whether the underlying
+     * storage media supports UNMAP, as any solid state device would
+     * on desktop or embedded.
+     *
+     * We do this because we may want to scan the full bitmap on
+     * desktop for spinning media for the purposes of building up the
+     * summary table.
+     *
+     * We also avoid sending TRIMs down to the underlying media if the
+     * mount is read-only.
+     */
+
+    if ((hfsmp->hfs_flags & HFS_UNMAP) &&
+        ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
+        /* If the underlying device supports unmap and the mount is read-write, initialize */
+        /* Size the extent buffer so the trim list fills one page. */
+        int alloc_count = ((u_int32_t)PAGE_SIZE) / sizeof(dk_extent_t);
+        void *extents = hfs_malloc(alloc_count * sizeof(dk_extent_t));
+        trimlist.extents = (dk_extent_t*)extents;
+        trimlist.allocated_count = alloc_count;
+        trimlist.extent_count = 0;
+    }
+
+    /* Walk the whole bitmap; hfs_alloc_scan_range advances blocks_scanned. */
+    while ((blocks_scanned < hfsmp->totalBlocks) && (error == 0)){
+
+        error = hfs_alloc_scan_range (hfsmp, blocks_scanned, &blocks_scanned, &trimlist);
+
+        if (error) {
+            LFHFS_LOG(LEVEL_DEBUG, "ScanUnmapBlocks: bitmap scan range error: %d on vol=%s\n", error, hfsmp->vcbVN);
+            break;
+        }
+    }
+
+    /* Flush any remaining queued extents and release the trim buffer. */
+    if ((hfsmp->hfs_flags & HFS_UNMAP) && ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
+        if (error == 0) {
+            hfs_issue_unmap(hfsmp, &trimlist);
+        }
+        if (trimlist.extents) {
+            hfs_free(trimlist.extents);
+        }
+    }
+
+    /*
+     * This is in an #if block because hfs_validate_summary prototype and function body
+     * will only show up if ALLOC_DEBUG is on, to save wired memory ever so slightly.
+     */
+#if ALLOC_DEBUG
+    sanity_check_free_ext(hfsmp, 1);
+    if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
+        /* Validate the summary table too! */
+        hfs_validate_summary(hfsmp);
+        LFHFS_LOG(LEVEL_DEBUG, "ScanUnmapBlocks: Summary validation complete on %s\n", hfsmp->vcbVN);
+    }
+#endif
+
+    return error;
+}
+
+/*
+ * Record a block reservation [start, start + count) on the given reserved-range
+ * list (indexes hfsmp->hfs_reserved_ranges); rl_end is inclusive.  The new
+ * rl_entry is returned through *reservation; the caller owns its lifetime
+ * (released via hfs_free_tentative/hfs_free_locked).
+ */
+static void add_to_reserved_list(hfsmount_t *hfsmp, uint32_t start,
+                                 uint32_t count, int list,
+                                 struct rl_entry **reservation)
+{
+    struct rl_entry *range, *next_range;
+
+    if (list == HFS_TENTATIVE_BLOCKS) {
+        int nranges = 0;
+        // Don't allow more than 4 tentative reservations
+        TAILQ_FOREACH_SAFE(range, &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS],
+                           rl_link, next_range) {
+            if (++nranges > 3)
+                hfs_release_reserved(hfsmp, range, HFS_TENTATIVE_BLOCKS);
+        }
+    }
+
+    range = hfs_malloc(sizeof(*range));
+    range->rl_start = start;
+    range->rl_end = start + count - 1;
+    TAILQ_INSERT_HEAD(&hfsmp->hfs_reserved_ranges[list], range, rl_link);
+    *reservation = range;
+}
+
+/*
+ * Detach a reservation from its list and return its blocks to the summary
+ * table and free extent cache.  rl_start == -1 marks an already-released
+ * entry (the sentinel set at the bottom), making this idempotent.
+ */
+static void hfs_release_reserved(hfsmount_t *hfsmp,
+                                 struct rl_entry *range,
+                                 int list)
+{
+    if (range->rl_start == -1)
+        return;
+
+    TAILQ_REMOVE(&hfsmp->hfs_reserved_ranges[list], range, rl_link);
+
+    if (rl_len(range) > 0) {
+        /* Give the blocks back to the appropriate per-volume counter. */
+        if (list == HFS_TENTATIVE_BLOCKS)
+            hfsmp->tentativeBlocks -= rl_len(range);
+        else {
+            hfs_assert(hfsmp->lockedBlocks >= rl_len(range));
+            hfsmp->lockedBlocks -= rl_len(range);
+        }
+        hfs_release_summary(hfsmp, (uint32_t)range->rl_start, (uint32_t)rl_len(range));
+        add_free_extent_cache(hfsmp, (uint32_t)range->rl_start, (uint32_t)rl_len(range));
+    }
+
+    /* Mark released: empty inclusive range (-1, -2). */
+    range->rl_start = -1;
+    range->rl_end = -2;
+}
+
+/* Release the reservation (if any) and free the rl_entry; *reservation is NULLed. */
+static void hfs_free_locked_internal(hfsmount_t *hfsmp,
+                                     struct rl_entry **reservation,
+                                     int list)
+{
+    if (*reservation) {
+        hfs_release_reserved(hfsmp, *reservation, list);
+        hfs_free(*reservation);
+        *reservation = NULL;
+    }
+}
+
+void hfs_free_tentative(hfsmount_t *hfsmp, struct rl_entry **reservation)
+{
+    hfs_free_locked_internal(hfsmp, reservation, 
HFS_TENTATIVE_BLOCKS);
+}
+
+/* Release a HFS_LOCKED_BLOCKS reservation (counterpart of hfs_free_tentative). */
+void hfs_free_locked(hfsmount_t *hfsmp, struct rl_entry **reservation)
+{
+    hfs_free_locked_internal(hfsmp, reservation, HFS_LOCKED_BLOCKS);
+}
+
+/*
+ * Legacy-style wrapper: packs the (startingBlock, minBlocks, maxBlocks)
+ * request into an extent + extra-args pair for hfs_block_alloc_int and
+ * unpacks the resulting extent into the two out-parameters.
+ */
+OSErr BlockAllocate (
+       hfsmount_t *hfsmp,                /* which volume to allocate space on */
+       u_int32_t startingBlock,          /* preferred starting block, or 0 for no preference */
+       u_int32_t minBlocks,              /* desired number of blocks to allocate */
+       u_int32_t maxBlocks,              /* maximum number of blocks to allocate */
+       hfs_block_alloc_flags_t flags,    /* option flags */
+       u_int32_t *actualStartBlock,      /* actual first block of allocation */
+       u_int32_t *actualNumBlocks)
+{
+    hfs_alloc_extra_args_t extra_args = {
+        .max_blocks = maxBlocks
+    };
+
+    HFSPlusExtentDescriptor extent = { startingBlock, minBlocks };
+
+    OSErr err = hfs_block_alloc_int(hfsmp, &extent, flags, &extra_args);
+
+    *actualStartBlock = extent.startBlock;
+    *actualNumBlocks = extent.blockCount;
+
+    return err;
+}
+
+/* errno-returning wrapper: translates the Mac OSErr from the allocator via MacToVFSError. */
+errno_t hfs_block_alloc(hfsmount_t *hfsmp,
+                        HFSPlusExtentDescriptor *extent,
+                        hfs_block_alloc_flags_t flags,
+                        hfs_alloc_extra_args_t *ap)
+{
+    return MacToVFSError(hfs_block_alloc_int(hfsmp, extent, flags, ap));
+}
+
+/*
+ ;________________________________________________________________________________
+ ;
+ ; Routine:   hfs_block_alloc_int
+ ;
+ ; Function:  Allocate space on a volume.  If contiguous allocation is requested,
+ ;            at least the requested number of bytes will be allocated or an
+ ;            error will be returned.  If contiguous allocation is not forced,
+ ;            the space will be allocated with the first largest extent available
+ ;            at the requested starting allocation block.  If there is not enough
+ ;            room there, a block allocation of less than the requested size will be
+ ;            allocated.
+ ;
+ ;            If the requested starting block is 0 (for new file allocations),
+ ;            the volume's allocation block pointer will be used as a starting
+ ;            point.
+ ;
+ ; Input Arguments:
+ ;    hfsmp - Pointer to the HFS mount structure.
+ ; extent - startBlock indicates the block to start + ; searching from and blockCount is the number of + ; blocks required. Depending on the flags used, + ; more or less blocks may be returned. The + ; allocated extent is returned via this + ; parameter. + ; flags - Flags to specify options like contiguous, use + ; metadata zone, skip free block check, etc. + ; ap - Additional arguments used depending on flags. + ; See hfs_alloc_extra_args_t and below. + ; + ; Output: + ; (result) - Error code, zero for successful allocation + ; extent - If successful, the allocated extent. + ; + ; Side effects: + ; The volume bitmap is read and updated; the volume bitmap cache may be changed. + ; + ; HFS_ALLOC_TENTATIVE + ; Blocks will be reserved but not marked allocated. They can be + ; stolen if free space is limited. Tentative blocks can be used by + ; passing HFS_ALLOC_USE_TENTATIVE and passing in the resevation. + ; @ap->reservation_out is used to store the reservation. + ; + ; HFS_ALLOC_USE_TENTATIVE + ; Use blocks previously returned with HFS_ALLOC_TENTATIVE. + ; @ap->reservation_in should be set to whatever @ap->reservation_out + ; was set to when HFS_ALLOC_TENTATIVE was used. If the tentative + ; reservation was stolen, a normal allocation will take place. + ; + ; HFS_ALLOC_LOCKED + ; Blocks will be reserved but not marked allocated. Unlike tentative + ; reservations they cannot be stolen. It is safe to write to these + ; blocks. @ap->reservation_out is used to store the reservation. + ; + ; HFS_ALLOC_COMMIT + ; This will take blocks previously returned with HFS_ALLOC_LOCKED and + ; mark them allocated on disk. @ap->reservation_in is used. + ; + ; HFS_ALLOC_ROLL_BACK + ; Take blocks that were just recently deallocated and mark them + ; allocated. This is for roll back situations. Blocks got + ; deallocated and then something went wrong and we need to roll back + ; by marking the blocks allocated. 
+ ; + ; HFS_ALLOC_FORCECONTIG + ; It will not return fewer than @min_blocks. + ; + ; HFS_ALLOC_TRY_HARD + ; We will perform an exhaustive search to try and find @max_blocks. + ; It will not return fewer than @min_blocks. + ; + ;________________________________________________________________________________ + */ +OSErr hfs_block_alloc_int(hfsmount_t *hfsmp, + HFSPlusExtentDescriptor *extent, + hfs_block_alloc_flags_t flags, + hfs_alloc_extra_args_t *ap) +{ + OSErr err = 0; + u_int32_t freeBlocks; + Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated + Boolean forceContiguous = false; + Boolean forceFlush; + + uint32_t startingBlock = extent->startBlock; + uint32_t minBlocks = extent->blockCount; + uint32_t maxBlocks = (ap && ap->max_blocks) ? ap->max_blocks : minBlocks; + + if (ISSET(flags, HFS_ALLOC_COMMIT)) { + if (ap == NULL || ap->reservation_in == NULL) { + err = paramErr; + goto exit; + } + extent->startBlock = (uint32_t)(*ap->reservation_in)->rl_start; + extent->blockCount = (uint32_t)rl_len(*ap->reservation_in); + goto mark_allocated; + } + + if (ISSET(flags, HFS_ALLOC_ROLL_BACK)) + goto mark_allocated; + + freeBlocks = hfs_freeblks(hfsmp, 0); + + if (ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) { + if (ap == NULL || ap->reservation_in == NULL) { + err = paramErr; + goto exit; + } + struct rl_entry *range = *ap->reservation_in; + + if (range && range->rl_start != -1) { + /* + * It's possible that we have a tentative reservation + * but there aren't enough free blocks due to loaned blocks + * or insufficient space in the backing store. + */ + uint32_t count = (uint32_t)min(min(maxBlocks, rl_len(range)), freeBlocks); + + if (count >= minBlocks) { + extent->startBlock = (uint32_t)range->rl_start; + extent->blockCount = count; + + // Should we go straight to commit? 
+ if (!ISSET(flags, HFS_ALLOC_LOCKED)) + SET(flags, HFS_ALLOC_COMMIT); + + goto mark_allocated; + } + } + + /* + * We can't use the tentative reservation so free it and allocate + * normally. + */ + hfs_free_tentative(hfsmp, ap->reservation_in); + CLR(flags, HFS_ALLOC_USE_TENTATIVE); + } + + if (ISSET(flags, HFS_ALLOC_FORCECONTIG | HFS_ALLOC_TRY_HARD)) + forceContiguous = true; + + if (flags & HFS_ALLOC_FLUSHTXN) { + forceFlush = true; + } + else { + forceFlush = false; + } + + hfs_assert(hfsmp->freeBlocks >= hfsmp->tentativeBlocks); + + // See if we have to steal tentative blocks + if (freeBlocks < hfsmp->tentativeBlocks + minBlocks) + SET(flags, HFS_ALLOC_IGNORE_TENTATIVE); + + /* Skip free block check if blocks are being allocated for relocating + * data during truncating a volume. + * + * During hfs_truncatefs(), the volume free block count is updated + * before relocating data to reflect the total number of free blocks + * that will exist on the volume after resize is successful. This + * means that we have reserved allocation blocks required for relocating + * the data and hence there is no need to check the free blocks. + * It will also prevent resize failure when the number of blocks in + * an extent being relocated is more than the free blocks that will + * exist after the volume is resized. + */ + if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) { + // If the disk is already full, don't bother. + if (freeBlocks == 0) { + err = dskFulErr; + goto exit; + } + if (forceContiguous && freeBlocks < minBlocks) { + err = dskFulErr; + goto exit; + } + + /* + * Clip if necessary so we don't over-subscribe the free blocks. 
+ */ + if (minBlocks > freeBlocks) { + minBlocks = freeBlocks; + } + if (maxBlocks > freeBlocks) { + maxBlocks = freeBlocks; + } + } + + if (ISSET(flags, HFS_ALLOC_TRY_HARD)) { + err = hfs_alloc_try_hard(hfsmp, extent, maxBlocks, flags); + if (err) + goto exit; + + goto mark_allocated; + } + + // + // If caller didn't specify a starting block number, then use the volume's + // next block to allocate from. + // + if (startingBlock == 0) { + hfs_lock_mount (hfsmp); + startingBlock = hfsmp->nextAllocation; + hfs_unlock_mount(hfsmp); + updateAllocPtr = true; + } + + if (startingBlock >= hfsmp->allocLimit) { + startingBlock = 0; /* overflow so start at beginning */ + } + + // + // If the request must be contiguous, then find a sequence of free blocks + // that is long enough. Otherwise, find the first free block. + // + if (forceContiguous) { + err = BlockFindContig(hfsmp, startingBlock, minBlocks, maxBlocks, + flags, &extent->startBlock, &extent->blockCount); + /* + * If we allocated from a new position then also update the roving allocator. + * This will keep the roving allocation pointer up-to-date even + * if we are using the new R/B tree allocator, since + * it doesn't matter to us here, how the underlying allocator found + * the block to vend out. + */ + if ((err == noErr) && + (extent->startBlock > startingBlock) && + ((extent->startBlock < hfsmp->hfs_metazone_start) || + (extent->startBlock > hfsmp->hfs_metazone_end))) { + updateAllocPtr = true; + } + } else { + /* + * Scan the bitmap once, gather the N largest free extents, then + * allocate from these largest extents. Repeat as needed until + * we get all the space we needed. We could probably build up + * that list when the higher level caller tried (and failed) a + * contiguous allocation first. + * + * Note that the free-extent cache will be cease to be updated if + * we are using the red-black tree for allocations. If we jettison + * the tree, then we will reset the free-extent cache and start over. 
+ */ + + /* Disable HFS_ALLOC_FLUSHTXN if needed */ + if (forceFlush) { + flags &= ~HFS_ALLOC_FLUSHTXN; + } + + /* + * BlockFindKnown only examines the free extent cache; anything in there will + * have been committed to stable storage already. + */ + err = BlockFindKnown(hfsmp, maxBlocks, &extent->startBlock, + &extent->blockCount); + + /* dskFulErr out of BlockFindKnown indicates an empty Free Extent Cache */ + + if (err == dskFulErr) { + /* + * Now we have to do a bigger scan. Start at startingBlock and go up until the + * allocation limit. We 'trust' the summary bitmap in this call, if it tells us + * that it could not find any free space. + */ + err = BlockFindAny(hfsmp, startingBlock, hfsmp->allocLimit, + maxBlocks, flags, true, + &extent->startBlock, &extent->blockCount); + } + if (err == dskFulErr) { + /* + * Vary the behavior here if the summary table is on or off. + * If it is on, then we don't trust it it if we get into this case and + * basically do a full scan for maximum coverage. + * If it is off, then we trust the above and go up until the startingBlock. + */ + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + err = BlockFindAny(hfsmp, 1, hfsmp->allocLimit, maxBlocks, + flags, false, + &extent->startBlock, &extent->blockCount); + } + else { + err = BlockFindAny(hfsmp, 1, startingBlock, maxBlocks, + flags, false, + &extent->startBlock, &extent->blockCount); + } + + /* + * Last Resort: Find/use blocks that may require a journal flush. + */ + if (err == dskFulErr && forceFlush) { + flags |= HFS_ALLOC_FLUSHTXN; + err = BlockFindAny(hfsmp, 1, hfsmp->allocLimit, maxBlocks, + flags, false, + &extent->startBlock, &extent->blockCount); + } + } + } + + if (err) + goto exit; + +mark_allocated: + + // Handle alignment + if (ap && ap->alignment && extent->blockCount < ap->max_blocks) { + /* + * See the comment in FileMgrInternal.h for alignment + * semantics. 
+ */ + uint32_t rounding = ((extent->blockCount + ap->alignment_offset) + % ap->alignment); + + // @minBlocks is still the minimum + if (extent->blockCount >= minBlocks + rounding) + extent->blockCount -= rounding; + } + + err = BlockMarkAllocatedInternal(hfsmp, extent->startBlock, + extent->blockCount, flags); + + if (err) + goto exit; + + // if we actually allocated something then go update the + // various bits of state that we maintain regardless of + // whether there was an error (i.e. partial allocations + // still need to update things like the free block count). + // + if (extent->blockCount != 0) { + // + // If we used the volume's roving allocation pointer, then we need to update it. + // Adding in the length of the current allocation might reduce the next allocate + // call by avoiding a re-scan of the already allocated space. However, the clump + // just allocated can quite conceivably end up being truncated or released when + // the file is closed or its EOF changed. Leaving the allocation pointer at the + // start of the last allocation will avoid unnecessary fragmentation in this case. 
+ // + hfs_lock_mount (hfsmp); + + if (!ISSET(flags, HFS_ALLOC_USE_TENTATIVE | HFS_ALLOC_COMMIT)) { + lf_lck_spin_lock(&hfsmp->vcbFreeExtLock); + if (hfsmp->vcbFreeExtCnt == 0 && hfsmp->hfs_freed_block_count == 0) { + hfsmp->sparseAllocation = extent->startBlock; + } + lf_lck_spin_unlock(&hfsmp->vcbFreeExtLock); + if (extent->blockCount < hfsmp->hfs_freed_block_count) { + hfsmp->hfs_freed_block_count -= extent->blockCount; + } else { + hfsmp->hfs_freed_block_count = 0; + } + + if (updateAllocPtr && + ((extent->startBlock < hfsmp->hfs_metazone_start) || + (extent->startBlock > hfsmp->hfs_metazone_end))) { + HFS_UPDATE_NEXT_ALLOCATION(hfsmp, extent->startBlock); + } + + (void) remove_free_extent_cache(hfsmp, extent->startBlock, extent->blockCount); + } + + if (ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) { + if (ap == NULL || ap->reservation_in == NULL) { + err = paramErr; + goto exit; + } + (*ap->reservation_in)->rl_start += extent->blockCount; + hfsmp->tentativeBlocks -= extent->blockCount; + if (rl_len(*ap->reservation_in) <= 0) + hfs_free_tentative(hfsmp, ap->reservation_in); + } else if (ISSET(flags, HFS_ALLOC_COMMIT)) { + // Handle committing locked extents + hfs_assert(hfsmp->lockedBlocks >= extent->blockCount); + (*ap->reservation_in)->rl_start += extent->blockCount; + hfsmp->lockedBlocks -= extent->blockCount; + hfs_free_locked(hfsmp, ap->reservation_in); + } + + /* + * Update the number of free blocks on the volume + * + * Skip updating the free blocks count if the block are + * being allocated to relocate data as part of hfs_truncatefs() + */ + + if (ISSET(flags, HFS_ALLOC_TENTATIVE)) { + hfsmp->tentativeBlocks += extent->blockCount; + } else if (ISSET(flags, HFS_ALLOC_LOCKED)) { + hfsmp->lockedBlocks += extent->blockCount; + } else if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) { + hfsmp->freeBlocks -= extent->blockCount; + } + MarkVCBDirty(hfsmp); + hfs_unlock_mount(hfsmp); + + if (ISSET(flags, HFS_ALLOC_TENTATIVE)) { + hfs_assert(ap); + 
add_to_reserved_list(hfsmp, extent->startBlock, extent->blockCount, + 0, ap->reservation_out); + } else if (ISSET(flags, HFS_ALLOC_LOCKED)) { + hfs_assert(ap); + add_to_reserved_list(hfsmp, extent->startBlock, extent->blockCount, + 1, ap->reservation_out); + } + + if (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)) { + /* + * See if we used tentative blocks. Note that we cannot + * free the reservations here because we don't have access + * to the external pointers. All we can do is update the + * reservations and they'll be cleaned up when whatever is + * holding the pointers calls us back. + * + * We use the rangelist code to detect overlaps and + * constrain the tentative block allocation. Note that + * @end is inclusive so that our rangelist code will + * resolve the various cases for us. As a result, we need + * to ensure that we account for it properly when removing + * the blocks from the tentative count in the mount point + * and re-inserting the remainder (either head or tail) + */ + struct rl_entry *range, *next_range; + struct rl_head *ranges = &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS]; + const uint32_t start = extent->startBlock; + const uint32_t end = start + extent->blockCount - 1; + TAILQ_FOREACH_SAFE(range, ranges, rl_link, next_range) { + switch (rl_overlap(range, start, end)) { + case RL_OVERLAPCONTAINSRANGE: + // Keep the bigger part + if (start - range->rl_start > range->rl_end - end) { + // Discard the tail + hfsmp->tentativeBlocks -= range->rl_end + 1 - start; + hfs_release_summary(hfsmp, end + 1, (uint32_t)(range->rl_end - end)); + const uint32_t old_end = (uint32_t)range->rl_end; + range->rl_end = start - 1; + add_free_extent_cache(hfsmp, end + 1, old_end - end); + } else { + // Discard the head + hfsmp->tentativeBlocks -= end + 1 - range->rl_start; + hfs_release_summary(hfsmp, (uint32_t)range->rl_start, (uint32_t)(start - range->rl_start)); + const uint32_t old_start = (uint32_t)range->rl_start; + range->rl_start = end + 1; + 
add_free_extent_cache(hfsmp, old_start, + start - old_start); + } + hfs_assert(range->rl_end >= range->rl_start); + break; + case RL_MATCHINGOVERLAP: + case RL_OVERLAPISCONTAINED: + hfsmp->tentativeBlocks -= rl_len(range); + range->rl_end = range->rl_start - 1; + hfs_release_reserved(hfsmp, range, HFS_TENTATIVE_BLOCKS); + break; + case RL_OVERLAPSTARTSBEFORE: + hfsmp->tentativeBlocks -= range->rl_end + 1 - start; + range->rl_end = start - 1; + hfs_assert(range->rl_end >= range->rl_start); + break; + case RL_OVERLAPENDSAFTER: + hfsmp->tentativeBlocks -= end + 1 - range->rl_start; + range->rl_start = end + 1; + hfs_assert(range->rl_end >= range->rl_start); + break; + case RL_NOOVERLAP: + break; + } + } + } + } + +exit: + + if (ALLOC_DEBUG) { + if (err == noErr) { + if (extent->startBlock >= hfsmp->totalBlocks) { + LFHFS_LOG(LEVEL_ERROR, "BlockAllocate: vending invalid blocks!"); + hfs_assert(0); + } + if (extent->startBlock >= hfsmp->allocLimit) { + LFHFS_LOG(LEVEL_ERROR, "BlockAllocate: vending block past allocLimit!"); + hfs_assert(0); + } + + if ((extent->startBlock + extent->blockCount) >= hfsmp->totalBlocks) { + LFHFS_LOG(LEVEL_ERROR, "BlockAllocate: vending too many invalid blocks!"); + hfs_assert(0); + } + + if ((extent->startBlock + extent->blockCount) >= hfsmp->allocLimit) { + LFHFS_LOG(LEVEL_ERROR, "BlockAllocate: vending too many invalid blocks past allocLimit!"); + hfs_assert(0); + } + } + } + + if (err) { + // Just to be safe... + extent->startBlock = 0; + extent->blockCount = 0; + } + + // KBZ : For now, make sure clusters fills with zeros. 
+ raw_readwrite_zero_fill_fill( hfsmp, extent->startBlock, extent->blockCount ); + + return err; +} + + +/* + ;________________________________________________________________________________ + ; + ; Routine: BlockDeallocate + ; + ; Function: Update the bitmap to deallocate a run of disk allocation blocks + ; + ; Input Arguments: + ; vcb - Pointer to ExtendedVCB for the volume to free space on + ; firstBlock - First allocation block to be freed + ; numBlocks - Number of allocation blocks to free up (must be > 0!) + ; + ; Output: + ; (result) - Result code + ; + ; Side effects: + ; The volume bitmap is read and updated; the volume bitmap cache may be changed. + ; The Allocator's red-black trees may also be modified as a result. + ; + ;________________________________________________________________________________ + */ + +OSErr BlockDeallocate ( + ExtendedVCB *vcb, // Which volume to deallocate space on + u_int32_t firstBlock, // First block in range to deallocate + u_int32_t numBlocks, // Number of contiguous blocks to deallocate + hfs_block_alloc_flags_t flags) +{ + if (ISSET(flags, HFS_ALLOC_TENTATIVE | HFS_ALLOC_LOCKED)) + return 0; + + OSErr err; + struct hfsmount *hfsmp; + hfsmp = VCBTOHFS(vcb); + + // + // If no blocks to deallocate, then exit early + // + if (numBlocks == 0) { + err = noErr; + goto Exit; + } + + + if (ALLOC_DEBUG) { + if (firstBlock >= hfsmp->totalBlocks) { + LFHFS_LOG(LEVEL_ERROR, "BlockDeallocate: freeing invalid blocks!"); + hfs_assert(0); + } + + if ((firstBlock + numBlocks) >= hfsmp->totalBlocks) { + LFHFS_LOG(LEVEL_ERROR, "BlockDeallocate: freeing too many invalid blocks!"); + hfs_assert(0); + } + } + + /* + * If we're using the summary bitmap, then try to mark the bits + * as potentially usable/free before actually deallocating them. + * It is better to be slightly speculative here for correctness. 
+ */
+
+ (void) hfs_release_summary (hfsmp, firstBlock, numBlocks);
+
+ err = BlockMarkFreeInternal(vcb, firstBlock, numBlocks, true);
+
+ if (err) {
+ goto Exit;
+ }
+
+ //
+ // Update the volume's free block count, and mark the VCB as dirty.
+ //
+ hfs_lock_mount(hfsmp);
+ /*
+ * Do not update the free block count. This flag is specified
+ * when a volume is being truncated.
+ */
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ vcb->freeBlocks += numBlocks;
+ }
+
+ vcb->hfs_freed_block_count += numBlocks;
+
+ if (vcb->nextAllocation == (firstBlock + numBlocks)) {
+ HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
+ }
+
+ if (hfsmp->jnl == NULL)
+ {
+ /*
+ * In the journal case, we'll add the free extent once the journal
+ * calls us back to tell us it wrote the transaction to disk.
+ */
+ (void) add_free_extent_cache(vcb, firstBlock, numBlocks);
+
+ /*
+ * In the journal case, we'll only update sparseAllocation once the
+ * free extent cache becomes empty (when we remove the last entry
+ * from the cache). Skipping it here means we're less likely to
+ * find a recently freed extent via the bitmap before it gets added
+ * to the free extent cache.
+ */ + if (firstBlock < vcb->sparseAllocation) { + vcb->sparseAllocation = firstBlock; + } + } + + MarkVCBDirty(vcb); + hfs_unlock_mount(hfsmp); + +Exit: + + return err; +} + + +u_int8_t freebitcount[16] = { + 4, 3, 3, 2, 3, 2, 2, 1, /* 0 1 2 3 4 5 6 7 */ + 3, 2, 2, 1, 2, 1, 1, 0, /* 8 9 A B C D E F */ +}; + +u_int32_t +MetaZoneFreeBlocks(ExtendedVCB *vcb) +{ + u_int32_t freeblocks; + u_int32_t *currCache; + GenericLFBufPtr blockRef; + u_int32_t bit; + u_int32_t lastbit; + int bytesleft; + int bytesperblock; + u_int8_t byte; + u_int8_t *buffer; + + blockRef = 0; + bytesleft = freeblocks = 0; + buffer = NULL; + bit = VCBTOHFS(vcb)->hfs_metazone_start; + if (bit == 1) + bit = 0; + + lastbit = VCBTOHFS(vcb)->hfs_metazone_end; + bytesperblock = vcb->vcbVBMIOSize; + + /* + * Count all the bits from bit to lastbit. + */ + while (bit < lastbit) { + /* + * Get next bitmap block. + */ + if (bytesleft == 0) { + if (blockRef) { + (void) ReleaseBitmapBlock(vcb, blockRef, false); + blockRef = 0; + } + if (ReadBitmapBlock(vcb, bit, &currCache, &blockRef, + HFS_ALLOC_IGNORE_TENTATIVE) != 0) { + return (0); + } + buffer = (u_int8_t *)currCache; + bytesleft = bytesperblock; + } + byte = *buffer++; + freeblocks += freebitcount[byte & 0x0F]; + freeblocks += freebitcount[(byte >> 4) & 0x0F]; + bit += kBitsPerByte; + --bytesleft; + } + if (blockRef) + (void) ReleaseBitmapBlock(vcb, blockRef, false); + + return (freeblocks); +} + + +/* + * Obtain the next allocation block (bit) that's + * outside the metadata allocation zone. + */ +static u_int32_t NextBitmapBlock( + ExtendedVCB *vcb, + u_int32_t bit) +{ + struct hfsmount *hfsmp = VCBTOHFS(vcb); + + if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) == 0) + return (bit); + /* + * Skip over metadata allocation zone. + */ + if ((bit >= hfsmp->hfs_metazone_start) && + (bit <= hfsmp->hfs_metazone_end)) { + bit = hfsmp->hfs_metazone_end + 1; + } + return (bit); +} + + +// Assumes @bitmap is aligned to 8 bytes and multiple of 8 bytes. 
+static void bits_set(void *bitmap, int start, int end) +{ + const int start_bit = start & 63; + const int end_bit = end & 63; + +#define LEFT_MASK(bit) OSSwapHostToBigInt64(0xffffffffffffffffull << (64 - bit)) +#define RIGHT_MASK(bit) OSSwapHostToBigInt64(0xffffffffffffffffull >> bit) + + uint64_t *p = (uint64_t *)bitmap + start / 64; + + if ((start & ~63) == (end & ~63)) { + // Start and end in same 64 bits + *p |= RIGHT_MASK(start_bit) & LEFT_MASK(end_bit); + } else { + *p++ |= RIGHT_MASK(start_bit); + + int nquads = (end - end_bit - start - 1) / 64; + + while (nquads--) + *p++ = 0xffffffffffffffffull; + + if (end_bit) + *p |= LEFT_MASK(end_bit); + } +} + +// Modifies the buffer and applies any reservations that we might have +static GenericLFBufPtr process_reservations(hfsmount_t *hfsmp, GenericLFBufPtr bp, off_t offset, hfs_block_alloc_flags_t flags, bool always_copy) +{ + +#if 0 + bool taken_copy = false; +#else +#pragma unused (always_copy) +#endif + + void *buffer = bp->pvData; + const uint64_t nbytes = bp->uValidBytes; + const off_t end = offset + nbytes * 8 - 1; + + for (int i = (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE) + ? HFS_LOCKED_BLOCKS : HFS_TENTATIVE_BLOCKS); i < 2; ++i) { + struct rl_entry *entry; + TAILQ_FOREACH(entry, &hfsmp->hfs_reserved_ranges[i], rl_link) { + uint32_t a, b; + + enum rl_overlaptype overlap_type = rl_overlap(entry, offset, end); + + if (overlap_type == RL_NOOVERLAP) + continue; + +#if 0 + /* + * If always_copy is false, we only take a copy if B_LOCKED is + * set because ReleaseScanBitmapRange doesn't invalidate the + * buffer in that case. 
+ */ + if (!taken_copy && (always_copy || ISSET(buf_flags(bp), B_LOCKED))) { + buf_t new_bp = buf_create_shadow(bp, true, 0, NULL, NULL); + buf_brelse(bp); + bp = new_bp; + buf_setflags(bp, B_NOCACHE); + buffer = (void *)buf_dataptr(bp); + taken_copy = true; + } +#endif + switch (overlap_type) { + case RL_OVERLAPCONTAINSRANGE: + case RL_MATCHINGOVERLAP: + memset(buffer, 0xff, nbytes); + return bp; + case RL_OVERLAPISCONTAINED: + a = (uint32_t)entry->rl_start; + b = (uint32_t)entry->rl_end; + break; + case RL_OVERLAPSTARTSBEFORE: + a = (uint32_t)offset; + b = (uint32_t)entry->rl_end; + break; + case RL_OVERLAPENDSAFTER: + a = (uint32_t)entry->rl_start; + b = (uint32_t)end; + break; + case RL_NOOVERLAP: + __builtin_unreachable(); + } + + a -= offset; + b -= offset; + + hfs_assert(b >= a); + + // b is inclusive + bits_set(buffer, a, b + 1); + } + } // for (;;) + + return bp; +} + +/* + ;_______________________________________________________________________ + ; + ; Routine: ReadBitmapBlock + ; + ; Function: Read in a bitmap block corresponding to a given allocation + ; block (bit). Return a pointer to the bitmap block. 
+ ;
+ ; Inputs:
+ ; vcb -- Pointer to ExtendedVCB
+ ; bit -- Allocation block whose bitmap block is desired
+ ;
+ ; Outputs:
+ ; buffer -- Pointer to bitmap block corresponding to "block"
+ ; blockRef
+ ;_______________________________________________________________________
+ */
+static OSErr ReadBitmapBlock(ExtendedVCB *vcb,
+ u_int32_t bit,
+ u_int32_t **buffer,
+ GenericLFBufPtr *blockRef,
+ hfs_block_alloc_flags_t flags)
+{
+ OSErr err = 0;
+ GenericLFBufPtr bp = NULL;
+ struct vnode *vp = NULL;
+ daddr64_t block;
+ u_int32_t blockSize;
+
+ /*
+ * volume bitmap blocks are protected by the allocation file lock
+ */
+ REQUIRE_FILE_LOCK(vcb->hfs_allocation_vp, false);
+
+ blockSize = (u_int32_t)vcb->vcbVBMIOSize;
+ if (blockSize == 0) return EINVAL; //Division protection
+ block = (daddr64_t)(bit / (blockSize * kBitsPerByte));
+
+ /* HFS+ / HFSX */
+ vp = vcb->hfs_allocation_vp; /* use allocation file vnode */
+
+ bp = lf_hfs_generic_buf_allocate(vp, block, blockSize, 0);
+ err = lf_hfs_generic_buf_read(bp);
+
+ if ( err )
+ {
+ lf_hfs_generic_buf_release(bp);
+ *blockRef = NULL;
+ *buffer = NULL;
+ }
+ else
+ {
+ if (!ISSET(flags, HFS_ALLOC_IGNORE_RESERVED)) {
+ bp = process_reservations(vcb, bp, block * blockSize * 8, flags, /* always_copy: */ true);
+ }
+
+ bp->uFlags = flags;
+
+ *blockRef = bp;
+ *buffer = bp->pvData;
+ }
+
+ return err;
+}
+
+
+/*
+ ;_______________________________________________________________________
+ ;
+ ; Routine: ReadBitmapRange
+ ;
+ ; Function: Read in a range of the bitmap starting at the given offset.
+ ; Use the supplied size to determine the amount of I/O to generate
+ ; against the bitmap file. Return a pointer to the bitmap block.
+ ;
+ ; Inputs:
+ ; hfsmp -- Pointer to hfs mount
+ ; offset -- byte offset into the bitmap file
+ ; size -- How much I/O to generate against the bitmap file.
+ ;
+ ; Outputs:
+ ; buffer -- Pointer to bitmap block data corresponding to "block"
+ ; blockRef -- struct 'buf' pointer which MUST be released in a subsequent call.
+ ;_______________________________________________________________________
+ */
+static OSErr ReadBitmapRange(struct hfsmount *hfsmp, uint32_t offset, uint32_t iosize, uint32_t **buffer, GenericLFBuf **blockRef)
+{
+
+ OSErr err = 0;
+ GenericLFBufPtr bp = NULL;
+ struct vnode *vp = NULL;
+ daddr64_t block;
+
+ /*
+ * volume bitmap blocks are protected by the allocation file lock
+ */
+ REQUIRE_FILE_LOCK(hfsmp->hfs_allocation_vp, false);
+
+ vp = hfsmp->hfs_allocation_vp; /* use allocation file vnode */
+
+ /*
+ * The byte offset argument must be converted into bitmap-relative logical
+ * block numbers before using it in buf_meta_bread.
+ *
+ * lf_hfs_generic_buf_read (and the things it calls) will eventually try to
+ * reconstruct the byte offset into the file by multiplying the logical
+ * block number passed in below by the given iosize.
+ * So we prepare for that by converting the byte offset back into
+ * logical blocks in terms of iosize units.
+ *
+ * The amount of I/O requested and the byte offset should be computed
+ * based on the helper function in the frame that called us, so we can
+ * get away with just doing a simple divide here.
+ */
+ block = (daddr64_t)(offset / iosize);
+
+ bp = lf_hfs_generic_buf_allocate(vp, block, iosize, 0);
+ err = lf_hfs_generic_buf_read(bp);
+
+ if ( err )
+ {
+ lf_hfs_generic_buf_release(bp);
+ *blockRef = 0;
+ *buffer = NULL;
+ }
+ else
+ {
+ bp = process_reservations(hfsmp, bp, (offset * 8), 0, /* always_copy: */ false);
+ *blockRef = bp;
+ *buffer = bp->pvData;
+ }
+
+ return err;
+}
+
+
+/*
+ ;_______________________________________________________________________
+ ;
+ ; Routine: ReleaseBitmapBlock
+ ;
+ ; Function: Release a bitmap block.
+ ; + ; Inputs: + ; vcb + ; blockRef + ; dirty + ;_______________________________________________________________________ + */ +static OSErr ReleaseBitmapBlock( ExtendedVCB *vcb, GenericLFBufPtr blockRef, Boolean dirty) +{ + + GenericLFBufPtr bp = blockRef; + + if (blockRef == 0) { + if (dirty) + { + LFHFS_LOG(LEVEL_ERROR, "ReleaseBitmapBlock: missing bp"); + hfs_assert(0); + } + return (0); + } + + if (bp) + { + if (dirty) + { + hfs_block_alloc_flags_t flags = (uint32_t)bp->uFlags; + + if (!ISSET(flags, HFS_ALLOC_IGNORE_RESERVED)) + { + LFHFS_LOG(LEVEL_ERROR, "Modified read-only bitmap buffer!"); + hfs_assert(0); + } + + struct hfsmount *hfsmp = VCBTOHFS(vcb); + if (hfsmp->jnl) + { + journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); + } + else + { + lf_hfs_generic_buf_write(bp); + lf_hfs_generic_buf_release(bp); + } + } else { + lf_hfs_generic_buf_release(bp); + } + } + + return (0); +} + +/* + * ReleaseScanBitmapRange + * + * This is used to release struct bufs that were created for use by + * bitmap scanning code. Because they may be of sizes different than the + * typical runtime manipulation code, we want to force them to be purged out + * of the buffer cache ASAP, so we'll release them differently than in the + * ReleaseBitmapBlock case. + * + * Additionally, because we know that we're only reading the blocks and that they + * should have been clean prior to reading them, we will never + * issue a write to them (thus dirtying them). + */ + +static OSErr ReleaseScanBitmapRange( GenericLFBufPtr bp ) +{ + if (bp) + { + lf_hfs_generic_buf_release(bp); + } + + return (0); +} + +/* + * @extent.startBlock, on input, contains a preferred block for the + * allocation. @extent.blockCount, on input, contains the minimum + * number of blocks acceptable. Upon success, the result is conveyed + * in @extent. 
+ */ +static OSErr hfs_alloc_try_hard(hfsmount_t *hfsmp, + HFSPlusExtentDescriptor *extent, + uint32_t max_blocks, + hfs_block_alloc_flags_t flags) +{ + OSErr err = dskFulErr; + + const uint32_t min_blocks = extent->blockCount; + + // It's > rather than >= because the last block is always reserved + if (extent->startBlock > 0 && extent->startBlock < hfsmp->allocLimit + && hfsmp->allocLimit - extent->startBlock > max_blocks) { + /* + * This is just checking to see if there's an extent starting + * at extent->startBlock that will suit. We only check for + * @max_blocks here; @min_blocks is ignored. + */ + + err = BlockFindContiguous(hfsmp, extent->startBlock, extent->startBlock + max_blocks, + max_blocks, max_blocks, true, true, + &extent->startBlock, &extent->blockCount, flags); + + if (err != dskFulErr) + return err; + } + + err = BlockFindKnown(hfsmp, max_blocks, &extent->startBlock, + &extent->blockCount); + + if (!err) { + if (extent->blockCount >= max_blocks) + return 0; + } else if (err != dskFulErr) + return err; + + // Try a more exhaustive search + return BlockFindContiguous(hfsmp, 1, hfsmp->allocLimit, + min_blocks, max_blocks, + /* useMetaZone: */ true, + /* trustSummary: */ true, + &extent->startBlock, &extent->blockCount, flags); +} + +/* + _______________________________________________________________________ + + Routine: BlockFindContig + + Function: Find a contiguous group of allocation blocks. If the + minimum cannot be satisfied, nothing is returned. The + caller guarantees that there are enough free blocks + (though they may not be contiguous, in which case this + call will fail). 
+ + Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block for allocation + minBlocks Minimum number of contiguous blocks to allocate + maxBlocks Maximum number of contiguous blocks to allocate + flags + + Outputs: + actualStartBlock First block of range allocated, or 0 if error + actualNumBlocks Number of blocks allocated, or 0 if error + _______________________________________________________________________ + */ +static OSErr BlockFindContig( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t minBlocks, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks) +{ + OSErr retval = noErr; + uint32_t currentStart = startingBlock; + + uint32_t foundStart = 0; // values to emit to caller + uint32_t foundCount = 0; + + uint32_t collision_start = 0; // if we have to re-allocate a recently deleted extent, use this + uint32_t collision_count = 0; + + int allowReuse = (flags & HFS_ALLOC_FLUSHTXN); + Boolean useMetaZone = (flags & HFS_ALLOC_METAZONE); + + struct hfsmount *hfsmp = VCBTOHFS(vcb); + + while ((retval == noErr) && (foundStart == 0) && (foundCount == 0)) { + + /* Try and find something that works. */ + + /* + * NOTE: If the only contiguous free extent of at least minBlocks + * crosses startingBlock (i.e. starts before, ends after), then we + * won't find it. Earlier versions *did* find this case by letting + * the second search look past startingBlock by minBlocks. But + * with the free extent cache, this can lead to duplicate entries + * in the cache, causing the same blocks to be allocated twice. 
+ */ + retval = BlockFindContiguous(vcb, currentStart, vcb->allocLimit, minBlocks, + maxBlocks, useMetaZone, true, &foundStart, &foundCount, flags); + + if (retval == dskFulErr && currentStart != 0) { + /* + * We constrain the endingBlock so we don't bother looking for ranges + * that would overlap those found in the previous call, if the summary bitmap + * is not on for this volume. If it is, then we assume that it was not trust + * -worthy and do a full scan. + */ + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + retval = BlockFindContiguous(vcb, 1, vcb->allocLimit, minBlocks, + maxBlocks, useMetaZone, false, &foundStart, &foundCount, flags); + } + else { + retval = BlockFindContiguous(vcb, 1, currentStart, minBlocks, + maxBlocks, useMetaZone, false, &foundStart, &foundCount, flags); + } + } + + if (retval != noErr) { + goto bailout; + } + + /* Do we overlap with the recently found collision extent? */ + if (collision_start) { + if (extents_overlap (foundStart, foundCount, collision_start, collision_count)) { + /* + * We've looped around, and the only thing we could use was the collision extent. + * Since we are allowed to use it, go ahead and do so now. + */ + if(allowReuse) { + /* + * then we couldn't find anything except values which might have been + * recently deallocated. just return our cached value if we are allowed to. + */ + foundStart = collision_start; + foundCount = collision_count; + goto bailout; + } + else { + /* Otherwise, we looped around and couldn't find anything that wouldn't require a journal flush. */ + retval = dskFulErr; + goto bailout; + } + } + } + /* + * If we found something good, we'd break out of the loop at the top; foundCount + * and foundStart should be set. + */ + + } // end while loop. 
+ +bailout: + + if (retval == noErr) { + *actualStartBlock = foundStart; + *actualNumBlocks = foundCount; + } + + return retval; + +} + + +/* + _______________________________________________________________________ + + Routine: BlockFindAny + + Function: Find one or more allocation blocks and may return fewer than + requested. The caller guarantees that there is at least one + free block. + + Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block for allocation + endingBlock Last block to check + 1 + maxBlocks Maximum number of contiguous blocks to allocate + useMetaZone + + Outputs: + actualStartBlock First block of range allocated, or 0 if error + actualNumBlocks Number of blocks allocated, or 0 if error + _______________________________________________________________________ + */ + +static OSErr BlockFindAny( + ExtendedVCB *vcb, + u_int32_t startingBlock, + register u_int32_t endingBlock, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + Boolean trustSummary, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks) +{ + + /* + * If it is enabled, scan through the summary table to find the first free block. + * + * If it reports that there are not any free blocks, we could have a false + * positive, so in that case, use the input arguments as a pass through. + */ + uint32_t start_blk = startingBlock; + uint32_t end_blk = endingBlock; + struct hfsmount *hfsmp; + OSErr err; + + hfsmp = (struct hfsmount*)vcb; + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + uint32_t suggested_start; + + /* + * If the summary table is enabled, scan through it to find the first free + * block. If there was an error, or we couldn't find anything free in the + * summary table, then just leave the start_blk fields unmodified. We wouldn't + * have gotten to this point if the mount point made it look like there was possibly + * free space in the FS. 
+ */ + err = hfs_find_summary_free (hfsmp, startingBlock, &suggested_start); + if (err == 0) { + start_blk = suggested_start; + } + else { + /* Differentiate between ENOSPC and a more esoteric error in the above call. */ + if ((err == ENOSPC) && (trustSummary)) { + /* + * The 'trustSummary' argument is for doing a full scan if we really + * really, need the space and we think it's somewhere but can't find it in the + * summary table. If it's true, then we trust the summary table and return + * dskFulErr if we couldn't find it above. + */ + return dskFulErr; + } + /* + * If either trustSummary was false or we got a different errno, then we + * want to fall through to the real bitmap single i/o code... + */ + } + } + + err = BlockFindAnyBitmap(vcb, start_blk, end_blk, maxBlocks, + flags, actualStartBlock, actualNumBlocks); + + return err; +} + + +/* + * BlockFindAnyBitmap finds free ranges by scanning the bitmap to + * figure out where the free allocation blocks are. Inputs and + * outputs are the same as for BlockFindAny. + */ + +static OSErr BlockFindAnyBitmap( + ExtendedVCB *vcb, + u_int32_t startingBlock, + register u_int32_t endingBlock, + u_int32_t maxBlocks, + hfs_block_alloc_flags_t flags, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks) +{ + OSErr err; + register u_int32_t block = 0; // current block number + register u_int32_t currentWord; // Pointer to current word within bitmap block + register u_int32_t bitMask; // Word with given bits already set (ready to OR in) + register u_int32_t wordsLeft; // Number of words left in this bitmap block + u_int32_t *buffer = NULL; + u_int32_t *currCache = NULL; + GenericLFBufPtr blockRef = 0; + u_int32_t bitsPerBlock; + u_int32_t wordsPerBlock; + struct hfsmount *hfsmp = VCBTOHFS(vcb); + Boolean useMetaZone = (flags & HFS_ALLOC_METAZONE); + + /* + * When we're skipping the metadata zone and the start/end + * range overlaps with the metadata zone then adjust the + * start to be outside of the metadata zone. 
If the range + * is entirely inside the metadata zone then we can deny the + * request (dskFulErr). + */ + if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) { + if (startingBlock <= vcb->hfs_metazone_end) { + if (endingBlock > (vcb->hfs_metazone_end + 2)) + startingBlock = vcb->hfs_metazone_end + 1; + else { + err = dskFulErr; + goto Exit; + } + } + } + + // Since this routine doesn't wrap around + if (maxBlocks > (endingBlock - startingBlock)) { + maxBlocks = endingBlock - startingBlock; + } + + // + // Pre-read the first bitmap block + // + err = ReadBitmapBlock(vcb, startingBlock, &currCache, &blockRef, flags); + if (err != noErr) goto Exit; + buffer = currCache; + + // + // Set up the current position within the block + // + { + u_int32_t wordIndexInBlock; + + bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte; + wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord; + + wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord; + buffer += wordIndexInBlock; + wordsLeft = wordsPerBlock - wordIndexInBlock; + currentWord = SWAP_BE32 (*buffer); + bitMask = kHighBitInWordMask >> (startingBlock & kBitsWithinWordMask); + } + + /* + * While loop 1: + * Find the first unallocated block starting at 'block' + */ + uint32_t summary_block_scan = 0; + + block=startingBlock; + while (block < endingBlock) { + if ((currentWord & bitMask) == 0) + break; + + // Next bit + ++block; + bitMask >>= 1; + if (bitMask == 0) { + // Next word + bitMask = kHighBitInWordMask; + ++buffer; + + if (--wordsLeft == 0) { + // Next block + buffer = currCache = NULL; + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + /* + * If summary_block_scan is non-zero, then we must have + * pulled a bitmap file block into core, and scanned through + * the entire thing. Because we're in this loop, we are + * implicitly trusting that the bitmap didn't have any knowledge + * about this particular block. 
As a result, update the bitmap + * (lazily, now that we've scanned it) with our findings that + * this particular block is completely used up. + */ + if (summary_block_scan != 0) { + uint32_t summary_bit; + (void) hfs_get_summary_index (hfsmp, summary_block_scan, &summary_bit); + hfs_set_summary (hfsmp, summary_bit, 1); + } + } + + err = ReleaseBitmapBlock(vcb, blockRef, false); + if (err != noErr) goto Exit; + + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + block = NextBitmapBlock(vcb, block); + } + if (block >= endingBlock) { + err = dskFulErr; + goto Exit; + } + + err = ReadBitmapBlock(vcb, block, &currCache, &blockRef, flags); + if (err != noErr) goto Exit; + buffer = currCache; + summary_block_scan = block; + wordsLeft = wordsPerBlock; + } + currentWord = SWAP_BE32 (*buffer); + } + } + + // Did we get to the end of the bitmap before finding a free block? + // If so, then couldn't allocate anything. + if (block >= endingBlock) { + err = dskFulErr; + goto Exit; + } + +#if LF_HFS_CHECK_UNMAPPED + /* + * Don't move forward just yet. Verify that either one of the following + * two conditions is true: + * 1) journaling is not enabled + * 2) block is not currently on any pending TRIM list. + */ + if (hfsmp->jnl != NULL && (forceFlush == false)) { + int recently_deleted = 0; + uint32_t nextblk; + err = CheckUnmappedBytes (hfsmp, (uint64_t) block, 1, &recently_deleted, &nextblk); + if ((err == 0) && (recently_deleted)) { + + /* release the bitmap block & unset currCache. we may jump past it. */ + err = ReleaseBitmapBlock(vcb, blockRef, false); + currCache = NULL; + if (err != noErr) { + goto Exit; + } + /* set our start to nextblk, and re-do the search. */ + startingBlock = nextblk; + goto restartSearchAny; + } + } +#endif + + // Return the first block in the allocated range + *actualStartBlock = block; + + // If we could get the desired number of blocks before hitting endingBlock, + // then adjust endingBlock so we won't keep looking. 
Ideally, the comparison
+ // would be (block + maxBlocks) < endingBlock, but that could overflow. The
+ // comparison below yields identical results, but without overflow.
+ if (block < (endingBlock-maxBlocks)) {
+ endingBlock = block + maxBlocks; // if we get this far, we've found enough
+ }
+
+ /*
+ * While loop 2:
+ * Scan the bitmap, starting at 'currentWord' in the current
+ * bitmap block. Continue iterating through the bitmap until
+ * either we hit an allocated block, or until we have accumulated
+ * maxBlocks worth of bitmap.
+ */
+
+ /* Continue until we see an allocated block */
+ while ((currentWord & bitMask) == 0) {
+ // Move to the next block. If no more, then exit.
+ ++block;
+ if (block == endingBlock) {
+ break;
+ }
+
+ // Next bit
+ bitMask >>= 1;
+ if (bitMask == 0) {
+ // Next word
+ bitMask = kHighBitInWordMask;
+ ++buffer;
+
+ if (--wordsLeft == 0) {
+ // Next block
+ buffer = currCache = NULL;
+
+ /* We're only reading the bitmap here, so mark it as clean */
+ err = ReleaseBitmapBlock(vcb, blockRef, false);
+ if (err != noErr) {
+ goto Exit;
+ }
+
+ /*
+ * Skip over metadata blocks.
+ */ + if (!useMetaZone) { + u_int32_t nextBlock; + nextBlock = NextBitmapBlock(vcb, block); + if (nextBlock != block) { + goto Exit; /* allocation gap, so stop */ + } + } + + if (block >= endingBlock) { + goto Exit; + } + + err = ReadBitmapBlock(vcb, block, &currCache, &blockRef, flags); + if (err != noErr) { + goto Exit; + } + buffer = currCache; + wordsLeft = wordsPerBlock; + } + currentWord = SWAP_BE32 (*buffer); + } + } + +Exit: + if (currCache) { + /* Release the bitmap reference prior to marking bits in-use */ + (void) ReleaseBitmapBlock(vcb, blockRef, false); + currCache = NULL; + } + + if (err == noErr) { + *actualNumBlocks = block - *actualStartBlock; + + // sanity check + if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit) { + LFHFS_LOG(LEVEL_ERROR, "BlockFindAnyBitmap: allocation overflow on \"%s\"", vcb->vcbVN); + hfs_assert(0); + } + } + else { + *actualStartBlock = 0; + *actualNumBlocks = 0; + } + + return err; +} + + +/* + _______________________________________________________________________ + + Routine: BlockFindKnown + + Function: Return a potential extent from the free extent cache. The + returned extent *must* be marked allocated and removed + from the cache by the *caller*. 
+ + Inputs: + vcb Pointer to volume where space is to be allocated + maxBlocks Maximum number of contiguous blocks to allocate + + Outputs: + actualStartBlock First block of range allocated, or 0 if error + actualNumBlocks Number of blocks allocated, or 0 if error + + Returns: + dskFulErr Free extent cache is empty + _______________________________________________________________________ + */ + +static OSErr BlockFindKnown( + ExtendedVCB *vcb, + u_int32_t maxBlocks, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks) +{ + OSErr err; + u_int32_t foundBlocks; + struct hfsmount *hfsmp = VCBTOHFS(vcb); + + hfs_lock_mount (hfsmp); + lf_lck_spin_lock(&vcb->vcbFreeExtLock); + if ( vcb->vcbFreeExtCnt == 0 || + vcb->vcbFreeExt[0].blockCount == 0) { + lf_lck_spin_unlock(&vcb->vcbFreeExtLock); + hfs_unlock_mount(hfsmp); + return dskFulErr; + } + lf_lck_spin_unlock(&vcb->vcbFreeExtLock); + hfs_unlock_mount(hfsmp); + + lf_lck_spin_lock(&vcb->vcbFreeExtLock); + + // Just grab up to maxBlocks of the first (largest) free exent. + *actualStartBlock = vcb->vcbFreeExt[0].startBlock; + foundBlocks = vcb->vcbFreeExt[0].blockCount; + if (foundBlocks > maxBlocks) + foundBlocks = maxBlocks; + *actualNumBlocks = foundBlocks; + + lf_lck_spin_unlock(&vcb->vcbFreeExtLock); + + // sanity check + if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit) + { + LFHFS_LOG(LEVEL_ERROR, "BlockAllocateKnown() found allocation overflow on \"%s\"", vcb->vcbVN); + hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED); + err = EIO; + } else + err = 0; + + return err; +} + +/* + * BlockMarkAllocated + * + * This is a wrapper function around the internal calls which will actually mark the blocks + * as in-use. It will mark the blocks in the red-black tree if appropriate. We need to do + * this logic here to avoid callers having to deal with whether or not the red-black tree + * is enabled. 
+ */ + +OSErr BlockMarkAllocated( + ExtendedVCB *vcb, + u_int32_t startingBlock, + register u_int32_t numBlocks) +{ + return BlockMarkAllocatedInternal(vcb, startingBlock, numBlocks, 0); +} + + +/* + _______________________________________________________________________ + + Routine: BlockMarkAllocatedInternal + + Function: Mark a contiguous group of blocks as allocated (set in the + bitmap). It assumes those bits are currently marked + deallocated (clear in the bitmap). Note that this function + must be called regardless of whether or not the bitmap or + tree-based allocator is used, as all allocations must correctly + be marked on-disk. If the tree-based approach is running, then + this will be done before the node is removed from the tree. + + Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock First block number to mark as allocated + numBlocks Number of blocks to mark as allocated + _______________________________________________________________________ + */ +static +OSErr BlockMarkAllocatedInternal ( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t numBlocks, + hfs_block_alloc_flags_t flags) +{ + OSErr err; + register u_int32_t *currentWord; // Pointer to current word within bitmap block + register u_int32_t wordsLeft; // Number of words left in this bitmap block + register u_int32_t bitMask; // Word with given bits already set (ready to OR in) + u_int32_t firstBit; // Bit index within word of first bit to allocate + u_int32_t numBits; // Number of bits in word to allocate + u_int32_t *buffer = NULL; + GenericLFBufPtr blockRef = NULL; + u_int32_t bitsPerBlock; + u_int32_t wordsPerBlock; + // XXXdbg + struct hfsmount *hfsmp = VCBTOHFS(vcb); + +#if DEBUG + + if (!ISSET(flags, HFS_ALLOC_COMMIT) + || ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) { + struct rl_entry *range; + TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS], rl_link) { + hfs_assert(rl_overlap(range, startingBlock, + startingBlock + numBlocks - 1) == 
RL_NOOVERLAP); + } + } + +#endif + +#if LF_HFS_CHECK_UNMAPPED + int force_flush = 0; + /* + * Since we are about to mark these bits as in-use + * in the bitmap, decide if we need to alert the caller + * that a journal flush might be appropriate. It's safe to + * poke at the journal pointer here since we MUST have + * called start_transaction by the time this function is invoked. + * If the journal is enabled, then it will have taken the requisite + * journal locks. If it is not enabled, then we have taken + * a shared lock on the global lock. + */ + if (hfsmp->jnl) { + uint32_t ignore; + err = CheckUnmappedBytes (hfsmp, (uint64_t) startingBlock, (uint64_t)numBlocks, &force_flush, &ignore); + if ((err == 0) && (force_flush)) { + journal_request_immediate_flush (hfsmp->jnl); + } + } + + hfs_unmap_alloc_extent(vcb, startingBlock, numBlocks); +#endif + + /* + * Don't make changes to the disk if we're just reserving. Note that + * we could do better in the tentative case because we could, in theory, + * avoid the journal flush above. However, that would mean that we would + * need to catch the callback to stop it incorrectly addding the extent + * to our free cache. + */ + if (ISSET(flags, HFS_ALLOC_LOCKED | HFS_ALLOC_TENTATIVE)) { + err = 0; + goto Exit; + } + + // + // Pre-read the bitmap block containing the first word of allocation + // + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + // + // Initialize currentWord, and wordsLeft. 
+ // + { + u_int32_t wordIndexInBlock; + + bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte; + wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord; + + wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + wordsLeft = wordsPerBlock - wordIndexInBlock; + } + + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + + // + // If the first block to allocate doesn't start on a word + // boundary in the bitmap, then treat that first word + // specially. + // + + firstBit = startingBlock % kBitsPerWord; + if (firstBit != 0) { + bitMask = kAllBitsSetInWord >> firstBit; // turn off all bits before firstBit + numBits = kBitsPerWord - firstBit; // number of remaining bits in this word + if (numBits > numBlocks) { + numBits = numBlocks; // entire allocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last + } +#if DEBUG + if ((*currentWord & SWAP_BE32 (bitMask)) != 0) { + LFHFS_LOG(LEVEL_ERROR, "BlockMarkAllocatedInternal: blocks already allocated!"); + hfs_assert(0); + } +#endif + *currentWord |= SWAP_BE32 (bitMask); // set the bits in the bitmap + numBlocks -= numBits; // adjust number of blocks left to allocate + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate whole words (32 blocks) at a time. 
+ // + + bitMask = kAllBitsSetInWord; // put this in a register for 68K + while (numBlocks >= kBitsPerWord) { + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += bitsPerBlock; // generate a block number in the next bitmap block + + buffer = NULL; + err = ReleaseBitmapBlock(vcb, blockRef, true); + if (err != noErr) goto Exit; + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = wordsPerBlock; + } +#if DEBUG + if (*currentWord != 0) { + LFHFS_LOG(LEVEL_ERROR, "BlockMarkAllocatedInternal: blocks already allocated!"); + hfs_assert(0); + } +#endif + *currentWord = SWAP_BE32 (bitMask); + numBlocks -= kBitsPerWord; + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate any remaining blocks. 
+ // + + if (numBlocks != 0) { + bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += bitsPerBlock; // generate a block number in the next bitmap block + + buffer = NULL; + err = ReleaseBitmapBlock(vcb, blockRef, true); + if (err != noErr) goto Exit; + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + currentWord = buffer; + } +#if DEBUG + if ((*currentWord & SWAP_BE32 (bitMask)) != 0) { + LFHFS_LOG(LEVEL_ERROR, "BlockMarkAllocatedInternal: blocks already allocated!"); + hfs_assert(0); + } +#endif + *currentWord |= SWAP_BE32 (bitMask); // set the bits in the bitmap + + // No need to update currentWord or wordsLeft + } + +Exit: + + if (buffer) + (void)ReleaseBitmapBlock(vcb, blockRef, true); + + return err; +} + + +/* + * BlockMarkFree + * + * This is a wrapper function around the internal calls which will actually mark the blocks + * as freed. It will mark the blocks in the red-black tree if appropriate. We need to do + * this logic here to avoid callers having to deal with whether or not the red-black tree + * is enabled. + * + */ +OSErr BlockMarkFree( + ExtendedVCB *vcb, + u_int32_t startingBlock, + register u_int32_t numBlocks) +{ + return BlockMarkFreeInternal(vcb, startingBlock, numBlocks, true); +} + + +/* + * BlockMarkFreeUnused + * + * Scan the bitmap block beyond end of current file system for bits + * that are marked as used. If any of the bits are marked as used, + * this function marks them free. + * + * Note: This was specifically written to mark all bits beyond + * end of current file system during hfs_extendfs(), which makes + * sure that all the new blocks added to the file system are + * marked as free. 
We expect that all the blocks beyond end of + * current file system are always marked as free, but there might + * be cases where are marked as used. This function assumes that + * the number of blocks marked as used incorrectly are relatively + * small, otherwise this can overflow journal transaction size + * on certain file system configurations (example, large unused + * bitmap with relatively small journal). + * + * Input: + * startingBlock: First block of the range to mark unused + * numBlocks: Number of blocks in the range to mark unused + * + * Returns: zero on success, non-zero on error. + */ +OSErr BlockMarkFreeUnused(ExtendedVCB *vcb, u_int32_t startingBlock, register u_int32_t numBlocks) +{ + int error = 0; + struct hfsmount *hfsmp = VCBTOHFS(vcb); + u_int32_t curNumBlocks; + u_int32_t bitsPerBlock; + u_int32_t lastBit; + + /* Use the optimal bitmap I/O size instead of bitmap block size */ + bitsPerBlock = hfsmp->vcbVBMIOSize * kBitsPerByte; + + /* + * First clear any non bitmap allocation block aligned bits + * + * Calculate the first bit in the bitmap block next to + * the bitmap block containing the bit for startingBlock. + * Using this value, we calculate the total number of + * bits to be marked unused from startingBlock to the + * end of bitmap block containing startingBlock. + */ + lastBit = ((startingBlock + (bitsPerBlock - 1))/bitsPerBlock) * bitsPerBlock; + curNumBlocks = lastBit - startingBlock; + if (curNumBlocks > numBlocks) { + curNumBlocks = numBlocks; + } + error = BlockMarkFreeInternal(vcb, startingBlock, curNumBlocks, false); + if (error) { + return error; + } + startingBlock += curNumBlocks; + numBlocks -= curNumBlocks; + + /* + * Check a full bitmap block for any 'used' bit. If any bit is used, + * mark all the bits only in that bitmap block as free. This ensures + * that we do not write unmodified bitmap blocks and do not + * overwhelm the journal. 
+ * + * The code starts by checking full bitmap block at a time, and + * marks entire bitmap block as free only if any bit in that bitmap + * block is marked as used. In the end, it handles the last bitmap + * block which might be partially full by only checking till the + * caller-specified last bit and if any bit is set, only mark that + * range as free. + */ + while (numBlocks) { + if (numBlocks >= bitsPerBlock) { + curNumBlocks = bitsPerBlock; + } else { + curNumBlocks = numBlocks; + } + if (hfs_isallocated(hfsmp, startingBlock, curNumBlocks) == true) { + error = BlockMarkFreeInternal(vcb, startingBlock, curNumBlocks, false); + if (error) { + return error; + } + } + startingBlock += curNumBlocks; + numBlocks -= curNumBlocks; + } + + return error; +} + +/* + _______________________________________________________________________ + + Routine: BlockMarkFreeInternal + + Function: Mark a contiguous group of blocks as free (clear in the + bitmap). It assumes those bits are currently marked + allocated (set in the bitmap). + + Inputs: + vcb Pointer to volume where space is to be freed + startingBlock First block number to mark as freed + numBlocks Number of blocks to mark as freed + do_validate If true, validate that the blocks being + deallocated to check if they are within totalBlocks + for current volume and whether they were allocated + before they are marked free. 
+ _______________________________________________________________________ + */ +static +OSErr BlockMarkFreeInternal( + ExtendedVCB *vcb, + u_int32_t startingBlock_in, + register u_int32_t numBlocks_in, + Boolean do_validate) +{ + OSErr err; + u_int32_t startingBlock = startingBlock_in; + u_int32_t numBlocks = numBlocks_in; + uint32_t unmapStart = startingBlock_in; + uint32_t unmapCount = numBlocks_in; + uint32_t wordIndexInBlock; + u_int32_t *currentWord; // Pointer to current word within bitmap block + u_int32_t wordsLeft; // Number of words left in this bitmap block + u_int32_t bitMask; // Word with given bits already set (ready to OR in) + u_int32_t currentBit; // Bit index within word of current bit to allocate + u_int32_t numBits; // Number of bits in word to allocate + u_int32_t *buffer = NULL; + GenericLFBufPtr blockRef = NULL; + u_int32_t bitsPerBlock; + u_int32_t wordsPerBlock; + // XXXdbg + struct hfsmount *hfsmp = VCBTOHFS(vcb); + + /* + * NOTE: We use vcb->totalBlocks instead of vcb->allocLimit because we + * need to be able to free blocks being relocated during hfs_truncatefs. 
+ */ + if ((do_validate == true) && + (startingBlock + numBlocks > vcb->totalBlocks)) { +#if ALLOC_DEBUG || DEBUG + LFHFS_LOG(LEVEL_ERROR, "lockMarkFreeInternal() free non-existent blocks at %u (numBlock=%u) on vol %s\n", startingBlock, numBlocks, vcb->vcbVN); + hfs_assert(0); + __builtin_unreachable(); +#else + LFHFS_LOG(LEVEL_ERROR, "BlockMarkFreeInternal() trying to free non-existent blocks starting at %u (numBlock=%u) on volume %s\n", startingBlock, numBlocks, vcb->vcbVN); + hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED); + err = EIO; + goto Exit; +#endif + } + + // + // Pre-read the bitmap block containing the first word of allocation + // + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + + uint32_t min_unmap = 0, max_unmap = UINT32_MAX; + + // Work out the bounds of any unmap we can send down + struct rl_entry *range; + for (int i = 0; i < 2; ++i) { + TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[i], rl_link) { + if (range->rl_start < startingBlock + && range->rl_end >= min_unmap) { + min_unmap = (uint32_t)(range->rl_end + 1); + } + if (range->rl_end >= startingBlock + numBlocks + && range->rl_start < max_unmap) { + max_unmap = (uint32_t)range->rl_start; + } + } + } + + // + // Figure out how many bits and words per bitmap block. + // + bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte; + wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord; + wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord; + + // + // Look for a range of free blocks immediately before startingBlock + // (up to the start of the current bitmap block). Set unmapStart to + // the first free block. 
+ // + currentWord = buffer + wordIndexInBlock; + currentBit = startingBlock % kBitsPerWord; + bitMask = kHighBitInWordMask >> currentBit; + while (unmapStart > min_unmap) { + // Move currentWord/bitMask back by one bit + bitMask <<= 1; + if (bitMask == 0) { + if (--currentWord < buffer) + break; + bitMask = kLowBitInWordMask; + } + + if (*currentWord & SWAP_BE32(bitMask)) + break; // Found an allocated block. Stop searching. + --unmapStart; + ++unmapCount; + } + + // + // If the first block to free doesn't start on a word + // boundary in the bitmap, then treat that first word + // specially. + // + + currentWord = buffer + wordIndexInBlock; + wordsLeft = wordsPerBlock - wordIndexInBlock; + currentBit = startingBlock % kBitsPerWord; + if (currentBit != 0) { + bitMask = kAllBitsSetInWord >> currentBit; // turn off all bits before currentBit + numBits = kBitsPerWord - currentBit; // number of remaining bits in this word + if (numBits > numBlocks) { + numBits = numBlocks; // entire allocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (currentBit + numBits)); // turn off bits after last + } + if ((do_validate == true) && + (*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) { + goto Corruption; + } + *currentWord &= SWAP_BE32 (~bitMask); // clear the bits in the bitmap + numBlocks -= numBits; // adjust number of blocks left to free + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Free whole words (32 blocks) at a time. 
+ // + + while (numBlocks >= kBitsPerWord) { + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += bitsPerBlock; // generate a block number in the next bitmap block + + buffer = NULL; + err = ReleaseBitmapBlock(vcb, blockRef, true); + if (err != noErr) goto Exit; + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = wordsPerBlock; + } + if ((do_validate == true) && + (*currentWord != SWAP_BE32 (kAllBitsSetInWord))) { + goto Corruption; + } + *currentWord = 0; // clear the entire word + numBlocks -= kBitsPerWord; + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Free any remaining blocks. + // + + if (numBlocks != 0) { + bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += bitsPerBlock; // generate a block number in the next bitmap block + + buffer = NULL; + err = ReleaseBitmapBlock(vcb, blockRef, true); + if (err != noErr) goto Exit; + + err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef, + HFS_ALLOC_IGNORE_RESERVED); + if (err != noErr) goto Exit; + + // XXXdbg + if (hfsmp->jnl) { + journal_modify_block_start(hfsmp->jnl, blockRef); + } + + currentWord = buffer; + } + if ((do_validate == true) && + (*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) { + goto Corruption; + } + *currentWord &= SWAP_BE32 (~bitMask); // clear the bits in the bitmap + + // No need to update currentWord or wordsLeft + } + + // + // Look for a range of free blocks immediately after the range we just freed + // (up to the end of the current bitmap block). 
+ // + wordIndexInBlock = ((startingBlock_in + numBlocks_in - 1) & (bitsPerBlock-1)) / kBitsPerWord; + wordsLeft = wordsPerBlock - wordIndexInBlock; + currentWord = buffer + wordIndexInBlock; + currentBit = (startingBlock_in + numBlocks_in - 1) % kBitsPerWord; + bitMask = kHighBitInWordMask >> currentBit; + while (unmapStart + unmapCount < max_unmap) { + // Move currentWord/bitMask/wordsLeft forward one bit + bitMask >>= 1; + if (bitMask == 0) { + if (--wordsLeft == 0) + break; + ++currentWord; + bitMask = kHighBitInWordMask; + } + + if (*currentWord & SWAP_BE32(bitMask)) + break; // Found an allocated block. Stop searching. + ++unmapCount; + } + +Exit: + + if (buffer) + (void)ReleaseBitmapBlock(vcb, blockRef, true); + return err; + +Corruption: +#if DEBUG + LFHFS_LOG(LEVEL_ERROR, "BlockMarkFreeInternal: blocks not allocated!"); + hfs_assert(0); + __builtin_unreachable(); +#else + LFHFS_LOG(LEVEL_ERROR, "BlockMarkFreeInternal() trying to free unallocated blocks on volume %s <%u, %u>\n", + vcb->vcbVN, startingBlock_in, numBlocks_in); + + hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED); + err = EIO; + goto Exit; +#endif +} + + +/* + _______________________________________________________________________ + + Routine: BlockFindContiguous + + Function: Find a contiguous range of blocks that are free (bits + clear in the bitmap). If a contiguous range of the + minimum size can't be found, an error will be returned. + This is only needed to support the bitmap-scanning logic, + as the red-black tree should be able to do this by internally + searching its tree. + + Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block of range + endingBlock Last possible block in range + 1 + minBlocks Minimum number of blocks needed. Must be > 0. 
+ maxBlocks Maximum (ideal) number of blocks desired + useMetaZone OK to dip into metadata allocation zone + + Outputs: + actualStartBlock First block of range found, or 0 if error + actualNumBlocks Number of blocks found, or 0 if error + + Returns: + noErr Found at least minBlocks contiguous + dskFulErr No contiguous space found, or all less than minBlocks + _______________________________________________________________________ + */ + +static OSErr BlockFindContiguous( + ExtendedVCB *vcb, + u_int32_t startingBlock, + u_int32_t endingBlock, + u_int32_t minBlocks, + u_int32_t maxBlocks, + Boolean useMetaZone, + Boolean trustSummary, + u_int32_t *actualStartBlock, + u_int32_t *actualNumBlocks, + hfs_block_alloc_flags_t flags) +{ + OSErr err; + register u_int32_t currentBlock; // Block we're currently looking at. + u_int32_t firstBlock; // First free block in current extent. + u_int32_t stopBlock; // If we get to this block, stop searching for first free block. + u_int32_t foundBlocks; // Number of contiguous free blocks in current extent. + u_int32_t *buffer = NULL; + register u_int32_t *currentWord; + register u_int32_t bitMask; + register u_int32_t wordsLeft; + register u_int32_t tempWord; + GenericLFBufPtr blockRef = 0; + u_int32_t wordsPerBlock; + struct hfsmount *hfsmp = (struct hfsmount*) vcb; + HFSPlusExtentDescriptor best = { 0, 0 }; + + /* + * When we're skipping the metadata zone and the start/end + * range overlaps with the metadata zone then adjust the + * start to be outside of the metadata zone. If the range + * is entirely inside the metadata zone then we can deny the + * request (dskFulErr). 
+ */ + if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) { + if (startingBlock <= vcb->hfs_metazone_end) { + if (endingBlock > (vcb->hfs_metazone_end + 2)) + startingBlock = vcb->hfs_metazone_end + 1; + else + goto DiskFull; + } + } + + if ((endingBlock - startingBlock) < minBlocks) + { + // The set of blocks we're checking is smaller than the minimum number + // of blocks, so we couldn't possibly find a good range. + goto DiskFull; + } + + stopBlock = endingBlock - minBlocks + 1; + currentBlock = startingBlock; + firstBlock = 0; + + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) + currentBlock = NextBitmapBlock(vcb, currentBlock); + + /* + * Use the summary table if we can. Skip over any totally + * allocated blocks. currentBlock should now point to the first + * block beyond the metadata zone if the metazone allocations are not + * allowed in this invocation. + */ + if ((trustSummary) && (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) { + uint32_t suggestion; + err = hfs_find_summary_free (hfsmp, currentBlock, &suggestion); + if (err && err != ENOSPC) + goto ErrorExit; + if (err == ENOSPC || suggestion >= stopBlock) + goto DiskFull; + currentBlock = suggestion; + } + + + // + // Pre-read the first bitmap block. + // + err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags); + if ( err != noErr ) goto ErrorExit; + + // + // Figure out where currentBlock is within the buffer. + // + wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord; + + wordsLeft = (currentBlock / kBitsPerWord) & (wordsPerBlock-1); // Current index into buffer + currentWord = buffer + wordsLeft; + wordsLeft = wordsPerBlock - wordsLeft; + + uint32_t remaining = (hfsmp->freeBlocks - hfsmp->lockedBlocks + - (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE) + ? 0 : hfsmp->tentativeBlocks)); + + /* + * This outer do-while loop is the main body of this function. 
Its job is + * to search through the blocks (until we hit 'stopBlock'), and iterate + * through swaths of allocated bitmap until it finds free regions. + */ + + do + { + foundBlocks = 0; + /* + * We will try and update the summary table as we search + * below. Note that we will never update the summary table + * for the first and last blocks that the summary table + * covers. Ideally, we should, but the benefits probably + * aren't that significant so we leave things alone for now. + */ + uint32_t summary_block_scan = 0; + /* + * Inner while loop 1: + * Look for free blocks, skipping over allocated ones. + * + * Initialization starts with checking the initial partial word + * if applicable. + */ + bitMask = currentBlock & kBitsWithinWordMask; + if (bitMask) + { + tempWord = SWAP_BE32(*currentWord); // Fetch the current word only once + bitMask = kHighBitInWordMask >> bitMask; + while (tempWord & bitMask) + { + bitMask >>= 1; + ++currentBlock; + } + + // Did we find an unused bit (bitMask != 0), or run out of bits (bitMask == 0)? + if (bitMask) + goto FoundUnused; + + // Didn't find any unused bits, so we're done with this word. + ++currentWord; + --wordsLeft; + } + + // + // Check whole words + // + while (currentBlock < stopBlock) + { + // See if it's time to read another block. + if (wordsLeft == 0) + { + buffer = NULL; + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + /* + * If summary_block_scan is non-zero, then we must have + * pulled a bitmap file block into core, and scanned through + * the entire thing. Because we're in this loop, we are + * implicitly trusting that the bitmap didn't have any knowledge + * about this particular block. As a result, update the bitmap + * (lazily, now that we've scanned it) with our findings that + * this particular block is completely used up. 
+ */ + if (summary_block_scan != 0) { + uint32_t summary_bit; + err = hfs_get_summary_index (hfsmp, summary_block_scan, &summary_bit); + if (err != noErr) goto ErrorExit; + hfs_set_summary (hfsmp, summary_bit, 1); + } + } + err = ReleaseBitmapBlock(vcb, blockRef, false); + if (err != noErr) goto ErrorExit; + + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + currentBlock = NextBitmapBlock(vcb, currentBlock); + if (currentBlock >= stopBlock) { + goto LoopExit; + } + } + + /* Skip over fully allocated bitmap blocks if we can */ + if ((trustSummary) && (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) { + uint32_t suggestion; + err = hfs_find_summary_free (hfsmp, currentBlock, &suggestion); + if (err && err != ENOSPC) + goto ErrorExit; + if (err == ENOSPC || suggestion >= stopBlock) + goto LoopExit; + currentBlock = suggestion; + } + + err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags); + if ( err != noErr ) goto ErrorExit; + + /* + * Set summary_block_scan to be the block we just read into the block cache. + * + * At this point, we've just read an allocation block worth of bitmap file + * into the buffer above, but we don't know if it is completely allocated or not. + * If we find that it is completely allocated/full then we will jump + * through this loop again and set the appropriate summary bit as fully allocated. + */ + summary_block_scan = currentBlock; + currentWord = buffer; + wordsLeft = wordsPerBlock; + } + + // See if any of the bits are clear + if ((tempWord = SWAP_BE32(*currentWord)) + 1) // non-zero if any bits were clear + { + // Figure out which bit is clear + bitMask = kHighBitInWordMask; + while (tempWord & bitMask) + { + bitMask >>= 1; + ++currentBlock; + } + + break; // Found the free bit; break out to FoundUnused. 
+ } + + // Keep looking at the next word + currentBlock += kBitsPerWord; + ++currentWord; + --wordsLeft; + } + + FoundUnused: + // Make sure the unused bit is early enough to use + if (currentBlock >= stopBlock) + { + break; + } + + // Remember the start of the extent + firstBlock = currentBlock; + + + /* + * Inner while loop 2: + * We get here if we find a free block. Count the number + * of contiguous free blocks observed. + * + * Initialization starts with checking the initial partial word + * if applicable. + */ + bitMask = currentBlock & kBitsWithinWordMask; + if (bitMask) + { + tempWord = SWAP_BE32(*currentWord); // Fetch the current word only once + bitMask = kHighBitInWordMask >> bitMask; + while (bitMask && !(tempWord & bitMask)) + { + bitMask >>= 1; + ++currentBlock; + } + + // Did we find a used bit (bitMask != 0), or run out of bits (bitMask == 0)? + if (bitMask) + goto FoundUsed; + + // Didn't find any used bits, so we're done with this word. + ++currentWord; + --wordsLeft; + } + + // + // Check whole words + // + while (currentBlock < endingBlock) + { + // See if it's time to read another block. + if (wordsLeft == 0) + { + buffer = NULL; + err = ReleaseBitmapBlock(vcb, blockRef, false); + if (err != noErr) goto ErrorExit; + + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + u_int32_t nextBlock; + + nextBlock = NextBitmapBlock(vcb, currentBlock); + if (nextBlock != currentBlock) { + goto LoopExit; /* allocation gap, so stop */ + } + } + + err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags); + if ( err != noErr ) goto ErrorExit; + + currentWord = buffer; + wordsLeft = wordsPerBlock; + } + + // See if any of the bits are set + if ((tempWord = SWAP_BE32(*currentWord)) != 0) + { + // Figure out which bit is set + bitMask = kHighBitInWordMask; + while (!(tempWord & bitMask)) + { + bitMask >>= 1; + ++currentBlock; + } + + break; // Found the used bit; break out to FoundUsed. 
+ } + + // Keep looking at the next word + currentBlock += kBitsPerWord; + ++currentWord; + --wordsLeft; + + // If we found at least maxBlocks, we can quit early. + if ((currentBlock - firstBlock) >= maxBlocks) + break; + } + + FoundUsed: + // Make sure we didn't run out of bitmap looking for a used block. + // If so, pin to the end of the bitmap. + if (currentBlock > endingBlock) + currentBlock = endingBlock; + + // Figure out how many contiguous free blocks there were. + // Pin the answer to maxBlocks. + foundBlocks = currentBlock - firstBlock; + if (foundBlocks > maxBlocks) + foundBlocks = maxBlocks; + + if (remaining) { + if (foundBlocks > remaining) { + LFHFS_LOG( LEVEL_DEBUG, "hfs: found more blocks than are indicated free!\n"); + remaining = UINT32_MAX; + } else + remaining -= foundBlocks; + } + + if (ISSET(flags, HFS_ALLOC_TRY_HARD)) { + if (foundBlocks > best.blockCount) { + best.startBlock = firstBlock; + best.blockCount = foundBlocks; + } + + if (foundBlocks >= maxBlocks || best.blockCount >= remaining) + break; + + /* + * Note that we will go ahead and add this free extent to our + * cache below but that's OK because we'll remove it again if we + * decide to use this extent. + */ + } else if (foundBlocks >= minBlocks) + break; // Found what we needed! + + /* + * We did not find the total blocks we were looking for, but + * add this free block run to our free extent cache list, if possible. 
+ */ + + // If we're ignoring tentative ranges, we need to account for them here + if (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)) { + struct rl_entry free_extent = rl_make(firstBlock, firstBlock + foundBlocks - 1); + struct rl_entry *range;; + TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS], rl_link) { + rl_subtract(&free_extent, range); + if (rl_len(range) == 0) + break; + } + firstBlock = (uint32_t)free_extent.rl_start; + foundBlocks = (uint32_t)rl_len(&free_extent); + } + } while (currentBlock < stopBlock); +LoopExit: + + if (ISSET(flags, HFS_ALLOC_TRY_HARD)) { + firstBlock = best.startBlock; + foundBlocks = best.blockCount; + } + + // Return the outputs. + if (foundBlocks < minBlocks) + { + DiskFull: + err = dskFulErr; + ErrorExit: + *actualStartBlock = 0; + *actualNumBlocks = 0; + } + else + { + err = noErr; + *actualStartBlock = firstBlock; + *actualNumBlocks = foundBlocks; + /* + * Sanity check for overflow + */ + if ((firstBlock + foundBlocks) > vcb->allocLimit) { + LFHFS_LOG(LEVEL_ERROR, "blk allocation overflow on \"%s\" sb:0x%08x eb:0x%08x cb:0x%08x fb:0x%08x stop:0x%08x min:0x%08x found:0x%08x", + vcb->vcbVN, startingBlock, endingBlock, currentBlock, + firstBlock, stopBlock, minBlocks, foundBlocks); + hfs_assert(0); + } + } + + if (buffer) + (void) ReleaseBitmapBlock(vcb, blockRef, false); + + return err; +} + + +/* + * Count number of bits set in the given 32-bit unsigned number + * + * Returns: + * Number of bits set + */ +static int num_bits_set(u_int32_t num) +{ + return __builtin_popcount(num); +} + +/* + * For a given range of blocks, find the total number of blocks + * allocated. If 'stop_on_first' is true, it stops as soon as it + * encounters the first allocated block. This option is useful + * to determine if any block is allocated or not. + * + * Inputs: + * startingBlock First allocation block number of the range to be scanned. + * numBlocks Total number of blocks that need to be scanned. 
 * stop_on_first Stop the search after the first allocated block is found.
 *
 * Output:
 * allocCount Total number of allocation blocks allocated in the given range.
 *
 * On error, it is the number of allocated blocks found
 * before the function got an error.
 *
 * If 'stop_on_first' is set,
 * allocCount = 1 if any allocated block was found.
 * allocCount = 0 if no allocated block was found.
 *
 * Returns:
 * 0 on success, non-zero on failure.
 */
static int
hfs_isallocated_internal(struct hfsmount *hfsmp, u_int32_t startingBlock,
    u_int32_t numBlocks, Boolean stop_on_first, u_int32_t *allocCount)
{
    u_int32_t *currentWord;   // Pointer to current word within bitmap block
    u_int32_t wordsLeft;      // Number of words left in this bitmap block
    u_int32_t bitMask;        // Word with given bits already set (ready to test)
    u_int32_t firstBit;       // Bit index within word of first bit to allocate
    u_int32_t numBits;        // Number of bits in word to allocate
    u_int32_t *buffer = NULL;
    GenericLFBufPtr blockRef;
    u_int32_t bitsPerBlock;
    u_int32_t wordsPerBlock;
    u_int32_t blockCount = 0;
    int error;

    /*
     * Pre-read the bitmap block containing the first word of allocation
     */
    error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
        HFS_ALLOC_IGNORE_TENTATIVE);
    if (error)
        goto JustReturn;

    /*
     * Initialize currentWord, and wordsLeft.
     */
    {
        u_int32_t wordIndexInBlock;

        bitsPerBlock = hfsmp->vcbVBMIOSize * kBitsPerByte;
        wordsPerBlock = hfsmp->vcbVBMIOSize / kBytesPerWord;

        // NOTE(review): the mask below assumes bitsPerBlock is a power of two
        // (i.e. vcbVBMIOSize is a power-of-two I/O size) -- confirm against mount setup.
        wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;
        currentWord = buffer + wordIndexInBlock;
        wordsLeft = wordsPerBlock - wordIndexInBlock;
    }

    /*
     * First test any non word aligned bits (a leading partial word).
     */
    firstBit = startingBlock % kBitsPerWord;
    if (firstBit != 0) {
        bitMask = kAllBitsSetInWord >> firstBit;
        numBits = kBitsPerWord - firstBit;
        if (numBits > numBlocks) {
            numBits = numBlocks;
            // Also mask off bits past the end of the requested range.
            bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
        }
        // Bitmap words are big-endian on disk; swap the mask, not the word.
        if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
            if (stop_on_first) {
                blockCount = 1;
                goto Exit;
            }
            blockCount += num_bits_set(*currentWord & SWAP_BE32 (bitMask));
        }
        numBlocks -= numBits;
        ++currentWord;
        --wordsLeft;
    }

    /*
     * Test whole words (32 blocks) at a time.
     */
    while (numBlocks >= kBitsPerWord) {
        if (wordsLeft == 0) {
            /* Read in the next bitmap block. */
            startingBlock += bitsPerBlock;

            buffer = NULL;
            error = ReleaseBitmapBlock(hfsmp, blockRef, false);
            if (error) goto Exit;

            error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
                HFS_ALLOC_IGNORE_TENTATIVE);
            if (error) goto Exit;

            /* Readjust currentWord and wordsLeft. */
            currentWord = buffer;
            wordsLeft = wordsPerBlock;
        }
        // A fully-allocated word needs no swap: any non-zero word has set bits.
        if (*currentWord != 0) {
            if (stop_on_first) {
                blockCount = 1;
                goto Exit;
            }
            blockCount += num_bits_set(*currentWord);
        }
        numBlocks -= kBitsPerWord;
        ++currentWord;
        --wordsLeft;
    }

    /*
     * Test any remaining blocks (a trailing partial word).
     */
    if (numBlocks != 0) {
        bitMask = ~(kAllBitsSetInWord >> numBlocks);
        if (wordsLeft == 0) {
            /* Read in the next bitmap block */
            startingBlock += bitsPerBlock;

            buffer = NULL;
            error = ReleaseBitmapBlock(hfsmp, blockRef, false);
            if (error) goto Exit;

            error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
                HFS_ALLOC_IGNORE_TENTATIVE);
            if (error) goto Exit;

            currentWord = buffer;
        }
        if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
            if (stop_on_first) {
                blockCount = 1;
                goto Exit;
            }
            blockCount += num_bits_set(*currentWord & SWAP_BE32 (bitMask));
        }
    }
Exit:
    if (buffer) {
        (void)ReleaseBitmapBlock(hfsmp, blockRef, false);
    }
    if (allocCount) {
        *allocCount = blockCount;
    }

JustReturn:
    // NOTE(review): if the very first ReadBitmapBlock fails we arrive here
    // without writing *allocCount at all; callers relying on "allocCount is
    // zero on failure" should initialize it themselves -- verify callers.
    return (error);
}

/*
 * Count total number of blocks that are allocated in the given
 * range from the bitmap. This is used to preflight total blocks
 * that need to be relocated during volume resize.
 *
 * The journal or allocation file lock must be held.
 *
 * Returns:
 * 0 on success, non-zero on failure.
 * On failure, allocCount is zero.
 * (NOTE(review): see hfs_isallocated_internal -- on an early read failure
 * allocCount may be left unmodified rather than zeroed; confirm.)
 */
int
hfs_count_allocated(struct hfsmount *hfsmp, u_int32_t startBlock,
    u_int32_t numBlocks, u_int32_t *allocCount)
{
    // Thin wrapper: full scan (stop_on_first == false) counting every set bit.
    return hfs_isallocated_internal(hfsmp, startBlock, numBlocks, false, allocCount);
}

/*
 * Test to see if any blocks in a range are allocated.
 *
 * Note: On error, this function returns 1, which means that
 * one or more blocks in the range are allocated. This function
 * is primarily used for volume resize and we do not want
 * to report to the caller that the blocks are free when we
 * were not able to deterministically find it out. So on error,
 * we always report that the blocks are allocated.
 *
 * The journal or allocation file lock must be held.
 *
 * Returns
 * 0 if all blocks in the range are free.
 * 1 if blocks in the range are allocated, or there was an error.
 */
int
hfs_isallocated(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
{
    int error;
    u_int32_t allocCount;

    // stop_on_first == true: we only need existence, not a count.
    error = hfs_isallocated_internal(hfsmp, startingBlock, numBlocks, true, &allocCount);
    if (error) {
        /* On error, we always say that the blocks are allocated
         * so that volume resize does not return false success.
         */
        return 1;
    } else {
        /* The function was deterministically able to find out
         * if there was any block allocated or not. In that case,
         * the value in allocCount is good enough to be returned
         * back to the caller.
         */
        return allocCount;
    }
}

/*
 * CONFIG_HFS_RBTREE
 * Check to see if the red-black tree is live. Allocation file lock must be held
 * shared or exclusive to call this function. Note that we may call this even if
 * HFS is built without activating the red-black tree code.
 */
int
hfs_isrbtree_active(struct hfsmount *hfsmp){

#pragma unused (hfsmp)

    /* Just return 0 for now */
    return 0;
}



/* Summary Table Functions */
/*
 * hfs_check_summary:
 *
 * This function should be used to query the summary table to see if we can
 * bypass a bitmap block or not when we're trying to find a free allocation block.
 *
 *
 * Inputs:
 * allocblock - allocation block number. Will be used to infer the correct summary bit.
 * hfsmp -- filesystem in question.
 *
 * Output Arg:
 * *freeblocks - set to 1 if we believe at least one free blocks in this vcbVBMIOSize
 * page of bitmap file.
 *
 *
 * Returns:
 * 0 on success
 * EINVAL on error
 *
 */

static int hfs_check_summary (struct hfsmount *hfsmp, uint32_t allocblock, uint32_t *freeblocks) {

    int err = EINVAL;
    if (hfsmp->vcbVBMIOSize) {
        if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
            uint32_t index;
            if (hfs_get_summary_index (hfsmp, allocblock, &index)) {
                *freeblocks = 0;
                return EINVAL;
            }

            /* Ok, now that we have the bit index into the array, what byte is it in ? */
            uint32_t byteindex = index / kBitsPerByte;
            uint8_t current_byte = hfsmp->hfs_summary_table[byteindex];
            uint8_t bit_in_byte = index % kBitsPerByte;

            // Summary bit semantics: 1 == "this vcbVBMIOSize page is fully used".
            if (current_byte & (1 << bit_in_byte)) {
                /*
                 * We do not believe there is anything free in the
                 * entire vcbVBMIOSize'd block.
                 */
                *freeblocks = 0;
            }
            else {
                /* Looks like there might be a free block here... */
                *freeblocks = 1;
            }
        }
        // NOTE(review): when vcbVBMIOSize is set but HFS_SUMMARY_TABLE is not,
        // we return 0 (success) with *freeblocks untouched -- callers must
        // pre-initialize it; confirm this is intended.
        err = 0;
    }

    return err;
}

/*
 * hfs_release_summary
 *
 * Given an extent that is about to be de-allocated on-disk, determine the number
 * of summary bitmap bits that need to be marked as 'potentially available'.
 * Then go ahead and mark them as free.
 *
 * Inputs:
 * hfsmp - hfs mount
 * block - starting allocation block.
 * length - length of the extent.
 *
 * Returns:
 * EINVAL upon any errors.
 */
static int hfs_release_summary(struct hfsmount *hfsmp, uint32_t start_blk, uint32_t length) {
    int err = EINVAL;
    uint32_t end_blk = (start_blk + length) - 1;

    if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
        /* Figure out what the starting / ending block's summary bits are */
        uint32_t start_bit;
        uint32_t end_bit;
        uint32_t current_bit;

        err = hfs_get_summary_index (hfsmp, start_blk, &start_bit);
        if (err) {
            goto release_err;
        }
        err = hfs_get_summary_index (hfsmp, end_blk, &end_bit);
        if (err) {
            goto release_err;
        }

        if (ALLOC_DEBUG) {
            if (start_bit > end_bit) {
                LFHFS_LOG(LEVEL_ERROR, "hfs_release_summary: start > end!, %d %d ", start_bit, end_bit);
                hfs_assert(0);
            }
        }
        // Clear (mark "potentially free") every summary bit the extent touches.
        // NOTE(review): only the last hfs_set_summary return value survives the
        // loop; earlier failures are silently overwritten -- confirm acceptable.
        current_bit = start_bit;
        while (current_bit <= end_bit) {
            err = hfs_set_summary (hfsmp, current_bit, 0);
            current_bit++;
        }
    }

release_err:
    return err;
}

/*
 * hfs_find_summary_free
 *
 * Given an allocation block as input, returns an allocation block number as output as a
 * suggestion for where to start scanning the bitmap in order to find free blocks.
 * It will
 * determine the vcbVBMIOsize of the input allocation block, convert that into a summary
 * bit, then keep iterating over the summary bits in order to find the first free one.
 *
 * Inputs:
 * hfsmp - hfs mount
 * block - starting allocation block
 * newblock - output block as suggestion
 *
 * Returns:
 * 0 on success
 * ENOSPC if we could not find a free block
 */

int hfs_find_summary_free (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock) {

    int err = ENOSPC;
    uint32_t bit_index = 0;
    uint32_t maybe_has_blocks = 0;

    if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
        uint32_t byte_index;
        uint8_t curbyte;
        uint8_t bit_in_byte;
        uint32_t summary_cap;

        /*
         * We generate a cap for the summary search because the summary table
         * always represents a full summary of the bitmap FILE, which may
         * be way more bits than are necessary for the actual filesystem
         * whose allocations are mapped by the bitmap.
         *
         * Compute how much of hfs_summary_size is useable for the given number
         * of allocation blocks eligible on this FS.
         */
        err = hfs_get_summary_index (hfsmp, hfsmp->allocLimit - 1, &summary_cap);
        if (err) {
            goto summary_exit;
        }

        /* Check the starting block first */
        err = hfs_check_summary (hfsmp, block, &maybe_has_blocks);
        if (err) {
            goto summary_exit;
        }

        if (maybe_has_blocks) {
            /*
             * It looks like the initial start block could have something.
             * Short-circuit and just use that.
             */
            *newblock = block;
            goto summary_exit;
        }

        /*
         * OK, now we know that the first block was useless.
         * Get the starting summary bit, and find it in the array
         */
        maybe_has_blocks = 0;
        err = hfs_get_summary_index (hfsmp, block, &bit_index);
        if (err) {
            goto summary_exit;
        }

        /* Iterate until we find something. */
        while (bit_index <= summary_cap) {
            byte_index = bit_index / kBitsPerByte;
            curbyte = hfsmp->hfs_summary_table[byte_index];
            bit_in_byte = bit_index % kBitsPerByte;

            if (curbyte & (1 << bit_in_byte)) {
                /* nothing here. increment and move on */
                bit_index++;
            }
            else {
                /*
                 * found something! convert bit_index back into
                 * an allocation block for use. 'newblock' will now
                 * contain the proper allocation block # based on the bit
                 * index.
                 */
                err = hfs_get_summary_allocblock (hfsmp, bit_index, newblock);
                if (err) {
                    goto summary_exit;
                }
                maybe_has_blocks = 1;
                break;
            }
        }

        /* If our loop didn't find anything, set err to ENOSPC */
        if (maybe_has_blocks == 0) {
            err = ENOSPC;
        }
    }

    /* If the summary table is not active for this mount, we'll just return ENOSPC */
summary_exit:
    // A hit (either the initial block or a later summary bit) overrides any err.
    if (maybe_has_blocks) {
        err = 0;
    }

    return err;
}

/*
 * hfs_get_summary_allocblock
 *
 * Convert a summary bit into an allocation block number to use to start searching for free blocks.
 *
 * Inputs:
 * hfsmp - hfs mount
 * summarybit - summary bit index
 * *alloc - allocation block number in the bitmap file.
 *
 * Output:
 * 0 on success
 * EINVAL on failure
 */
int hfs_get_summary_allocblock (struct hfsmount *hfsmp, uint32_t
    summarybit, uint32_t *alloc) {
    uint32_t bits_per_iosize = hfsmp->vcbVBMIOSize * kBitsPerByte;
    uint32_t allocblk;

    // NOTE(review): 32-bit multiply; presumably safe because the bitmap is
    // capped at 512MB (see hfs_scan_range_size) -- confirm no overflow here.
    allocblk = summarybit * bits_per_iosize;

    if (allocblk >= hfsmp->totalBlocks) {
        return EINVAL;
    }
    else {
        *alloc = allocblk;
    }

    return 0;
}


/*
 * hfs_set_summary:
 *
 * This function should be used to manipulate the summary table
 *
 * The argument 'inuse' will set the value of the bit in question to one or zero
 * depending on its value.
 *
 * Inputs:
 * hfsmp - hfs mount
 * summarybit - the bit index into the summary table to set/unset.
 * inuse - the value to assign to the bit.
 *
 * Returns:
 * 0 on success
 * EINVAL on error
 *
 */

static int hfs_set_summary (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t inuse) {

    int err = EINVAL;
    if (hfsmp->vcbVBMIOSize) {
        if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {

            if (ALLOC_DEBUG) {
                if (hfsmp->hfs_summary_table == NULL) {
                    LFHFS_LOG(LEVEL_ERROR, "hfs_set_summary: no table for %p ", hfsmp);
                    hfs_assert(0);
                }
            }

            /* Ok, now that we have the bit index into the array, what byte is it in ? */
            uint32_t byte_index = summarybit / kBitsPerByte;
            uint8_t current_byte = hfsmp->hfs_summary_table[byte_index];
            uint8_t bit_in_byte = summarybit % kBitsPerByte;

            if (inuse) {
                current_byte = (current_byte | (1 << bit_in_byte));
            }
            else {
                current_byte = (current_byte & ~(1 << bit_in_byte));
            }

            hfsmp->hfs_summary_table[byte_index] = current_byte;
        }
        // Returns success even when the summary table is disabled (no-op).
        err = 0;
    }

    return err;
}


/*
 * hfs_get_summary_index:
 *
 * This is a helper function which determines what summary bit represents the vcbVBMIOSize worth
 * of IO against the bitmap file.
 *
 * Returns:
 * 0 on success
 * EINVAL on failure
 */
static int hfs_get_summary_index (struct hfsmount *hfsmp, uint32_t block, uint32_t* index) {
    uint32_t summary_bit;
    uint32_t bits_per_iosize;
    int err = EINVAL;

    if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
        /* Is the input block bigger than the total number of blocks? */
        if (block >= hfsmp->totalBlocks) {
            return EINVAL;
        }

        /* Is there even a vbmIOSize set? */
        if (hfsmp->vcbVBMIOSize == 0) {
            return EINVAL;
        }

        bits_per_iosize = hfsmp->vcbVBMIOSize * kBitsPerByte;

        summary_bit = block / bits_per_iosize;

        *index = summary_bit;
        err = 0;
    }

    return err;
}

/*
 * hfs_init_summary
 *
 * From a given mount structure, compute how big the summary table should be for the given
 * filesystem, then allocate and bzero the memory.
 *
 * Returns:
 * 0 on success
 * EINVAL on failure
 */
int
hfs_init_summary (struct hfsmount *hfsmp) {

    uint32_t summary_size;
    uint32_t summary_size_bytes;
    uint8_t *summary_table;

    if (hfsmp->hfs_allocation_cp == NULL) {
        if (ALLOC_DEBUG) {
            LFHFS_LOG(LEVEL_DEBUG, "hfs_init_summary: summary table cannot progress without a bitmap cnode! \n");
        }
        return EINVAL;
    }
    /*
     * The practical maximum size of the summary table is 16KB:
     *
     * (512MB maximum bitmap size / (4k -- min alloc block size)) / 8 bits/byte.
     *
     * HFS+ will allow filesystems with allocation block sizes smaller than 4k, but
     * the end result is that we'll start to issue I/O in 2k or 1k sized chunks, which makes
     * supporting this much worse. The math would instead look like this:
     * (512MB / 2k) / 8 == 32k.
     *
     * So, we will disallow the summary table if the allocation block size is < 4k.
     */

    if (hfsmp->blockSize < HFS_MIN_SUMMARY_BLOCKSIZE) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_init_summary: summary table not allowed on FS with block size of %d\n", hfsmp->blockSize);
        return EINVAL;
    }

    summary_size = hfsmp->hfs_allocation_cp->c_blocks;

    if (ALLOC_DEBUG) {
        LFHFS_LOG(LEVEL_DEBUG, "HFS Summary Table Initialization: Bitmap %u blocks\n",
            hfsmp->hfs_allocation_cp->c_blocks);
    }

    /*
     * If the bitmap IO size is not the same as the allocation block size then
     * re-compute the number of summary bits necessary. Note that above, the
     * default size is the number of allocation blocks in the bitmap *FILE*
     * (not the number of bits in the bitmap itself). If the allocation block size
     * is large enough though, we may need to increase this.
     */
    if (hfsmp->blockSize != hfsmp->vcbVBMIOSize) {
        uint64_t lrg_size = (uint64_t) hfsmp->hfs_allocation_cp->c_blocks * (uint64_t) hfsmp->blockSize;
        lrg_size = lrg_size / (uint64_t)hfsmp->vcbVBMIOSize;

        /* With a full bitmap and 64k-capped iosize chunks, this would be 64k */
        summary_size = (uint32_t) lrg_size;
    }

    /*
     * If the block size is the same as the IO Size, then the total number of blocks
     * is already equal to the number of IO units, which is our number of summary bits.
     */

    summary_size_bytes = summary_size / kBitsPerByte;
    /* Always add one byte, just in case we have a dangling number of bits */
    summary_size_bytes++;

    if (ALLOC_DEBUG) {
        LFHFS_LOG(LEVEL_DEBUG, "HFS Summary Table: vcbVBMIOSize %d summary bits %d \n", hfsmp->vcbVBMIOSize, summary_size);
        LFHFS_LOG(LEVEL_DEBUG, "HFS Summary Table Size (in bytes) %d \n", summary_size_bytes);


    }

    /* Store the field in the mount point */
    hfsmp->hfs_summary_size = summary_size;
    hfsmp->hfs_summary_bytes = summary_size_bytes;

    // NOTE(review): hfs_mallocz result is only NULL-checked under ALLOC_DEBUG
    // below; presumably hfs_mallocz aborts on failure -- verify its contract
    // before relying on hfs_summary_table being non-NULL here.
    summary_table = hfs_mallocz(summary_size_bytes);

    /* enable the summary table */
    hfsmp->hfs_flags |= HFS_SUMMARY_TABLE;
    hfsmp->hfs_summary_table = summary_table;

    if (ALLOC_DEBUG) {
        if (hfsmp->hfs_summary_table == NULL) {
            LFHFS_LOG(LEVEL_ERROR, "HFS Summary Init: no table for %p\n", hfsmp);
            hfs_assert(0);
        }
    }
    return 0;
}

#if ALLOC_DEBUG
/*
 * hfs_validate_summary
 *
 * Validation routine for the summary table. Debug-only function.
 *
 * Bitmap lock must be held.
 *
 */
void hfs_validate_summary (struct hfsmount *hfsmp) {
    uint32_t i;
    int err;

    /*
     * Iterate over all of the bits in the summary table, and verify if
     * there really are free blocks in the pages that we believe
     * may contain free blocks.
     */

    if (hfsmp->hfs_summary_table == NULL) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: No HFS summary table!");
        hfs_assert(0);
    }

    /* 131072 bits == 16384 bytes. This is the theoretical max size of the summary table. we add 1 byte for slop */
    if (hfsmp->hfs_summary_size == 0 || hfsmp->hfs_summary_size > 131080) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: Size is bad! %d", hfsmp->hfs_summary_size);
        hfs_assert(0);
    }

    if (hfsmp->vcbVBMIOSize == 0) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: no VCB VBM IO Size !");
        hfs_assert(0);
    }

    LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: summary validation beginning on %s\n", hfsmp->vcbVN);
    LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: summary validation %d summary bits, %d summary blocks\n", hfsmp->hfs_summary_size, hfsmp->totalBlocks);

    /* iterate through all possible summary bits */
    for (i = 0; i < hfsmp->hfs_summary_size ; i++) {

        uint32_t bits_per_iosize = hfsmp->vcbVBMIOSize * kBitsPerByte;
        uint32_t byte_offset = hfsmp->vcbVBMIOSize * i;

        /* Compute the corresponding allocation block for the summary bit. */
        uint32_t alloc_block = i * bits_per_iosize;

        /*
         * We use a uint32_t pointer here because it will speed up
         * access to the real bitmap data on disk.
         */
        uint32_t *block_data;
        struct buf *bp;
        int counter;
        int counter_max;
        int saw_free_bits = 0;

        /* Get the block */
        if ((err = ReadBitmapRange (hfsmp, byte_offset, hfsmp->vcbVBMIOSize, &block_data, &bp))) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: error (%d) in ReadBitmapRange!", err);
            hfs_assert(0);
        }

        /* Query the status of the bit and then make sure we match */
        uint32_t maybe_has_free_blocks;
        err = hfs_check_summary (hfsmp, alloc_block, &maybe_has_free_blocks);
        if (err) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: hfs_check_summary returned error (%d) ", err);
            hfs_assert(0);
        }
        counter_max = hfsmp->vcbVBMIOSize / kBytesPerWord;

        for (counter = 0; counter < counter_max; counter++) {
            uint32_t word = block_data[counter];

            /* We assume that we'll not find any free bits here. */
            if (word != kAllBitsSetInWord) {
                if (maybe_has_free_blocks) {
                    /* All done */
                    saw_free_bits = 1;
                    break;
                } else {
                    LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: hfs_check_summary saw free bits!");
                    hfs_assert(0);
                }
            }
        }

        if (maybe_has_free_blocks && (saw_free_bits == 0)) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: did not see free bits !");
            hfs_assert(0);
        }

        /* Release the block. */
        if ((err = ReleaseScanBitmapRange (bp))) {
            LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: Error (%d) in ReleaseScanBitmapRange", err);
            hfs_assert(0);
        }
    }

    LFHFS_LOG(LEVEL_ERROR, "hfs_validate_summary: summary validation completed successfully on %s\n", hfsmp->vcbVN);
    return;
}
#endif

/*
 * hfs_alloc_scan_range:
 *
 * This function should be used to scan large ranges of the allocation bitmap
 * at one time. It makes two key assumptions:
 *
 * 1) Bitmap lock is held during the duration of the call (exclusive)
 * 2) There are no pages in the buffer cache for any of the bitmap
 * blocks that we may encounter. It *MUST* be completely empty.
 *
 * The expected use case is when we are scanning the bitmap in full while we are
 * still mounting the filesystem in order to issue TRIMs or build up the summary
 * table for the mount point. It should be done after any potential journal replays
 * are completed and their I/Os fully issued.
 *
 * The key reason for assumption (2) above is that this function will try to issue
 * I/O against the bitmap file in chunks as large as possible -- essentially as
 * much as the buffer layer will handle (1MB). Because the size of these I/Os
 * is larger than what would be expected during normal runtime we must invalidate
 * the buffers as soon as we are done with them so that they do not persist in
 * the buffer cache for other threads to find, as they'll typically be doing
 * allocation-block size I/Os instead.
+ * + * Input Args: + * hfsmp - hfs mount data structure + * startbit - allocation block # to start our scan. It must be aligned + * on a vcbVBMIOsize boundary. + * list - journal trim list data structure for issuing TRIMs + * + * Output Args: + * bitToScan - Return the next bit to scan if this function is called again. + * Caller will supply this into the next invocation + * of this call as 'startbit'. + */ + +static int hfs_alloc_scan_range(struct hfsmount *hfsmp, u_int32_t startbit, + u_int32_t *bitToScan, struct jnl_trim_list *list) { + + int error; + int readwrite = 1; + u_int32_t curAllocBlock; + GenericLFBufPtr blockRef = NULL; + u_int32_t *buffer = NULL; + u_int32_t free_offset = 0; //tracks the start of the current free range + u_int32_t size = 0; // tracks the length of the current free range. + u_int32_t iosize = 0; //how much io we should generate against the bitmap + u_int32_t byte_off; // byte offset into the bitmap file. + u_int32_t completed_size; // how much io was actually completed + u_int32_t last_bitmap_block; + u_int32_t current_word; + u_int32_t word_index = 0; + + /* summary table building */ + uint32_t summary_bit = 0; + uint32_t saw_free_blocks = 0; + uint32_t last_marked = 0; + + if (hfsmp->hfs_flags & HFS_READ_ONLY) { + readwrite = 0; + } + + /* + * Compute how much I/O we should generate here. + * hfs_scan_range_size will validate that the start bit + * converted into a byte offset into the bitmap file, + * is aligned on a VBMIOSize boundary. + */ + error = hfs_scan_range_size (hfsmp, startbit, &iosize); + if (error) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: hfs_scan_range_size error %d\n", error); + hfs_assert(0); + } + return error; + } + + if (iosize < hfsmp->vcbVBMIOSize) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: iosize too small! (iosize %d)\n", iosize); + hfs_assert(0); + } + return EINVAL; + } + + /* hfs_scan_range_size should have verified startbit. 
Convert it to bytes */ + byte_off = startbit / kBitsPerByte; + + /* + * When the journal replays blocks, it does so by writing directly to the disk + * device (bypassing any filesystem vnodes and such). When it finishes its I/Os + * it also immediately re-reads and invalidates the range covered by the bp so + * it does not leave anything lingering in the cache (for iosize reasons). + * + * As such, it is safe to do large I/Os here with ReadBitmapRange. + * + * NOTE: It is not recommended, but it is possible to call the function below + * on sections of the bitmap that may be in core already as long as the pages are not + * dirty. In that case, we'd notice that something starting at that + * logical block of the bitmap exists in the metadata cache, and we'd check + * if the iosize requested is the same as what was already allocated for it. + * Odds are pretty good we're going to request something larger. In that case, + * we just free the existing memory associated with the buf and reallocate a + * larger range. This function should immediately invalidate it as soon as we're + * done scanning, so this shouldn't cause any coherency issues. + */ + + error = ReadBitmapRange(hfsmp, byte_off, iosize, &buffer, &blockRef); + if (error) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: start %d iosize %d ReadBitmapRange error %d\n", startbit, iosize, error); + hfs_assert(0); + } + return error; + } + + /* + * At this point, we have a giant wired buffer that represents some portion of + * the bitmap file that we want to analyze. We may not have gotten all 'iosize' + * bytes though, so clip our ending bit to what we actually read in. 
+ */ + completed_size = blockRef->uValidBytes; + last_bitmap_block = completed_size * kBitsPerByte; + last_bitmap_block = last_bitmap_block + startbit; + + /* Cap the last block to the total number of blocks if required */ + if (last_bitmap_block > hfsmp->totalBlocks) { + last_bitmap_block = hfsmp->totalBlocks; + } + + /* curAllocBlock represents the logical block we're analyzing. */ + curAllocBlock = startbit; + word_index = 0; + size = 0; + + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + if (hfs_get_summary_index (hfsmp, startbit, &summary_bit)) { + error = EINVAL; + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: Could not acquire summary index for %u", startbit); + hfs_assert(0); + } + return error; + } + /* + * summary_bit should now be set to the summary bit corresponding to + * the allocation block of the first bit that we're supposed to scan + */ + } + saw_free_blocks = 0; + + while (curAllocBlock < last_bitmap_block) { + u_int32_t bit; + + /* Update the summary table as needed */ + if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) { + if (ALLOC_DEBUG) { + if (hfsmp->hfs_summary_table == NULL) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: no summary table!"); + hfs_assert(0); + } + } + + uint32_t temp_summary; + error = hfs_get_summary_index (hfsmp, curAllocBlock, &temp_summary); + if (error) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: could not get summary index for %u", curAllocBlock); + hfs_assert(0); + } + return EINVAL; + } + + if (ALLOC_DEBUG) { + if (temp_summary < summary_bit) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: backwards summary bit?\n"); + hfs_assert(0); + } + } + + /* + * If temp_summary is greater than summary_bit, then this + * means that the next allocation block crosses a vcbVBMIOSize boundary + * and we should treat this range of on-disk data as part of a new summary + * bit. 
+ */ + if (temp_summary > summary_bit) { + if (saw_free_blocks == 0) { + /* Mark the bit as totally consumed in the summary table */ + hfs_set_summary (hfsmp, summary_bit, 1); + } + else { + /* Mark the bit as potentially free in summary table */ + hfs_set_summary (hfsmp, summary_bit, 0); + } + last_marked = summary_bit; + /* + * Any time we set the summary table, update our counter which tracks + * what the last bit that was fully marked in the summary table. + * + * Then reset our marker which says we haven't seen a free bit yet. + */ + saw_free_blocks = 0; + summary_bit = temp_summary; + } + } /* End summary table conditions */ + + current_word = SWAP_BE32(buffer[word_index]); + /* Iterate through the word 1 bit at a time... */ + for (bit = 0 ; bit < kBitsPerWord ; bit++, curAllocBlock++) { + if (curAllocBlock >= last_bitmap_block) { + break; + } + u_int32_t allocated = (current_word & (kHighBitInWordMask >> bit)); + + if (allocated) { + if (size != 0) { + if (readwrite) { + /* Insert the previously tracked range of free blocks to the trim list */ + hfs_track_unmap_blocks (hfsmp, free_offset, size, list); + } + add_free_extent_cache (hfsmp, free_offset, size); + size = 0; + free_offset = 0; + } + } + else { + /* Not allocated */ + size++; + if (free_offset == 0) { + /* Start a new run of free spcae at curAllocBlock */ + free_offset = curAllocBlock; + } + if (saw_free_blocks == 0) { + saw_free_blocks = 1; + } + } + } /* end for loop iterating through the word */ + + if (curAllocBlock < last_bitmap_block) { + word_index++; + } + + } /* End while loop (iterates through last_bitmap_block) */ + + + /* + * We've (potentially) completed our pass through this region of bitmap, + * but one thing we may not have done is updated that last summary bit for + * the last page we scanned, because we would have never transitioned across + * a vcbVBMIOSize boundary again. Check for that and update the last bit + * as needed. 
+ * + * Note that 'last_bitmap_block' is *not* inclusive WRT the very last bit in the bitmap + * for the region of bitmap on-disk that we were scanning. (it is one greater). + */ + if ((curAllocBlock >= last_bitmap_block) && + (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) { + uint32_t temp_summary; + /* temp_block should be INSIDE the region we just scanned, so subtract 1 */ + uint32_t temp_block = last_bitmap_block - 1; + error = hfs_get_summary_index (hfsmp, temp_block, &temp_summary); + if (error) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_alloc_scan_range: end bit curAllocBlock %u, last_bitmap_block %u", curAllocBlock, last_bitmap_block); + hfs_assert(0); + } + return EINVAL; + } + + /* Did we already update this in the table? */ + if (temp_summary > last_marked) { + if (saw_free_blocks == 0) { + hfs_set_summary (hfsmp, temp_summary, 1); + } + else { + hfs_set_summary (hfsmp, temp_summary, 0); + } + } + } + + /* + * We may have been tracking a range of free blocks that hasn't been inserted yet. + * Keep the logic for the TRIM and free extent separate from that of the summary + * table management even though they are closely linked. + */ + if (size != 0) { + if (readwrite) { + hfs_track_unmap_blocks (hfsmp, free_offset, size, list); + } + add_free_extent_cache (hfsmp, free_offset, size); + } + + /* + * curAllocBlock represents the next block we need to scan when we return + * to this function. + */ + *bitToScan = curAllocBlock; + ReleaseScanBitmapRange(blockRef); + + return 0; + +} + + + +/* + * Compute the maximum I/O size to generate against the bitmap file + * Will attempt to generate at LEAST VBMIOsize I/Os for interior ranges of the bitmap. + * + * Inputs: + * hfsmp -- hfsmount to look at + * bitmap_off -- bit offset into the bitmap file + * + * Outputs: + * iosize -- iosize to generate. 
+ * + * Returns: + * 0 on success; EINVAL otherwise + */ +static int hfs_scan_range_size (struct hfsmount *hfsmp, uint32_t bitmap_st, uint32_t *iosize) { + + /* + * The maximum bitmap size is 512MB regardless of ABN size, so we can get away + * with 32 bit math in this function. + */ + + uint32_t bitmap_len; + uint32_t remaining_bitmap; + uint32_t target_iosize; + uint32_t bitmap_off; + + /* Is this bit index not word aligned? If so, immediately fail. */ + if (bitmap_st % kBitsPerWord) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_scan_range_size: unaligned start bit! bitmap_st %d \n", bitmap_st); + hfs_assert(0); + } + return EINVAL; + } + + /* bitmap_off is in bytes, not allocation blocks/bits */ + bitmap_off = bitmap_st / kBitsPerByte; + + if ((hfsmp->totalBlocks <= bitmap_st) || (bitmap_off > (512 * 1024 * 1024))) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_scan_range_size: invalid start! bitmap_st %d, bitmap_off %d\n", bitmap_st, bitmap_off); + hfs_assert(0); + } + return EINVAL; + } + + /* + * Also invalid if it's not at least aligned to HFS bitmap logical + * block boundaries. We don't have to emit an iosize that's an + * exact multiple of the VBMIOSize, but it must start on such + * a boundary. + * + * The vcbVBMIOSize may be SMALLER than the allocation block size + * on a FS with giant allocation blocks, but it will never be + * greater than it, so it should be safe to start I/O + * aligned on a VBMIOsize boundary. + */ + if (bitmap_off & (hfsmp->vcbVBMIOSize - 1)) { + if (ALLOC_DEBUG) { + LFHFS_LOG(LEVEL_ERROR, "hfs_scan_range_size: unaligned start! bitmap_off %d\n", bitmap_off); + hfs_assert(0); + } + return EINVAL; + } + + /* + * Generate the total bitmap file length in bytes, then round up + * that value to the end of the last allocation block, if needed (It + * will probably be needed). We won't scan past the last actual + * allocation block. 
+ *
+ * Unless we're completing the bitmap scan (or bitmap < 1MB), we
+ * have to complete the I/O on VBMIOSize boundaries, but we can only read
+ * up until the end of the bitmap file.
+ */
+    bitmap_len = roundup(hfsmp->totalBlocks, hfsmp->blockSize * 8) / 8;
+
+    remaining_bitmap = bitmap_len - bitmap_off;
+
+    /*
+     * io size is the MIN of the maximum I/O we can generate or the
+     * remaining amount of bitmap.
+     */
+    target_iosize = MIN((MAXBSIZE), remaining_bitmap);
+    *iosize = target_iosize;
+
+    return 0;
+}
+
+/*
+ * Remove an extent from the list of free extents.
+ *
+ * This is a low-level routine. It does not handle overlaps or splitting;
+ * that is the responsibility of the caller. The caller identifies the
+ * extent to remove by its index in the list; it will be removed, and any
+ * following extents in the list will be shifted up.
+ *
+ * Inputs:
+ *    hfsmp      - mount point structure
+ *    index      - index (within vcbFreeExt) of the extent to remove
+ *
+ * Result:
+ *    None.  An out-of-range index is logged (and asserts when ALLOC_DEBUG
+ *    is enabled); the list is left unchanged in that case.
+ */
+static void remove_free_extent_list(struct hfsmount *hfsmp, int index)
+{
+    if (index < 0 || (uint32_t)index >= hfsmp->vcbFreeExtCnt) {
+        if (ALLOC_DEBUG)
+        {
+            LFHFS_LOG(LEVEL_ERROR, "remove_free_extent_list: %p: index (%d) out of range (0, %u)", hfsmp, index, hfsmp->vcbFreeExtCnt);
+            hfs_assert(0);
+        }
+        else
+            LFHFS_LOG(LEVEL_ERROR, "remove_free_extent_list: %p: index (%d) out of range (0, %u)", hfsmp, index, hfsmp->vcbFreeExtCnt);
+        return;
+    }
+    int shift_count = hfsmp->vcbFreeExtCnt - index - 1;
+    if (shift_count > 0) {
+        memmove(&hfsmp->vcbFreeExt[index], &hfsmp->vcbFreeExt[index+1], shift_count * sizeof(hfsmp->vcbFreeExt[0]));
+    }
+    hfsmp->vcbFreeExtCnt--;
+}
+
+
+/*
+ * Add an extent to the list of free extents.
+ *
+ * This is a low-level routine. It does not handle overlaps or coalescing;
+ * that is the responsibility of the caller. This routine *does* make
+ * sure that the extent it is adding is inserted in the correct location.
+ * If the list is full, this routine will handle either removing the last
+ * extent in the list to make room for the new extent, or ignoring the
+ * new extent if it is "worse" than the last extent in the list.
+ *
+ * Inputs:
+ *    startBlock - Start of extent to add
+ *    blockCount - Number of blocks in extent to add
+ *
+ * Result:
+ *    The index where the extent was inserted, or kMaxFreeExtents
+ *    if the extent was not inserted (the list was full, and the extent
+ *    being added was "worse" than everything in the list).
+ */
+static int add_free_extent_list(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount)
+{
+    uint32_t i;
+
+    /* ALLOC_DEBUG: Make sure no extents in the list overlap or are contiguous with the input extent. */
+    if (ALLOC_DEBUG) {
+        uint32_t endBlock = startBlock + blockCount;
+        for (i = 0; i < hfsmp->vcbFreeExtCnt; ++i) {
+            if (endBlock < hfsmp->vcbFreeExt[i].startBlock ||
+                startBlock > (hfsmp->vcbFreeExt[i].startBlock + hfsmp->vcbFreeExt[i].blockCount)) {
+                continue;
+            }
+            LFHFS_LOG(LEVEL_ERROR, "add_free_extent_list: extent(%u %u) overlaps existing extent (%u %u) at index %d",
+                      startBlock, blockCount, hfsmp->vcbFreeExt[i].startBlock, hfsmp->vcbFreeExt[i].blockCount, i);
+            hfs_assert(0);
+        }
+    }
+
+    /* Figure out what index the new extent should be inserted at. */
+    for (i = 0; i < hfsmp->vcbFreeExtCnt; ++i) {
+        /* The list is sorted by decreasing size. */
+        if (blockCount > hfsmp->vcbFreeExt[i].blockCount) {
+            break;
+        }
+    }
+
+    /* When we get here, i is the index where the extent should be inserted. */
+    if (i == kMaxFreeExtents) {
+        /*
+         * The new extent is worse than anything already in the list,
+         * and the list is full, so just ignore the extent to be added.
+         */
+        return i;
+    }
+
+    /*
+     * Grow the list (if possible) to make room for an insert.
+     */
+    if (hfsmp->vcbFreeExtCnt < kMaxFreeExtents)
+        hfsmp->vcbFreeExtCnt++;
+
+    /*
+     * If we'll be keeping any extents after the insert position, then shift them.
+ */ + int shift_count = hfsmp->vcbFreeExtCnt - i - 1; + if (shift_count > 0) { + memmove(&hfsmp->vcbFreeExt[i+1], &hfsmp->vcbFreeExt[i], shift_count * sizeof(hfsmp->vcbFreeExt[0])); + } + + /* Finally, store the new extent at its correct position. */ + hfsmp->vcbFreeExt[i].startBlock = startBlock; + hfsmp->vcbFreeExt[i].blockCount = blockCount; + return i; +} + + +/* + * Remove an entry from free extent cache after it has been allocated. + * + * This is a high-level routine. It handles removing a portion of a + * cached extent, potentially splitting it into two (if the cache was + * already full, throwing away the extent that would sort last). It + * also handles removing an extent that overlaps multiple extents in + * the cache. + * + * Inputs: + * hfsmp - mount point structure + * startBlock - starting block of the extent to be removed. + * blockCount - number of blocks of the extent to be removed. + */ +static void remove_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount) +{ + u_int32_t i, insertedIndex; + u_int32_t currentStart, currentEnd, endBlock; + int extentsRemoved = 0; + + endBlock = startBlock + blockCount; + + lf_lck_spin_lock(&hfsmp->vcbFreeExtLock); + + /* + * Iterate over all of the extents in the free extent cache, removing or + * updating any entries that overlap with the input extent. + */ + for (i = 0; i < hfsmp->vcbFreeExtCnt; ++i) { + currentStart = hfsmp->vcbFreeExt[i].startBlock; + currentEnd = currentStart + hfsmp->vcbFreeExt[i].blockCount; + + /* + * If the current extent is entirely before or entirely after the + * the extent to be removed, then we keep it as-is. + */ + if (currentEnd <= startBlock || currentStart >= endBlock) { + continue; + } + + /* + * If the extent being removed entirely contains the current extent, + * then remove the current extent. 
+ */ + if (startBlock <= currentStart && endBlock >= currentEnd) { + remove_free_extent_list(hfsmp, i); + + /* + * We just removed the extent at index i. The extent at + * index i+1 just got shifted to index i. So decrement i + * to undo the loop's "++i", and the next iteration will + * examine index i again, which contains the next extent + * in the list. + */ + --i; + ++extentsRemoved; + continue; + } + + /* + * If the extent being removed is strictly "in the middle" of the + * current extent, then we need to split the current extent into + * two discontiguous extents (the "head" and "tail"). The good + * news is that we don't need to examine any other extents in + * the list. + */ + if (startBlock > currentStart && endBlock < currentEnd) { + remove_free_extent_list(hfsmp, i); + add_free_extent_list(hfsmp, currentStart, startBlock - currentStart); + add_free_extent_list(hfsmp, endBlock, currentEnd - endBlock); + break; + } + + /* + * The only remaining possibility is that the extent to be removed + * overlaps the start or end (but not both!) of the current extent. + * So we need to replace the current extent with a shorter one. + * + * The only tricky part is that the updated extent might be at a + * different index than the original extent. If the updated extent + * was inserted after the current extent, then we need to re-examine + * the entry at index i, since it now contains the extent that was + * previously at index i+1. If the updated extent was inserted + * before or at the same index as the removed extent, then the + * following extents haven't changed position. + */ + remove_free_extent_list(hfsmp, i); + if (startBlock > currentStart) { + /* Remove the tail of the current extent. */ + insertedIndex = add_free_extent_list(hfsmp, currentStart, startBlock - currentStart); + } else { + /* Remove the head of the current extent. 
*/
+            insertedIndex = add_free_extent_list(hfsmp, endBlock, currentEnd - endBlock);
+        }
+        if (insertedIndex > i) {
+            --i;    /* Undo the "++i" in the loop, so we examine the entry at index i again. */
+        }
+    }
+
+    lf_lck_spin_unlock(&hfsmp->vcbFreeExtLock);
+    sanity_check_free_ext(hfsmp, 0);
+
+    return;
+}
+
+
+/*
+ * Add an entry to free extent cache after it has been deallocated.
+ *
+ * This is a high-level routine. It will merge overlapping or contiguous
+ * extents into a single, larger extent.
+ *
+ * If the extent provided has blocks beyond current allocLimit, it is
+ * clipped to allocLimit (so that we won't accidentally find and allocate
+ * space beyond allocLimit).
+ *
+ * Inputs:
+ *    hfsmp      - mount point structure
+ *    startBlock - starting block of the extent to be added.
+ *    blockCount - number of blocks of the extent to be added.
+ *
+ * Returns:
+ *    true       - if the extent was added successfully to the list
+ *    false      - if the extent was not added to the list, maybe because
+ *                 the extent was beyond allocLimit, or is not best
+ *                 candidate to be put in the cache.
+ */ +static Boolean add_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount) +{ + Boolean retval = false; + uint32_t endBlock; + uint32_t currentEnd; + uint32_t i; + +#if DEBUG + for (i = 0; i < 2; ++i) { + struct rl_entry *range; + TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[i], rl_link) { + hfs_assert(rl_overlap(range, startBlock, + startBlock + blockCount - 1) == RL_NOOVERLAP); + } + } +#endif + + /* No need to add extent that is beyond current allocLimit */ + if (startBlock >= hfsmp->allocLimit) { + goto out_not_locked; + } + + /* If end of the free extent is beyond current allocLimit, clip the extent */ + if ((startBlock + blockCount) > hfsmp->allocLimit) { + blockCount = hfsmp->allocLimit - startBlock; + } + + lf_lck_spin_lock(&hfsmp->vcbFreeExtLock); + + /* + * Make a pass through the free extent cache, looking for known extents that + * overlap or are contiguous with the extent to be added. We'll remove those + * extents from the cache, and incorporate them into the new extent to be added. + */ + endBlock = startBlock + blockCount; + for (i=0; i < hfsmp->vcbFreeExtCnt; ++i) { + currentEnd = hfsmp->vcbFreeExt[i].startBlock + hfsmp->vcbFreeExt[i].blockCount; + if (hfsmp->vcbFreeExt[i].startBlock > endBlock || currentEnd < startBlock) { + /* Extent i does not overlap and is not contiguous, so keep it. */ + continue; + } else { + /* We need to remove extent i and combine it with the input extent. */ + if (hfsmp->vcbFreeExt[i].startBlock < startBlock) + startBlock = hfsmp->vcbFreeExt[i].startBlock; + if (currentEnd > endBlock) + endBlock = currentEnd; + + remove_free_extent_list(hfsmp, i); + /* + * We just removed the extent at index i. The extent at + * index i+1 just got shifted to index i. So decrement i + * to undo the loop's "++i", and the next iteration will + * examine index i again, which contains the next extent + * in the list. 
+ */ + --i; + } + } + add_free_extent_list(hfsmp, startBlock, endBlock - startBlock); + + lf_lck_spin_unlock(&hfsmp->vcbFreeExtLock); + +out_not_locked: + sanity_check_free_ext(hfsmp, 0); + + return retval; +} + +/* Debug function to check if the free extent cache is good or not */ +static void sanity_check_free_ext(struct hfsmount *hfsmp, int check_allocated) +{ + u_int32_t i, j; + + /* Do not do anything if debug is not on */ + if (ALLOC_DEBUG == 0) { + return; + } + + lf_lck_spin_lock(&hfsmp->vcbFreeExtLock); + + if (hfsmp->vcbFreeExtCnt > kMaxFreeExtents) + { + LFHFS_LOG(LEVEL_ERROR, "sanity_check_free_ext: free extent count (%u) is too large", hfsmp->vcbFreeExtCnt); + } + + /* + * Iterate the Free extent cache and ensure no entries are bogus or refer to + * allocated blocks. + */ + for(i=0; i < hfsmp->vcbFreeExtCnt; i++) { + u_int32_t start, nblocks; + + start = hfsmp->vcbFreeExt[i].startBlock; + nblocks = hfsmp->vcbFreeExt[i].blockCount; + + /* Check if any of the blocks in free extent cache are allocated. + * This should not be enabled always because it might take + * very long for large extents that get added to the list. + * + * We have to drop vcbFreeExtLock while we call hfs_isallocated + * because it is going to do I/O. Note that the free extent + * cache could change. That's a risk we take when using this + * debugging code. (Another alternative would be to try to + * detect when the free extent cache changed, and perhaps + * restart if the list changed while we dropped the lock.) 
+ */ + if (check_allocated) { + lf_lck_spin_unlock(&hfsmp->vcbFreeExtLock); + if (hfs_isallocated(hfsmp, start, nblocks)) { + LFHFS_LOG(LEVEL_ERROR, "sanity_check_free_ext: slot %d:(%u,%u) in the free extent array is allocated\n", + i, start, nblocks); + hfs_assert(0); + } + lf_lck_spin_lock(&hfsmp->vcbFreeExtLock); + } + + /* Check if any part of the extent is beyond allocLimit */ + if ((start > hfsmp->allocLimit) || ((start + nblocks) > hfsmp->allocLimit)) { + LFHFS_LOG(LEVEL_ERROR, "sanity_check_free_ext: slot %d:(%u,%u) in the free extent array is beyond allocLimit=%u\n", + i, start, nblocks, hfsmp->allocLimit); + hfs_assert(0); + } + + /* Check if there are any duplicate start blocks */ + for(j=i+1; j < hfsmp->vcbFreeExtCnt; j++) { + if (start == hfsmp->vcbFreeExt[j].startBlock) { + LFHFS_LOG(LEVEL_ERROR, "sanity_check_free_ext: slot %d:(%u,%u) and %d:(%u,%u) are duplicate\n", + i, start, nblocks, j, hfsmp->vcbFreeExt[j].startBlock, + hfsmp->vcbFreeExt[j].blockCount); + hfs_assert(0); + } + } + + /* Check if the entries are out of order */ + if ((i+1) != hfsmp->vcbFreeExtCnt) { + /* normally sorted by block count (descending) */ + if (hfsmp->vcbFreeExt[i].blockCount < hfsmp->vcbFreeExt[i+1].blockCount) { + LFHFS_LOG(LEVEL_ERROR, "sanity_check_free_ext: %d:(%u,%u) and %d:(%u,%u) are out of order\n", + i, start, nblocks, i+1, hfsmp->vcbFreeExt[i+1].startBlock, + hfsmp->vcbFreeExt[i+1].blockCount); + hfs_assert(0); + } + } + } + lf_lck_spin_unlock(&hfsmp->vcbFreeExtLock); +} diff --git a/livefiles_hfs_plugin/lf_hfs_volume_allocation.h b/livefiles_hfs_plugin/lf_hfs_volume_allocation.h new file mode 100644 index 0000000..946d0eb --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_volume_allocation.h @@ -0,0 +1,18 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_volume_allocation.h + * livefiles_hfs + * + * Created by Or Haimovich on 22/3/18. 
+ */ + +#ifndef lf_hfs_volume_allocation_h +#define lf_hfs_volume_allocation_h + +#include "lf_hfs.h" + +int hfs_init_summary (struct hfsmount *hfsmp); +u_int32_t ScanUnmapBlocks (struct hfsmount *hfsmp); +int hfs_isallocated(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks); + +#endif /* lf_hfs_volume_allocation_h */ diff --git a/livefiles_hfs_plugin/lf_hfs_xattr.c b/livefiles_hfs_plugin/lf_hfs_xattr.c new file mode 100644 index 0000000..e15edaa --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_xattr.c @@ -0,0 +1,1854 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_xattr.c + * livefiles_hfs + * + * Created by Or Haimovich on 28/3/18. + */ + +#include +#include +#include +#include "lf_hfs_xattr.h" +#include "lf_hfs.h" +#include "lf_hfs_vnops.h" +#include "lf_hfs_raw_read_write.h" +#include "lf_hfs_btrees_io.h" +#include "lf_hfs_btrees_internal.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_sbunicode.h" +#include "lf_hfs_endian.h" +#include "lf_hfs_logger.h" +#include "lf_hfs_utils.h" + +#define ATTRIBUTE_FILE_NODE_SIZE 8192 + +//#define HFS_XATTR_VERBOSE 1 + +/* State information for the listattr_callback callback function. 
*/ +struct listattr_callback_state { + u_int32_t fileID; + int result; + void *buf; + size_t bufsize; + size_t size; +}; + +static u_int32_t emptyfinfo[8] = {0}; + + +static int hfs_zero_hidden_fields (struct cnode *cp, u_int8_t *finderinfo); + +const char hfs_attrdatafilename[] = "Attribute Data"; + +static int listattr_callback(const HFSPlusAttrKey *key, const HFSPlusAttrData *data, + struct listattr_callback_state *state); + +static int remove_attribute_records(struct hfsmount *hfsmp, BTreeIterator * iterator); + +static int getnodecount(struct hfsmount *hfsmp, size_t nodesize); + +static size_t getmaxinlineattrsize(struct vnode * attrvp); + +static int read_attr_data(struct hfsmount *hfsmp, void * buf, size_t datasize, HFSPlusExtentDescriptor *extents); + +static int write_attr_data(struct hfsmount *hfsmp, void * buf, size_t datasize, HFSPlusExtentDescriptor *extents); + +static int alloc_attr_blks(struct hfsmount *hfsmp, size_t attrsize, size_t extentbufsize, HFSPlusExtentDescriptor *extents, int *blocks); + +static void free_attr_blks(struct hfsmount *hfsmp, int blkcnt, HFSPlusExtentDescriptor *extents); + +static int has_overflow_extents(HFSPlusForkData *forkdata); + +static int count_extent_blocks(int maxblks, HFSPlusExtentRecord extents); + + +/* Zero out the date added field for the specified cnode */ +static int hfs_zero_hidden_fields (struct cnode *cp, u_int8_t *finderinfo) +{ + u_int8_t *finfo = finderinfo; + + /* Advance finfo by 16 bytes to the 2nd half of the finderinfo */ + finfo = finfo + 16; + + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + extinfo->document_id = 0; + extinfo->date_added = 0; + extinfo->write_gen_counter = 0; + } else if (S_ISDIR(cp->c_attr.ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo; + extinfo->document_id = 0; + extinfo->date_added = 0; + extinfo->write_gen_counter = 0; + } else { + /* 
Return an error */ + return -1; + } + return 0; +} + +/* + * Retrieve the data of an extended attribute. + */ +int +hfs_vnop_getxattr(vnode_t vp, const char *attr_name, void *buf, size_t bufsize, size_t *actual_size) +{ + struct cnode *cp; + struct hfsmount *hfsmp; + int result; + + if (attr_name == NULL || attr_name[0] == '\0') { + return (EINVAL); /* invalid name */ + } + if (strlen(attr_name) > XATTR_MAXNAMELEN) { + return (ENAMETOOLONG); + } + if (actual_size == NULL) { + return (EINVAL); + } + if (VNODE_IS_RSRC(vp)) { + return (EPERM); + } + + cp = VTOC(vp); + + /* Get the Finder Info. */ + if (strcmp(attr_name, XATTR_FINDERINFO_NAME) == 0) { + u_int8_t finderinfo[32]; + size_t attrsize = 32; + + if ((result = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) { + return (result); + } + /* Make a copy since we may not export all of it. */ + bcopy(cp->c_finderinfo, finderinfo, sizeof(finderinfo)); + hfs_unlock(cp); + + /* Zero out the date added field in the local copy */ + hfs_zero_hidden_fields (cp, finderinfo); + + /* Don't expose a symlink's private type/creator. */ + if (vnode_islnk(vp)) { + struct FndrFileInfo *fip; + + fip = (struct FndrFileInfo *)&finderinfo; + fip->fdType = 0; + fip->fdCreator = 0; + } + /* If Finder Info is empty then it doesn't exist. */ + if (bcmp(finderinfo, emptyfinfo, sizeof(emptyfinfo)) == 0) { + return (ENOATTR); + } + *actual_size = attrsize; + + if (buf == NULL) { + return (0); + } + if (bufsize < attrsize) + return (ERANGE); + + memcpy(buf, (caddr_t)&finderinfo, attrsize); + return (0); + } + + /* Read the Resource Fork. 
*/ + if (strcmp(attr_name, XATTR_RESOURCEFORK_NAME) == 0) { + return (ENOATTR); + } + + hfsmp = VTOHFS(vp); + if ((result = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) { + return (result); + } + + /* Check for non-rsrc, non-finderinfo EAs - getxattr_internal */ + + struct filefork *btfile; + BTreeIterator * iterator = NULL; + size_t attrsize = 0; + HFSPlusAttrRecord *recp = NULL; + size_t recp_size = 0; + FSBufferDescriptor btdata; + int lockflags = 0; + u_int16_t datasize = 0; + u_int32_t target_id = 0; + + if (cp) { + target_id = cp->c_fileid; + } else { + target_id = kHFSRootParentID; + } + + /* Bail if we don't have an EA B-Tree. */ + if ((hfsmp->hfs_attribute_vp == NULL) || + ((cp) && (cp->c_attr.ca_recflags & kHFSHasAttributesMask) == 0)) { + result = ENOATTR; + goto exit; + } + + /* Initialize the B-Tree iterator for searching for the proper EA */ + btfile = VTOF(hfsmp->hfs_attribute_vp); + + iterator = hfs_mallocz(sizeof(*iterator)); + + /* Allocate memory for reading in the attribute record. This buffer is + * big enough to read in all types of attribute records. It is not big + * enough to read inline attribute data which is read in later. + */ + recp = hfs_malloc(recp_size = sizeof(HFSPlusAttrRecord)); + btdata.bufferAddress = recp; + btdata.itemSize = sizeof(HFSPlusAttrRecord); + btdata.itemCount = 1; + + result = hfs_buildattrkey(target_id, attr_name, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto exit; + } + + /* Lookup the attribute in the Attribute B-Tree */ + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + result = BTSearchRecord(btfile, iterator, &btdata, &datasize, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + + if (result) { + if (result == btNotFound) { + result = ENOATTR; + } + goto exit; + } + + /* + * Operate differently if we have inline EAs that can fit in the attribute B-Tree or if + * we have extent based EAs. 
+ */
+    switch (recp->recordType) {
+
+        /* Attribute fits in the Attribute B-Tree */
+        case kHFSPlusAttrInlineData: {
+            /*
+             * Sanity check record size. It's not required to have any
+             * user data, so the minimum size is 2 bytes less than the
+             * size of HFSPlusAttrData (since HFSPlusAttrData struct
+             * has 2 bytes set aside for attribute data).
+             */
+            if (datasize < (sizeof(HFSPlusAttrData) - 2)) {
+                LFHFS_LOG(LEVEL_DEBUG, "hfs_getxattr: vol=%s %d,%s invalid record size %d (expecting %lu)\n",
+                          hfsmp->vcbVN, target_id, attr_name, datasize, sizeof(HFSPlusAttrData));
+                result = ENOATTR;
+                break;
+            }
+            *actual_size = recp->attrData.attrSize;
+            if (buf && recp->attrData.attrSize != 0) {
+                if (*actual_size > bufsize) {
+                    /* User provided buffer is not large enough for the xattr data */
+                    result = ERANGE;
+                } else {
+                    /* Previous BTSearchRecord() read in only the attribute record,
+                     * and not the attribute data. Now allocate enough memory for
+                     * both attribute record and data, and read the attribute record again.
+ */ + attrsize = sizeof(HFSPlusAttrData) - 2 + recp->attrData.attrSize; + hfs_free(recp); + recp = hfs_malloc(recp_size = attrsize); + + btdata.bufferAddress = recp; + btdata.itemSize = attrsize; + btdata.itemCount = 1; + + bzero(iterator, sizeof(*iterator)); + result = hfs_buildattrkey(target_id, attr_name, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto exit; + } + + /* Lookup the attribute record and inline data */ + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + result = BTSearchRecord(btfile, iterator, &btdata, &datasize, NULL); + hfs_systemfile_unlock(hfsmp, lockflags); + if (result) { + if (result == btNotFound) { + result = ENOATTR; + } + goto exit; + } + + /* Copy-out the attribute data to the user buffer */ + *actual_size = recp->attrData.attrSize; + memcpy(buf, (caddr_t) &recp->attrData.attrData, recp->attrData.attrSize); + } + } + break; + } + + /* Extent-Based EAs */ + case kHFSPlusAttrForkData: { + if (datasize < sizeof(HFSPlusAttrForkData)) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_getxattr: vol=%s %d,%s invalid record size %d (expecting %lu)\n", + hfsmp->vcbVN, target_id, attr_name, datasize, sizeof(HFSPlusAttrForkData)); + result = ENOATTR; + break; + } + *actual_size = recp->forkData.theFork.logicalSize; + if (buf == NULL) { + break; + } + if (*actual_size > bufsize) { + result = ERANGE; + break; + } + /* Process overflow extents if necessary. */ + if (has_overflow_extents(&recp->forkData.theFork)) { + HFSPlusExtentDescriptor *extentbuf; + HFSPlusExtentDescriptor *extentptr; + size_t extentbufsize; + u_int32_t totalblocks; + u_int32_t blkcnt; + u_int64_t attrlen; + + totalblocks = recp->forkData.theFork.totalBlocks; + /* Ignore bogus block counts. */ + if (totalblocks > howmany(HFS_XATTR_MAXSIZE, hfsmp->blockSize)) { + result = ERANGE; + break; + } + attrlen = recp->forkData.theFork.logicalSize; + + /* Get a buffer to hold the worst case amount of extents. 
*/ + extentbufsize = totalblocks * sizeof(HFSPlusExtentDescriptor); + extentbufsize = roundup(extentbufsize, sizeof(HFSPlusExtentRecord)); + extentbuf = hfs_mallocz(extentbufsize); + extentptr = extentbuf; + + /* Grab the first 8 extents. */ + bcopy(&recp->forkData.theFork.extents[0], extentptr, sizeof(HFSPlusExtentRecord)); + extentptr += kHFSPlusExtentDensity; + blkcnt = count_extent_blocks(totalblocks, recp->forkData.theFork.extents); + + /* Now lookup the overflow extents. */ + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + while (blkcnt < totalblocks) { + ((HFSPlusAttrKey *)&iterator->key)->startBlock = blkcnt; + result = BTSearchRecord(btfile, iterator, &btdata, &datasize, NULL); + if (result || + (recp->recordType != kHFSPlusAttrExtents) || + (datasize < sizeof(HFSPlusAttrExtents))) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_getxattr: %s missing extents, only %d blks of %d found\n", + attr_name, blkcnt, totalblocks); + break; /* break from while */ + } + /* Grab the next 8 extents. */ + bcopy(&recp->overflowExtents.extents[0], extentptr, sizeof(HFSPlusExtentRecord)); + extentptr += kHFSPlusExtentDensity; + blkcnt += count_extent_blocks(totalblocks, recp->overflowExtents.extents); + } + + /* Release Attr B-Tree lock */ + hfs_systemfile_unlock(hfsmp, lockflags); + + if (blkcnt < totalblocks) { + result = ENOATTR; + } else { + result = read_attr_data(hfsmp, buf, attrlen, extentbuf); + } + hfs_free(extentbuf); + + } else { /* No overflow extents. */ + result = read_attr_data(hfsmp, buf, recp->forkData.theFork.logicalSize, recp->forkData.theFork.extents); + } + break; + } + + default: + /* We only support inline EAs. Default to ENOATTR for anything else */ + result = ENOATTR; + break; + } + +exit: + hfs_free(iterator); + hfs_free(recp); + hfs_unlock(cp); + + return MacToVFSError(result); +} + +/* + * Set the data of an extended attribute. 
+ */ +int +hfs_vnop_setxattr(vnode_t vp, const char *attr_name, const void *buf, size_t bufsize, UVFSXattrHow option) +{ + struct cnode *cp = NULL; + struct hfsmount *hfsmp; + size_t attrsize; + int result; + + if (attr_name == NULL || attr_name[0] == '\0') { + return (EINVAL); /* invalid name */ + } + if (strlen(attr_name) > XATTR_MAXNAMELEN) { + return (ENAMETOOLONG); + } + if (buf == NULL) { + return (EINVAL); + } + if (VNODE_IS_RSRC(vp)) { + return (EPERM); + } + + hfsmp = VTOHFS(vp); + + /* Set the Finder Info. */ + if (strcmp(attr_name, XATTR_FINDERINFO_NAME) == 0) { + union { + uint8_t data[32]; + char cdata[32]; + struct FndrFileInfo info; + } fi; + void * finderinfo_start; + u_int8_t *finfo = NULL; + u_int16_t fdFlags; + u_int32_t dateadded = 0; + u_int32_t write_gen_counter = 0; + u_int32_t document_id = 0; + + attrsize = sizeof(VTOC(vp)->c_finderinfo); + + if (bufsize != attrsize) { + return (ERANGE); + } + /* Grab the new Finder Info data. */ + memcpy(fi.cdata, buf, attrsize); + + if ((result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { + return (result); + } + cp = VTOC(vp); + + /* Symlink's don't have an external type/creator. */ + if (vnode_islnk(vp)) { + /* Skip over type/creator fields. */ + finderinfo_start = &cp->c_finderinfo[8]; + attrsize -= 8; + } else { + finderinfo_start = &cp->c_finderinfo[0]; + /* + * Don't allow the external setting of + * file type to kHardLinkFileType. 
+ */ + if (fi.info.fdType == SWAP_BE32(kHardLinkFileType)) { + hfs_unlock(cp); + return (EPERM); + } + } + + /* Grab the current date added from the cnode */ + dateadded = hfs_get_dateadded (cp); + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16); + /* + * Grab generation counter directly from the cnode + * instead of calling hfs_get_gencount(), because + * for zero generation count values hfs_get_gencount() + * lies and bumps it up to one. + */ + write_gen_counter = extinfo->write_gen_counter; + document_id = extinfo->document_id; + } else if (S_ISDIR(cp->c_attr.ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)cp->c_finderinfo + 16); + write_gen_counter = extinfo->write_gen_counter; + document_id = extinfo->document_id; + } + + /* + * Zero out the finder info's reserved fields like date added, + * generation counter, and document id to ignore user's attempts + * to set it + */ + hfs_zero_hidden_fields(cp, fi.data); + + if (bcmp(finderinfo_start, emptyfinfo, attrsize)) { + /* attr exists and "create" was specified. */ + if (option == UVFSXattrHowCreate) { + hfs_unlock(cp); + return (EEXIST); + } + } else { /* empty */ + /* attr doesn't exists and "replace" was specified. */ + if (option == UVFSXattrHowReplace) { + hfs_unlock(cp); + return (ENOATTR); + } + } + + /* + * Now restore the date added and other reserved fields to the finderinfo to + * be written out. Advance to the 2nd half of the finderinfo to write them + * out into the buffer. + * + * Make sure to endian swap the date added back into big endian. When we used + * hfs_get_dateadded above to retrieve it, it swapped into local endianness + * for us. But now that we're writing it out, put it back into big endian. 
+ */ + finfo = &fi.data[16]; + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + extinfo->date_added = OSSwapHostToBigInt32(dateadded); + extinfo->write_gen_counter = write_gen_counter; + extinfo->document_id = document_id; + } else if (S_ISDIR(cp->c_attr.ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo; + extinfo->date_added = OSSwapHostToBigInt32(dateadded); + extinfo->write_gen_counter = write_gen_counter; + extinfo->document_id = document_id; + } + + /* Set the cnode's Finder Info. */ + if (attrsize == sizeof(cp->c_finderinfo)) { + bcopy(&fi.data[0], finderinfo_start, attrsize); + } else { + bcopy(&fi.data[8], finderinfo_start, attrsize); + } + + /* Updating finderInfo updates change time and modified time */ + cp->c_touch_chgtime = TRUE; + cp->c_flag |= C_MODIFIED; + + /* + * Mirror the invisible bit to the UF_HIDDEN flag. + * + * The fdFlags for files and frFlags for folders are both 8 bytes + * into the userInfo (the first 16 bytes of the Finder Info). They + * are both 16-bit fields. + */ + fdFlags = *((u_int16_t *) &cp->c_finderinfo[8]); + if (fdFlags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) { + cp->c_bsdflags |= UF_HIDDEN; + } else { + cp->c_bsdflags &= ~UF_HIDDEN; + } + + result = hfs_update(vp, 0); + + hfs_unlock(cp); + return (result); + } + + /* Write the Resource Fork. */ + if (strcmp(attr_name, XATTR_RESOURCEFORK_NAME) == 0) { + return (ENOTSUP); + } + + attrsize = bufsize; + + result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); + if (result) { + goto exit; + } + cp = VTOC(vp); + + /* + * If we're trying to set a non-finderinfo, non-resourcefork EA, then + * call the breakout function - hfs_setxattr_internal. 
+ */ + int started_transaction = 0; + BTreeIterator * iterator = NULL; + struct filefork *btfile = NULL; + FSBufferDescriptor btdata; + HFSPlusAttrRecord attrdata; /* 90 bytes */ + HFSPlusAttrRecord *recp = NULL; + size_t recp_size = 0; + HFSPlusExtentDescriptor *extentptr = NULL; + size_t extentbufsize = 0; + int lockflags = 0; + int exists = 0; + int allocatedblks = 0; + u_int32_t target_id; + + if (cp) { + target_id = cp->c_fileid; + } else { + target_id = kHFSRootParentID; + } + + /* Start a transaction for our changes. */ + if (hfs_start_transaction(hfsmp) != 0) { + result = EINVAL; + goto exit; + } + started_transaction = 1; + + /* + * Once we started the transaction, nobody can compete + * with us, so make sure this file is still there. + */ + if ((cp) && (cp->c_flag & C_NOEXISTS)) { + result = ENOENT; + goto exit; + } + + /* + * If there isn't an attributes b-tree then create one. + */ + if (hfsmp->hfs_attribute_vp == NULL) { + result = hfs_create_attr_btree(hfsmp, ATTRIBUTE_FILE_NODE_SIZE, + getnodecount(hfsmp, ATTRIBUTE_FILE_NODE_SIZE)); + if (result) { + goto exit; + } + } + if (hfsmp->hfs_max_inline_attrsize == 0) { + hfsmp->hfs_max_inline_attrsize = getmaxinlineattrsize(hfsmp->hfs_attribute_vp); + } + + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); + + /* Build the b-tree key. */ + iterator = hfs_mallocz(sizeof(*iterator)); + result = hfs_buildattrkey(target_id, attr_name, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto exit_lock; + } + + /* Preflight for replace/create semantics. */ + btfile = VTOF(hfsmp->hfs_attribute_vp); + btdata.bufferAddress = &attrdata; + btdata.itemSize = sizeof(attrdata); + btdata.itemCount = 1; + exists = BTSearchRecord(btfile, iterator, &btdata, NULL, NULL) == 0; + + /* Replace requires that the attribute already exists. */ + if ((option == UVFSXattrHowReplace) && !exists) { + result = ENOATTR; + goto exit_lock; + } + /* Create requires that the attribute doesn't exist. 
*/ + if ((option == UVFSXattrHowCreate) && exists) { + result = EEXIST; + goto exit_lock; + } + + /* Enforce an upper limit. */ + if (attrsize > HFS_XATTR_MAXSIZE) { + result = E2BIG; + goto exit_lock; + } + + /* If it won't fit inline then use extent-based attributes. */ + if (attrsize > hfsmp->hfs_max_inline_attrsize) { + int blkcnt; + int extentblks; + u_int32_t *keystartblk; + int i; + + /* Get some blocks. */ + blkcnt = (int)howmany(attrsize, hfsmp->blockSize); + extentbufsize = blkcnt * sizeof(HFSPlusExtentDescriptor); + extentbufsize = roundup(extentbufsize, sizeof(HFSPlusExtentRecord)); + extentptr = hfs_mallocz(extentbufsize); + result = alloc_attr_blks(hfsmp, attrsize, extentbufsize, extentptr, &allocatedblks); + if (result) { + allocatedblks = 0; + goto exit_lock; /* no more space */ + } + /* Copy data into the blocks. */ + result = write_attr_data(hfsmp, (void*)buf, attrsize, extentptr); + if (result) { + if (vp) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_setxattr: write_attr_data vol=%s err (%d) :%s\n", + hfsmp->vcbVN, result, attr_name); + } + goto exit_lock; + } + + /* Now remove any previous attribute. */ + if (exists) { + result = remove_attribute_records(hfsmp, iterator); + if (result) { + if (vp) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_setxattr: remove_attribute_records vol=%s err (%d) %s:%s\n", + hfsmp->vcbVN, result, "", attr_name); + } + goto exit_lock; + } + } + /* Create attribute fork data record. 
*/ + recp = hfs_malloc(recp_size = sizeof(HFSPlusAttrRecord)); + + btdata.bufferAddress = recp; + btdata.itemCount = 1; + btdata.itemSize = sizeof(HFSPlusAttrForkData); + + recp->recordType = kHFSPlusAttrForkData; + recp->forkData.reserved = 0; + recp->forkData.theFork.logicalSize = attrsize; + recp->forkData.theFork.clumpSize = 0; + recp->forkData.theFork.totalBlocks = blkcnt; + bcopy(extentptr, recp->forkData.theFork.extents, sizeof(HFSPlusExtentRecord)); + + (void) hfs_buildattrkey(target_id, attr_name, (HFSPlusAttrKey *)&iterator->key); + + result = BTInsertRecord(btfile, iterator, &btdata, btdata.itemSize); + if (result) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_setxattr: BTInsertRecord(): vol=%s %d,%s err=%d\n", + hfsmp->vcbVN, target_id, attr_name, result); + goto exit_lock; + } + extentblks = count_extent_blocks(blkcnt, recp->forkData.theFork.extents); + blkcnt -= extentblks; + keystartblk = &((HFSPlusAttrKey *)&iterator->key)->startBlock; + i = 0; + + /* Create overflow extents as needed. */ + while (blkcnt > 0) { + /* Initialize the key and record. */ + *keystartblk += (u_int32_t)extentblks; + btdata.itemSize = sizeof(HFSPlusAttrExtents); + recp->recordType = kHFSPlusAttrExtents; + recp->overflowExtents.reserved = 0; + + /* Copy the next set of extents. */ + i += kHFSPlusExtentDensity; + bcopy(&extentptr[i], recp->overflowExtents.extents, sizeof(HFSPlusExtentRecord)); + + result = BTInsertRecord(btfile, iterator, &btdata, btdata.itemSize); + if (result) { + LFHFS_LOG(LEVEL_DEBUG, "hfs_setxattr: BTInsertRecord() overflow: vol=%s %d,%s err=%d\n", + hfsmp->vcbVN, target_id, attr_name, result); + goto exit_lock; + } + extentblks = count_extent_blocks(blkcnt, recp->overflowExtents.extents); + blkcnt -= extentblks; + } + } else { /* Inline data */ + if (exists) { + result = remove_attribute_records(hfsmp, iterator); + if (result) { + goto exit_lock; + } + } + + /* Calculate size of record rounded up to multiple of 2 bytes. 
*/ + btdata.itemSize = sizeof(HFSPlusAttrData) - 2 + attrsize + ((attrsize & 1) ? 1 : 0); + recp = hfs_malloc(recp_size = btdata.itemSize); + + recp->recordType = kHFSPlusAttrInlineData; + recp->attrData.reserved[0] = 0; + recp->attrData.reserved[1] = 0; + recp->attrData.attrSize = (u_int32_t)attrsize; + + /* Copy in the attribute data (if any). */ + if (attrsize > 0) { + bcopy(buf, &recp->attrData.attrData, attrsize); + } + + (void) hfs_buildattrkey(target_id, attr_name, (HFSPlusAttrKey *)&iterator->key); + + btdata.bufferAddress = recp; + btdata.itemCount = 1; + result = BTInsertRecord(btfile, iterator, &btdata, btdata.itemSize); + } + +exit_lock: + if (btfile && started_transaction) { + (void) BTFlushPath(btfile); + } + hfs_systemfile_unlock(hfsmp, lockflags); + if (result == 0) { + if (vp) { + cp = VTOC(vp); + /* Setting an attribute only updates change time and not + * modified time of the file. + */ + cp->c_touch_chgtime = TRUE; + cp->c_flag |= C_MODIFIED; + cp->c_attr.ca_recflags |= kHFSHasAttributesMask; + if ((strcmp(attr_name, KAUTH_FILESEC_XATTR) == 0)) { + cp->c_attr.ca_recflags |= kHFSHasSecurityMask; + } + (void) hfs_update(vp, 0); + } + } + if (started_transaction) { + if (result && allocatedblks) { + free_attr_blks(hfsmp, allocatedblks, extentptr); + } + hfs_end_transaction(hfsmp); + } + + hfs_free(recp); + hfs_free(extentptr); + hfs_free(iterator); + +exit: + if (cp) { + hfs_unlock(cp); + } + + return (result == btNotFound ? ENOATTR : MacToVFSError(result)); +} + +/* + * Remove an extended attribute. + */ +int +hfs_vnop_removexattr(vnode_t vp, const char *attr_name) +{ + struct cnode *cp = VTOC(vp); + struct hfsmount *hfsmp; + BTreeIterator * iterator = NULL; + int lockflags; + int result; + + if (attr_name == NULL || attr_name[0] == '\0') { + return (EINVAL); /* invalid name */ + } + hfsmp = VTOHFS(vp); + if (VNODE_IS_RSRC(vp)) { + return (EPERM); + } + + /* Write the Resource Fork. 
*/ + if (strcmp(attr_name, XATTR_RESOURCEFORK_NAME) == 0) { + return (ENOTSUP); + } + + /* Clear out the Finder Info. */ + if (strcmp(attr_name, XATTR_FINDERINFO_NAME) == 0) { + void * finderinfo_start; + int finderinfo_size; + u_int8_t finderinfo[32]; + u_int32_t date_added = 0, write_gen_counter = 0, document_id = 0; + u_int8_t *finfo = NULL; + + if ((result = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { + return (result); + } + + /* Use the local copy to store our temporary changes. */ + bcopy(cp->c_finderinfo, finderinfo, sizeof(finderinfo)); + + /* Zero out the date added field in the local copy */ + hfs_zero_hidden_fields (cp, finderinfo); + + /* Don't expose a symlink's private type/creator. */ + if (vnode_islnk(vp)) { + struct FndrFileInfo *fip; + + fip = (struct FndrFileInfo *)&finderinfo; + fip->fdType = 0; + fip->fdCreator = 0; + } + + /* Do the byte compare against the local copy */ + if (bcmp(finderinfo, emptyfinfo, sizeof(emptyfinfo)) == 0) { + hfs_unlock(cp); + return (ENOATTR); + } + + /* + * If there was other content, zero out everything except + * type/creator and date added. First, save the date added. 
+ */ + finfo = cp->c_finderinfo; + finfo = finfo + 16; + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + date_added = extinfo->date_added; + write_gen_counter = extinfo->write_gen_counter; + document_id = extinfo->document_id; + } else if (S_ISDIR(cp->c_attr.ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo; + date_added = extinfo->date_added; + write_gen_counter = extinfo->write_gen_counter; + document_id = extinfo->document_id; + } + + if (vnode_islnk(vp)) { + /* Ignore type/creator */ + finderinfo_start = &cp->c_finderinfo[8]; + finderinfo_size = sizeof(cp->c_finderinfo) - 8; + } else { + finderinfo_start = &cp->c_finderinfo[0]; + finderinfo_size = sizeof(cp->c_finderinfo); + } + bzero(finderinfo_start, finderinfo_size); + + /* Now restore the date added */ + if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) { + struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo; + extinfo->date_added = date_added; + extinfo->write_gen_counter = write_gen_counter; + extinfo->document_id = document_id; + } else if (S_ISDIR(cp->c_attr.ca_mode)) { + struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo; + extinfo->date_added = date_added; + extinfo->write_gen_counter = write_gen_counter; + extinfo->document_id = document_id; + } + + /* Updating finderInfo updates change time and modified time */ + cp->c_touch_chgtime = TRUE; + cp->c_flag |= C_MODIFIED; + hfs_update(vp, 0); + + hfs_unlock(cp); + + return (0); + } + + if (hfsmp->hfs_attribute_vp == NULL) { + return (ENOATTR); + } + + iterator = hfs_mallocz(sizeof(*iterator)); + + if ((result = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { + goto exit_nolock; + } + + result = hfs_buildattrkey(cp->c_fileid, attr_name, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto exit; + } + + if (hfs_start_transaction(hfsmp) != 0) { + 
result = EINVAL; + goto exit; + } + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + result = remove_attribute_records(hfsmp, iterator); + + hfs_systemfile_unlock(hfsmp, lockflags); + + if (result == 0) { + cp->c_touch_chgtime = TRUE; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + + /* If no more attributes exist, clear attribute bit */ + result = file_attribute_exist(hfsmp, cp->c_fileid); + if (result == 0) { + cp->c_attr.ca_recflags &= ~kHFSHasAttributesMask; + cp->c_flag |= C_MODIFIED; + } + if (result == EEXIST) { + result = 0; + } + + hfs_systemfile_unlock(hfsmp, lockflags); + + /* If ACL was removed, clear security bit */ + if (strcmp(attr_name, KAUTH_FILESEC_XATTR) == 0) { + cp->c_attr.ca_recflags &= ~kHFSHasSecurityMask; + cp->c_flag |= C_MODIFIED; + } + (void) hfs_update(vp, 0); + } + + hfs_end_transaction(hfsmp); +exit: + hfs_unlock(cp); +exit_nolock: + hfs_free(iterator); + return MacToVFSError(result); +} + +/* + * Initialize vnode for attribute data I/O. + * + * On success, + * - returns zero + * - the attrdata vnode is initialized as hfsmp->hfs_attrdata_vp + * - an iocount is taken on the attrdata vnode which exists + * for the entire duration of the mount. 
It is only dropped + * during unmount + * - the attrdata cnode is not locked + * + * On failure, + * - returns non-zero value + * - the caller does not have to worry about any locks or references + */ +int init_attrdata_vnode(struct hfsmount *hfsmp) +{ + vnode_t vp; + int result = 0; + struct cat_desc cat_desc; + struct cat_attr cat_attr; + struct cat_fork cat_fork; + int newvnode_flags = 0; + + bzero(&cat_desc, sizeof(cat_desc)); + cat_desc.cd_parentcnid = kHFSRootParentID; + cat_desc.cd_nameptr = (const u_int8_t *)hfs_attrdatafilename; + cat_desc.cd_namelen = strlen(hfs_attrdatafilename); + cat_desc.cd_cnid = kHFSAttributeDataFileID; + /* Tag vnode as system file, note that we can still use cluster I/O */ + cat_desc.cd_flags |= CD_ISMETA; + + bzero(&cat_attr, sizeof(cat_attr)); + cat_attr.ca_linkcount = 1; + cat_attr.ca_mode = S_IFREG; + cat_attr.ca_fileid = cat_desc.cd_cnid; + cat_attr.ca_blocks = hfsmp->totalBlocks; + + /* + * The attribute data file is a virtual file that spans the + * entire file system space. + * + * Each extent-based attribute occupies a unique portion of + * in this virtual file. The cluster I/O is done using actual + * allocation block offsets so no additional mapping is needed + * for the VNOP_BLOCKMAP call. + * + * This approach allows the attribute data to be cached without + * incurring the high cost of using a separate vnode per attribute. + * + * Since we need to acquire the attribute b-tree file lock anyways, + * the virtual file doesn't introduce any additional serialization. 
+ */ + bzero(&cat_fork, sizeof(cat_fork)); + cat_fork.cf_size = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize; + cat_fork.cf_blocks = hfsmp->totalBlocks; + cat_fork.cf_extents[0].startBlock = 0; + cat_fork.cf_extents[0].blockCount = cat_fork.cf_blocks; + + result = hfs_getnewvnode(hfsmp, NULL, NULL, &cat_desc, 0, &cat_attr, + &cat_fork, &vp, &newvnode_flags); + if (result == 0) { + hfsmp->hfs_attrdata_vp = vp; + hfs_unlock(VTOC(vp)); + } + return (result); +} + +/* Check if any attribute record exist for given fileID. This function + * is called by hfs_vnop_removexattr to determine if it should clear the + * attribute bit in the catalog record or not. + * + * Note - you must acquire a shared lock on the attribute btree before + * calling this function. + * + * Output: + * EEXIST - If attribute record was found + * 0 - Attribute was not found + * (other) - Other error (such as EIO) + */ +int +file_attribute_exist(struct hfsmount *hfsmp, uint32_t fileID) +{ + HFSPlusAttrKey *key; + BTreeIterator * iterator = NULL; + struct filefork *btfile; + int result = 0; + + // if there's no attribute b-tree we sure as heck + // can't have any attributes! 
+ if (hfsmp->hfs_attribute_vp == NULL) { + return 0; + } + + iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) return ENOMEM; + + key = (HFSPlusAttrKey *)&iterator->key; + + result = hfs_buildattrkey(fileID, NULL, key); + if (result) { + goto out; + } + + btfile = VTOF(hfsmp->hfs_attribute_vp); + result = BTSearchRecord(btfile, iterator, NULL, NULL, NULL); + if (result && (result != btNotFound)) { + goto out; + } + + result = BTIterateRecord(btfile, kBTreeNextRecord, iterator, NULL, NULL); + /* If no next record was found or fileID for next record did not match, + * no more attributes exist for this fileID + */ + if ((result && (result == btNotFound)) || (key->fileID != fileID)) { + result = 0; + } else { + result = EEXIST; + } + +out: + hfs_free(iterator); + return result; +} + +/* + * Read an extent based attribute. + */ +static int +read_attr_data(struct hfsmount *hfsmp, void *buf, size_t datasize, HFSPlusExtentDescriptor *extents) +{ + vnode_t evp = hfsmp->hfs_attrdata_vp; + uint64_t iosize; + uint64_t attrsize; + uint64_t blksize; + uint64_t alreadyread; + int i; + int result = 0; + + hfs_lock_truncate(VTOC(evp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + + attrsize = (uint64_t)datasize; + blksize = (uint64_t)hfsmp->blockSize; + alreadyread = 0; + + /* + * Read the attribute data one extent at a time. + * For the typical case there is only one extent. 
+ */ + for (i = 0; (attrsize > 0) && (extents[i].startBlock != 0); ++i) { + iosize = extents[i].blockCount * blksize; + iosize = MIN(iosize, attrsize); + + uint64_t actualread = 0; + + result = raw_readwrite_read_internal( evp, extents[i].startBlock, extents[i].blockCount * blksize, + alreadyread, iosize, buf, &actualread ); +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: read_attr_data: cr iosize %lld [%d, %d] (%d)\n", + actualread, extents[i].startBlock, extents[i].blockCount, result); +#endif + if (result) + break; + + // read the remaining part after sector boundary if we have such + if (iosize != actualread) + { + result = raw_readwrite_read_internal( evp, extents[i].startBlock, extents[i].blockCount * blksize, + alreadyread + actualread, iosize - actualread, + (uint8_t*)buf + actualread, &actualread ); +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: read_attr_data: cr iosize %lld [%d, %d] (%d)\n", + actualread, extents[i].startBlock, extents[i].blockCount, result); +#endif + if (result) + break; + } + + attrsize -= iosize; + + alreadyread += iosize; + buf = (uint8_t*)buf + iosize; + } + + hfs_unlock_truncate(VTOC(evp), HFS_LOCK_DEFAULT); + return (result); +} + +/* + * Write an extent based attribute. + */ +static int +write_attr_data(struct hfsmount *hfsmp, void *buf, size_t datasize, HFSPlusExtentDescriptor *extents) +{ + vnode_t evp = hfsmp->hfs_attrdata_vp; + uint64_t iosize; + uint64_t attrsize; + uint64_t blksize; + uint64_t alreadywritten; + int i; + int result = 0; + + hfs_lock_truncate(VTOC(evp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + + attrsize = (uint64_t)datasize; + blksize = (uint64_t)hfsmp->blockSize; + alreadywritten = 0; + + /* + * Write the attribute data one extent at a time. 
+ */ + for (i = 0; (attrsize > 0) && (extents[i].startBlock != 0); ++i) { + iosize = extents[i].blockCount * blksize; + iosize = MIN(iosize, attrsize); + + uint64_t actualwritten = 0; + + result = raw_readwrite_write_internal( evp, extents[i].startBlock, extents[i].blockCount * blksize, + alreadywritten, iosize, buf, &actualwritten ); +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: write_attr_data: cw iosize %lld [%d, %d] (%d)\n", + actualwritten, extents[i].startBlock, extents[i].blockCount, result); +#endif + if (result) + break; + + // write the remaining part after sector boundary if we have such + if (iosize != actualwritten) + { + result = raw_readwrite_write_internal( evp, extents[i].startBlock, extents[i].blockCount * blksize, + alreadywritten + actualwritten, iosize - actualwritten, + (uint8_t*)buf + actualwritten, &actualwritten ); +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: write_attr_data: cw iosize %lld [%d, %d] (%d)\n", + actualwritten, extents[i].startBlock, extents[i].blockCount, result); +#endif + if (result) + break; + } + + attrsize -= iosize; + + alreadywritten += iosize; + buf = (uint8_t*)buf + iosize; + } + + hfs_unlock_truncate(VTOC(evp), HFS_LOCK_DEFAULT); + return (result); +} + +/* + * Allocate blocks for an extent based attribute. 
+ */ +static int +alloc_attr_blks(struct hfsmount *hfsmp, size_t attrsize, size_t extentbufsize, HFSPlusExtentDescriptor *extents, int *blocks) +{ + int blkcnt; + int startblk; + int lockflags; + int i; + int maxextents; + int result = 0; + + startblk = hfsmp->hfs_metazone_end; + blkcnt = (int)howmany(attrsize, hfsmp->blockSize); + if (blkcnt > (int)hfs_freeblks(hfsmp, 0)) { + return (ENOSPC); + } + *blocks = blkcnt; + maxextents = (int)extentbufsize / sizeof(HFSPlusExtentDescriptor); + + lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + for (i = 0; (blkcnt > 0) && (i < maxextents); i++) { + /* Try allocating and see if we find something decent */ + result = BlockAllocate(hfsmp, startblk, blkcnt, blkcnt, 0, + &extents[i].startBlock, &extents[i].blockCount); + /* + * If we couldn't find anything, then re-try the allocation but allow + * journal flushes. + */ + if (result == dskFulErr) { + result = BlockAllocate(hfsmp, startblk, blkcnt, blkcnt, HFS_ALLOC_FLUSHTXN, + &extents[i].startBlock, &extents[i].blockCount); + } +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG,"hfs: alloc_attr_blks: BA blkcnt %d [%d, %d] (%d)\n", + blkcnt, extents[i].startBlock, extents[i].blockCount, result); +#endif + if (result) { + extents[i].startBlock = 0; + extents[i].blockCount = 0; + break; + } + blkcnt -= extents[i].blockCount; + startblk = extents[i].startBlock + extents[i].blockCount; + } + /* + * If it didn't fit in the extents buffer then bail. + */ + if (blkcnt) { + result = ENOSPC; +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: alloc_attr_blks: unexpected failure, %d blocks unallocated\n", blkcnt); +#endif + for (; i >= 0; i--) { + if ((blkcnt = extents[i].blockCount) != 0) { + (void) BlockDeallocate(hfsmp, extents[i].startBlock, blkcnt, 0); + extents[i].startBlock = 0; + extents[i].blockCount = 0; + } + } + } + + hfs_systemfile_unlock(hfsmp, lockflags); + return MacToVFSError(result); +} + +/* + * Release blocks from an extent based attribute. 
+ */ +static void +free_attr_blks(struct hfsmount *hfsmp, int blkcnt, HFSPlusExtentDescriptor *extents) +{ + vnode_t evp = hfsmp->hfs_attrdata_vp; + int remblks = blkcnt; + int lockflags; + int i; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + for (i = 0; (remblks > 0) && (extents[i].blockCount != 0); i++) { + if (extents[i].blockCount > (u_int32_t)blkcnt) { +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: free_attr_blks: skipping bad extent [%d, %d]\n", + extents[i].startBlock, extents[i].blockCount); +#endif + extents[i].blockCount = 0; + continue; + } + if (extents[i].startBlock == 0) { + break; + } + (void)BlockDeallocate(hfsmp, extents[i].startBlock, extents[i].blockCount, 0); + remblks -= extents[i].blockCount; +#if HFS_XATTR_VERBOSE + LFHFS_LOG(LEVEL_DEBUG, "hfs: free_attr_blks: BlockDeallocate [%d, %d]\n", + extents[i].startBlock, extents[i].blockCount); +#endif + extents[i].startBlock = 0; + extents[i].blockCount = 0; + + /* Discard any resident pages for this block range. 
*/ + if (evp) { +#if LF_HFS_FULL_VNODE_SUPPORT + off_t start, end; + start = (u_int64_t)extents[i].startBlock * (u_int64_t)hfsmp->blockSize; + end = start + (u_int64_t)extents[i].blockCount * (u_int64_t)hfsmp->blockSize; + //TBD - Need to update this vnode + (void) ubc_msync(hfsmp->hfs_attrdata_vp, start, end, &start, UBC_INVALIDATE); +#endif + } + } + + hfs_systemfile_unlock(hfsmp, lockflags); +} + +static int +has_overflow_extents(HFSPlusForkData *forkdata) +{ + u_int32_t blocks; + + if (forkdata->extents[7].blockCount == 0) + return (0); + + blocks = forkdata->extents[0].blockCount + + forkdata->extents[1].blockCount + + forkdata->extents[2].blockCount + + forkdata->extents[3].blockCount + + forkdata->extents[4].blockCount + + forkdata->extents[5].blockCount + + forkdata->extents[6].blockCount + + forkdata->extents[7].blockCount; + + return (forkdata->totalBlocks > blocks); +} + +static int +count_extent_blocks(int maxblks, HFSPlusExtentRecord extents) +{ + int blocks; + int i; + + for (i = 0, blocks = 0; i < kHFSPlusExtentDensity; ++i) { + /* Ignore obvious bogus extents. */ + if (extents[i].blockCount > (u_int32_t)maxblks) + continue; + if (extents[i].startBlock == 0 || extents[i].blockCount == 0) + break; + blocks += extents[i].blockCount; + } + return (blocks); +} + +/* + * Remove all the records for a given attribute. + * + * - Used by hfs_vnop_removexattr, hfs_vnop_setxattr and hfs_removeallattr. + * - A transaction must have been started. + * - The Attribute b-tree file must be locked exclusive. + * - The Allocation Bitmap file must be locked exclusive. + * - The iterator key must be initialized. 
+ */ +static int +remove_attribute_records(struct hfsmount *hfsmp, BTreeIterator * iterator) +{ + struct filefork *btfile; + FSBufferDescriptor btdata; + HFSPlusAttrRecord attrdata; /* 90 bytes */ + u_int16_t datasize; + int result; + + btfile = VTOF(hfsmp->hfs_attribute_vp); + + btdata.bufferAddress = &attrdata; + btdata.itemSize = sizeof(attrdata); + btdata.itemCount = 1; + result = BTSearchRecord(btfile, iterator, &btdata, &datasize, NULL); + if (result) { + goto exit; /* no records. */ + } + /* + * Free the blocks from extent based attributes. + * + * Note that the block references (btree records) are removed + * before releasing the blocks in the allocation bitmap. + */ + if (attrdata.recordType == kHFSPlusAttrForkData) { + int totalblks; + int extentblks; + u_int32_t *keystartblk; + + if (datasize < sizeof(HFSPlusAttrForkData)) { + LFHFS_LOG(LEVEL_DEBUG, "remove_attribute_records: bad record size %d (expecting %lu)\n", datasize, sizeof(HFSPlusAttrForkData)); + } + totalblks = attrdata.forkData.theFork.totalBlocks; + + /* Process the first 8 extents. */ + extentblks = count_extent_blocks(totalblks, attrdata.forkData.theFork.extents); + if (extentblks > totalblks) + { + LFHFS_LOG(LEVEL_ERROR, "remove_attribute_records: corruption (1)..."); + hfs_assert(0); + } + if (BTDeleteRecord(btfile, iterator) == 0) { + free_attr_blks(hfsmp, extentblks, attrdata.forkData.theFork.extents); + } + totalblks -= extentblks; + keystartblk = &((HFSPlusAttrKey *)&iterator->key)->startBlock; + + /* Process any overflow extents. 
*/ + while (totalblks) { + *keystartblk += (u_int32_t)extentblks; + + result = BTSearchRecord(btfile, iterator, &btdata, &datasize, NULL); + if (result || + (attrdata.recordType != kHFSPlusAttrExtents) || + (datasize < sizeof(HFSPlusAttrExtents))) { + LFHFS_LOG(LEVEL_ERROR, "remove_attribute_records: BTSearchRecord: vol=%s, err=%d (%d), totalblks %d\n", + hfsmp->vcbVN, MacToVFSError(result), attrdata.recordType != kHFSPlusAttrExtents, totalblks); + result = ENOATTR; + break; /* break from while */ + } + /* Process the next 8 extents. */ + extentblks = count_extent_blocks(totalblks, attrdata.overflowExtents.extents); + if (extentblks > totalblks) + { + LFHFS_LOG(LEVEL_ERROR, "remove_attribute_records: corruption (2)..."); + hfs_assert(0); + } + if (BTDeleteRecord(btfile, iterator) == 0) { + free_attr_blks(hfsmp, extentblks, attrdata.overflowExtents.extents); + } + totalblks -= extentblks; + } + } else { + result = BTDeleteRecord(btfile, iterator); + } + (void) BTFlushPath(btfile); +exit: + return (result == btNotFound ? ENOATTR : MacToVFSError(result)); +} + +/* + * Retrieve the list of extended attribute names. + */ +int +hfs_vnop_listxattr(vnode_t vp, void *buf, size_t bufsize, size_t *actual_size) +{ + struct cnode *cp = VTOC(vp); + struct hfsmount *hfsmp; + BTreeIterator * iterator = NULL; + struct filefork *btfile; + struct listattr_callback_state state; + int lockflags; + int result; + u_int8_t finderinfo[32]; + + if (actual_size == NULL) { + return (EINVAL); + } + if (VNODE_IS_RSRC(vp)) { + return (EPERM); + } + + hfsmp = VTOHFS(vp); + *actual_size = 0; + + /* + * Take the truncate lock; this serializes us against the ioctl + * to truncate data & reset the decmpfs state + * in the compressed file handler. 
+ */ + hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + + /* Now the regular cnode lock (shared) */ + if ((result = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) { + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + return (result); + } + + /* + * Make a copy of the cnode's finderinfo to a local so we can + * zero out the date added field. Also zero out the private type/creator + * for symlinks. + */ + bcopy(cp->c_finderinfo, finderinfo, sizeof(finderinfo)); + hfs_zero_hidden_fields (cp, finderinfo); + + /* Don't expose a symlink's private type/creator. */ + if (vnode_islnk(vp)) { + struct FndrFileInfo *fip; + + fip = (struct FndrFileInfo *)&finderinfo; + fip->fdType = 0; + fip->fdCreator = 0; + } + + + /* If Finder Info is non-empty then export it's name. */ + if (bcmp(finderinfo, emptyfinfo, sizeof(emptyfinfo)) != 0) { + if (buf == NULL) { + *actual_size += sizeof(XATTR_FINDERINFO_NAME); + } else if (bufsize < sizeof(XATTR_FINDERINFO_NAME)) { + result = ERANGE; + goto exit; + } else { + *actual_size += sizeof(XATTR_FINDERINFO_NAME); + strcpy((char*)buf, XATTR_FINDERINFO_NAME); + } + } + + /* Bail if we don't have any extended attributes. */ + if ((hfsmp->hfs_attribute_vp == NULL) || + (cp->c_attr.ca_recflags & kHFSHasAttributesMask) == 0) { + result = 0; + goto exit; + } + btfile = VTOF(hfsmp->hfs_attribute_vp); + + iterator = hfs_mallocz(sizeof(*iterator)); + + result = hfs_buildattrkey(cp->c_fileid, NULL, (HFSPlusAttrKey *)&iterator->key); + if (result) { + goto exit; + } + + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + + result = BTSearchRecord(btfile, iterator, NULL, NULL, NULL); + if (result && result != btNotFound) { + hfs_systemfile_unlock(hfsmp, lockflags); + goto exit; + } + + state.fileID = cp->c_fileid; + state.result = 0; + state.buf = (buf == NULL ? NULL : ((u_int8_t*)buf + *actual_size)); + state.bufsize = bufsize - *actual_size; + state.size = 0; + + /* + * Process entries starting just after iterator->key. 
+ */ + result = BTIterateRecords(btfile, kBTreeNextRecord, iterator, + (IterateCallBackProcPtr)listattr_callback, &state); + hfs_systemfile_unlock(hfsmp, lockflags); + + *actual_size += state.size; + + if (state.result || result == btNotFound) { + result = state.result; + } + +exit: + hfs_free(iterator); + hfs_unlock(cp); + hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); + + return MacToVFSError(result); +} + +/* + * Callback - called for each attribute record + */ +static int +listattr_callback(const HFSPlusAttrKey *key, __unused const HFSPlusAttrData *data, struct listattr_callback_state *state) +{ + char attrname[XATTR_MAXNAMELEN + 1]; + ssize_t bytecount; + int result; + + if (state->fileID != key->fileID) { + state->result = 0; + return (0); /* stop */ + } + /* + * Skip over non-primary keys + */ + if (key->startBlock != 0) { + return (1); /* continue */ + } + + /* Convert the attribute name into UTF-8. */ + result = utf8_encodestr(key->attrName, key->attrNameLen * sizeof(UniChar), + (u_int8_t *)attrname, (size_t *)&bytecount, sizeof(attrname), '/', UTF_ADD_NULL_TERM); + if (result) { + state->result = result; + return (0); /* stop */ + } + bytecount++; /* account for null termination char */ + + state->size += bytecount; + + if (state->buf != NULL) { + if ((size_t)bytecount > state->bufsize) { + state->result = ERANGE; + return (0); /* stop */ + } + + memcpy(state->buf, attrname, bytecount); + + state->buf = (state->buf == NULL ? NULL : ((u_int8_t*)state->buf + bytecount)); + state->bufsize -= bytecount; + } + return (1); /* continue */ +} + +/* + * Remove all the attributes from a cnode. + * + * This function creates/ends its own transaction so that each + * attribute is deleted in its own transaction (to avoid having + * a transaction grow too large). + * + * This function takes the necessary locks on the attribute + * b-tree file and the allocation (bitmap) file. + * + * NOTE: Upon sucecss, this function will return with an open + * transaction. 
The reason we do it this way is because when we + * delete the last attribute, we must make sure the flag in the + * catalog record that indicates there are no more records is cleared. + * The caller is responsible for doing this and *must* do it before + * ending the transaction. + */ +int +hfs_removeallattr(struct hfsmount *hfsmp, u_int32_t fileid, bool *open_transaction) +{ + BTreeIterator *iterator = NULL; + HFSPlusAttrKey *key; + struct filefork *btfile; + int result, lockflags = 0; + + *open_transaction = false; + + if (hfsmp->hfs_attribute_vp == NULL) + return 0; + + btfile = VTOF(hfsmp->hfs_attribute_vp); + + iterator = hfs_mallocz(sizeof(BTreeIterator)); + if (iterator == NULL) + return ENOMEM; + + key = (HFSPlusAttrKey *)&iterator->key; + + /* Loop until there are no more attributes for this file id */ + do { + if (!*open_transaction) + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK); + + (void) hfs_buildattrkey(fileid, NULL, key); + result = BTIterateRecord(btfile, kBTreeNextRecord, iterator, NULL, NULL); + if (result || key->fileID != fileid) + goto exit; + + hfs_systemfile_unlock(hfsmp, lockflags); + lockflags = 0; + + if (*open_transaction) { + hfs_end_transaction(hfsmp); + *open_transaction = false; + } + + if (hfs_start_transaction(hfsmp) != 0) { + result = EINVAL; + goto exit; + } + + *open_transaction = true; + + lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); + + result = remove_attribute_records(hfsmp, iterator); + + } while (!result); + +exit: + hfs_free(iterator); + + if (lockflags) + hfs_systemfile_unlock(hfsmp, lockflags); + + result = result == btNotFound ? 0 : MacToVFSError(result); + + if (result && *open_transaction) { + hfs_end_transaction(hfsmp); + *open_transaction = false; + } + + return result; +} + +/* + * hfs_attrkeycompare - compare two attribute b-tree keys. + * + * The name portion of the key is compared using a 16-bit binary comparison. 
+ * This is called from the b-tree code. + */ +int +hfs_attrkeycompare(HFSPlusAttrKey *searchKey, HFSPlusAttrKey *trialKey) +{ + u_int32_t searchFileID, trialFileID; + int result; + + searchFileID = searchKey->fileID; + trialFileID = trialKey->fileID; + result = 0; + + if (searchFileID > trialFileID) { + ++result; + } else if (searchFileID < trialFileID) { + --result; + } else { + u_int16_t * str1 = &searchKey->attrName[0]; + u_int16_t * str2 = &trialKey->attrName[0]; + int length1 = searchKey->attrNameLen; + int length2 = trialKey->attrNameLen; + u_int16_t c1, c2; + int length; + + if (length1 < length2) { + length = length1; + --result; + } else if (length1 > length2) { + length = length2; + ++result; + } else { + length = length1; + } + + while (length--) { + c1 = *(str1++); + c2 = *(str2++); + + if (c1 > c2) { + result = 1; + break; + } + if (c1 < c2) { + result = -1; + break; + } + } + if (result) + return (result); + /* + * Names are equal; compare startBlock + */ + if (searchKey->startBlock == trialKey->startBlock) { + return (0); + } else { + return (searchKey->startBlock < trialKey->startBlock ? 
-1 : 1); + } + } + + return result; +} + +/* + * hfs_buildattrkey - build an Attribute b-tree key + */ +int +hfs_buildattrkey(u_int32_t fileID, const char *attrname, HFSPlusAttrKey *key) +{ + int result = 0; + size_t unicodeBytes = 0; + + if (attrname != NULL) { + /* + * Convert filename from UTF-8 into Unicode + */ + result = utf8_decodestr((const u_int8_t *)attrname, strlen(attrname), key->attrName, + &unicodeBytes, sizeof(key->attrName), 0, 0); + if (result) { + if (result != ENAMETOOLONG) + result = EINVAL; /* name has invalid characters */ + return (result); + } + key->attrNameLen = unicodeBytes / sizeof(UniChar); + key->keyLength = kHFSPlusAttrKeyMinimumLength + unicodeBytes; + } else { + key->attrNameLen = 0; + key->keyLength = kHFSPlusAttrKeyMinimumLength; + } + key->pad = 0; + key->fileID = fileID; + key->startBlock = 0; + + return (0); +} + +/* + * getnodecount - calculate starting node count for attributes b-tree. + */ +static int +getnodecount(struct hfsmount *hfsmp, size_t nodesize) +{ + u_int64_t freebytes; + u_int64_t calcbytes; + + /* + * 10.4: Scale base on current catalog file size (20 %) up to 20 MB. + * 10.5: Attempt to be as big as the catalog clump size. + * + * Use no more than 10 % of the remaining free space. + */ + freebytes = (u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize; + + calcbytes = MIN(hfsmp->hfs_catalog_cp->c_datafork->ff_size / 5, 20 * 1024 * 1024); + + calcbytes = MAX(calcbytes, hfsmp->hfs_catalog_cp->c_datafork->ff_clumpsize); + + calcbytes = MIN(calcbytes, freebytes / 10); + + return (MAX(2, (int)(calcbytes / nodesize))); +} + +/* + * getmaxinlineattrsize - calculate maximum inline attribute size. + * + * This yields 3,802 bytes for an 8K node size. 
+ */ +static size_t +getmaxinlineattrsize(struct vnode * attrvp) +{ + BTreeInfoRec btinfo; + size_t nodesize = ATTRIBUTE_FILE_NODE_SIZE; + size_t maxsize; + + if (attrvp != NULL) { + (void) hfs_lock(VTOC(attrvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); + if (BTGetInformation(VTOF(attrvp), 0, &btinfo) == 0) + nodesize = btinfo.nodeSize; + hfs_unlock(VTOC(attrvp)); + } + maxsize = nodesize; + maxsize -= sizeof(BTNodeDescriptor); /* minus node descriptor */ + maxsize -= 3 * sizeof(u_int16_t); /* minus 3 index slots */ + maxsize /= 2; /* 2 key/rec pairs minumum */ + maxsize -= sizeof(HFSPlusAttrKey); /* minus maximum key size */ + maxsize -= sizeof(HFSPlusAttrData) - 2; /* minus data header */ + maxsize &= 0xFFFFFFFE; /* multiple of 2 bytes */ + + return (maxsize); +} diff --git a/livefiles_hfs_plugin/lf_hfs_xattr.h b/livefiles_hfs_plugin/lf_hfs_xattr.h new file mode 100644 index 0000000..8f90e6f --- /dev/null +++ b/livefiles_hfs_plugin/lf_hfs_xattr.h @@ -0,0 +1,25 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * lf_hfs_xattr.h + * livefiles_hfs + * + * Created by Or Haimovich on 28/3/18. 
+ */ +#ifndef lf_hfs_xattr_h +#define lf_hfs_xattr_h + +#include "lf_hfs_vnode.h" +#include "lf_hfs_format.h" +#include + +int hfs_attrkeycompare(HFSPlusAttrKey *searchKey, HFSPlusAttrKey *trialKey); +int init_attrdata_vnode(struct hfsmount *hfsmp); +int file_attribute_exist(struct hfsmount *hfsmp, uint32_t fileID); +int hfs_buildattrkey(u_int32_t fileID, const char *attrname, HFSPlusAttrKey *key); +int hfs_removeallattr(struct hfsmount *hfsmp, u_int32_t fileid, bool *open_transaction); +int hfs_vnop_getxattr(vnode_t vp, const char *attr_name, void *buf, size_t bufsize, size_t *actual_size); +int hfs_vnop_setxattr(vnode_t vp, const char *attr_name, const void *buf, size_t bufsize, UVFSXattrHow How); +int hfs_vnop_removexattr(vnode_t vp, const char *attr_name); +int hfs_vnop_listxattr(vnode_t vp, void *buf, size_t bufsize, size_t *actual_size); + +#endif /* lf_hfs_xattr_h */ diff --git a/livefiles_hfs_plugin/livefiles_hfs_tester.c b/livefiles_hfs_plugin/livefiles_hfs_tester.c new file mode 100644 index 0000000..421829f --- /dev/null +++ b/livefiles_hfs_plugin/livefiles_hfs_tester.c @@ -0,0 +1,4782 @@ +/* Copyright © 2017-2018 Apple Inc. All rights reserved. + * + * livefiles_hfs_tester.c + * hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. 
+ */ + +#include +#include +#include +#include "livefiles_hfs_tester.h" +#include "lf_hfs_fsops_handler.h" +#include "lf_hfs_dirops_handler.h" +#include +#include +#include +#include "lf_hfs_journal.h" +#include "lf_hfs_generic_buf.h" +#include "lf_hfs_vfsutils.h" +#include "lf_hfs_raw_read_write.h" + +#define DEFAULT_SYNCER_PERIOD 100 // mS +#define MAX_UTF8_NAME_LENGTH (NAME_MAX*3+1) +#define MAX_MAC2SFM (0x80) +#define TEST_CYCLE_COUNT (1) +#define CMP_TIMES(timspec1, timspec2) \ + ((timspec1.tv_sec == timspec2.tv_sec) \ + && (timspec1.tv_nsec == timspec2.tv_nsec)) + +#define ARR_LEN(arr) ((sizeof(arr))/(sizeof(arr[0]))) + +uint32_t guSyncerPeriod = DEFAULT_SYNCER_PERIOD; + +typedef int (*test_hander_t)( UVFSFileNode RootNode ); + +#if HFS_CRASH_TEST + typedef int (*TesterCrashAbortFunction_FP)(void *psTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, pthread_t bSyncerThread); +#endif + + +typedef struct { + + char* pcTestName; + char* pcDMGPath; + test_hander_t pfTestHandler; + + UVFSFileNode psRootNode; + pthread_t sSyncerThread; + bool bSyncerOn; + uint32_t uSyncerCount; + bool bSparseImage; + + #if HFS_CRASH_TEST + uint32_t uCrashAbortCnt; + CrashAbort_E eCrashID; + TesterCrashAbortFunction_FP pAbortFunc; + pthread_t sTestExeThread; + #endif +} TestData_S; + +typedef struct { + int iErr; + int iFD; + UVFSFileNode psNode; + pthread_t pSyncerThread; + #if HFS_CRASH_TEST + CrashAbort_E eCrashID; + #endif +} TesterThreadReturnStatus_S; + +#if HFS_CRASH_TEST +typedef struct { + uint32_t uCrashCount; + CrashAbort_E eCrashID; + int iFD; + UVFSFileNode psNode; + pthread_t pSyncerThread; +} CrashReport_S; +#endif + +#if HFS_CRASH_TEST +char *ppcCrashAbortDesc[CRASH_ABORT_LAST] = { + [CRASH_ABORT_NONE] = "None", + [CRASH_ABORT_MAKE_DIR] = "Make Dir", + [CRASH_ABORT_JOURNAL_BEFORE_FINISH] = "Journal, before transaction finish", + [CRASH_ABORT_JOURNAL_AFTER_JOURNAL_DATA] = "Journal, after journal data has been written", + 
[CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER] = "Journal, after journal header has been written", + [CRASH_ABORT_JOURNAL_IN_BLOCK_DATA] = "Journal, while block data is being written", + [CRASH_ABORT_JOURNAL_AFTER_BLOCK_DATA] = "Journal, after block data has been written", + [CRASH_ABORT_ON_UNMOUNT] = "Unmount", +}; +uint32_t guCrashAbortCnt = 0; +CrashReport_S gsCrashReport; +#endif + +int giFD = 0; + +// Multi-thread read-write test +#if 1 // Quick Regression + #define MTRW_NUM_OF_THREADS 10 + #define MTRW_FILE_SIZE 5*1000 + #define MTRW_NUM_OF_FILES 10 + #define MTRW_NUM_OF_SYMLINKS 10 + #define MTRW_SYMLINK_SIZE PATH_MAX + #define MTRW_NUM_OF_OPERATIONS 10 +#else // Longer Regression + #define MTRW_NUM_OF_THREADS 30 + #define MTRW_FILE_SIZE 5*1000 + #define MTRW_NUM_OF_FILES 30 + #define MTRW_NUM_OF_SYMLINKS 30 + #define MTRW_SYMLINK_SIZE PATH_MAX + #define MTRW_NUM_OF_OPERATIONS 30 +#endif + +typedef struct { + uint32_t uThreadNum; + UVFSFileNode psRootNode; + uint32_t uNumOfFiles; + uint32_t uNumOfSymLinks; + uint32_t uSymLinkSize; + uint64_t uFileSize; + int32_t iRetVal; +} RWThreadData_S; + + +static int SetAttrChangeSize(UVFSFileNode FileNode,uint64_t uNewSize); +static int SetAttrChangeMode(UVFSFileNode FileNode,uint32_t uNewMode); +static int SetAttrChangeUidGid(UVFSFileNode FileNode, uint32_t uNewUid, uint32_t uNewGid); +static int SetAttrChangeAtimeMtime(UVFSFileNode FileNode); +static int GetAttrAndCompare(UVFSFileNode FileNode,UVFSFileAttributes* sInAttrs); +static int HFSTest_RunTest(TestData_S *psTestData); +static void *ReadWriteThread(void *pvArgs); + + +struct unistr255 { + uint16_t length; + uint16_t chars[255]; +}; + +u_char +l2u[256] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 00-07 */ + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 08-0f */ + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 10-17 */ + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 18-1f */ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ + 
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 28-2f */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 38-3f */ + 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 40-47 */ + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 48-4f */ + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 50-57 */ + 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 58-5f */ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60-67 */ + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68-6f */ + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70-77 */ + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 78-7f */ + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 80-87 */ + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 88-8f */ + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 90-97 */ + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 98-9f */ + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* a0-a7 */ + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* a8-af */ + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* b0-b7 */ + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* b8-bf */ + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* c0-c7 */ + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* c8-cf */ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* d0-d7 */ + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* d8-df */ + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* e0-e7 */ + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* e8-ef */ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* f0-f7 */ + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* f8-ff */ +}; + +/* + * Macintosh Unicode (LSB) to Microsoft Services for Macintosh (SFM) Unicode + */ +static const uint16_t +mac2sfm[MAX_MAC2SFM] = { + 0x0, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007, /* 00-07 */ + 0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f, /* 08-0f */ + 0xf010, 0xf011, 0xf012, 
0xf013, 0xf014, 0xf015, 0xf016, 0xf017, /* 10-17 */ + 0xf018, 0xf019, 0xf01a, 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f, /* 18-1f */ + 0x20, 0x21, 0xf020, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ + 0x28, 0x29, 0xf021, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 28-2f */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ + 0x38, 0x39, 0xf022, 0x3b, 0xf023, 0x3d, 0xf024, 0xf025, /* 38-3f */ + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 40-47 */ + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 48-4f */ + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 50-57 */ + 0x58, 0x59, 0x5a, 0x5b, 0xf026, 0x5d, 0x5e, 0x5f, /* 58-5f */ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60-67 */ + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68-6f */ + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70-77 */ + 0x78, 0x79, 0x7a, 0x7b, 0xf027, 0x7d, 0x7e, 0x7f, /* 78-7f */ +}; + +static void +unistr255ToLowerCase( struct unistr255* psUnistr255 ) +{ + for ( uint16_t uIdx=0; uIdxlength; uIdx++ ) + { + if ( psUnistr255->chars[uIdx] < 0x100 ) + { + psUnistr255->chars[uIdx] = l2u[psUnistr255->chars[uIdx]]; + } + } +} + +void HFSTest_PrintCacheStats(void) { + printf("Cache Statistics: buf_cache_size %u, max_buf_cache_size %u, buf_cache_cleanup %u, buf_cache_remove %u, max_gen_buf_uncached %u, gen_buf_uncached %u.\n", + gCacheStat.buf_cache_size, + gCacheStat.max_buf_cache_size, + gCacheStat.buf_cache_cleanup, + gCacheStat.buf_cache_remove, + gCacheStat.max_gen_buf_uncached, + gCacheStat.gen_buf_uncached); +} + +__unused static long long int timestamp() +{ + /* Example of timestamp in second. */ + time_t timestamp_sec; /* timestamp in second */ + time(×tamp_sec); /* get current time; same as: timestamp_sec = time(NULL) */ + + /* Example of timestamp in microsecond. 
*/ + struct timeval timer_usec; + long long int timestamp_usec; /* timestamp in microsecond */ + if (!gettimeofday(&timer_usec, NULL)) { + timestamp_usec = ((long long int) timer_usec.tv_sec) * 1000000ll + + (long long int) timer_usec.tv_usec; + } + else { + timestamp_usec = -1; + } + + return timestamp_usec; +} + +__unused static errno_t +CONV_UTF8ToUnistr255(const uint8_t *utf8, size_t utf8Length, struct unistr255 *unicode) +{ + size_t i; + uint32_t ch; + + unicode->length = 0; + for (i = 0; i < utf8Length; ++i) + { + ch = utf8[i]; + if ((ch & 0x80) == 0) + { + /* Plain ASCII */ + } + else if ((ch & 0xE0) == 0xC0) + { + /* Two-byte sequence */ + if (utf8Length - i >= 2 && (utf8[i+1] & 0xC0) == 0x80) + { + ch = ((ch << 6) + utf8[++i]) - 0x3080; + } + else + { + /* Ill-formed UTF-8 */ + return EILSEQ; + } + } + else if ((ch & 0xF0) == 0xE0) + { + /* Three-byte sequence */ + if (utf8Length - i >= 3 && (utf8[i+1] & 0xC0) == 0x80 && (utf8[i+2] & 0xC0) == 0x80) + { + ch <<= 6; + ch += utf8[++i]; + ch <<= 6; + ch += utf8[++i]; + ch -= 0xE2080; + } + else + { + /* Ill-formed UTF-8 */ + return EILSEQ; + } + } + else if ((ch & 0xF8) == 0xF0) + { + /* Four-byte sequence; requires surrogate pair for UTF-16 */ + if (utf8Length - i >= 4 && (utf8[i+1] & 0xC0) == 0x80 && (utf8[i+2] & 0xC0) == 0x80 && (utf8[i+3] & 0xC0) == 0x80) + { + ch <<= 6; + ch += utf8[++i]; + ch <<= 6; + ch += utf8[++i]; + ch <<= 6; + ch += utf8[++i]; + ch -= 0x3C82080; + } + else + { + /* Ill-formed UTF-8 */ + return EILSEQ; + } + } + + if (ch > 0xFFFF) + { + /* Requires surrogate pairs for UTF-16 */ + if (unicode->length < 254) + { + ch -= 0x00010000; + unicode->chars[unicode->length++] = 0xD800 | (ch >> 10); + unicode->chars[unicode->length++] = 0xDC00 | (ch & 0x003F); + } + else + { + return ENAMETOOLONG; + } + } + else + { + if (unicode->length < 255) + { + unicode->chars[unicode->length++] = ch; + } + else + { + /* No room to store character */ + return ENAMETOOLONG; + } + } + } + + //Only in "." 
and ".." we don't need to change the last char to speciel char. + bool bNeedToChangeLastChar = true; + if ( ((unicode->length == 1) && (unicode->chars[0] == '.')) || + ((unicode->length == 2) && (unicode->chars[0] == '.') && (unicode->chars[1] == '.')) ) + { + bNeedToChangeLastChar = false; + } + + for ( uint16_t uIdx=0; uIdxlength; uIdx++ ) + { + //If the last char is "." or " " we need to change it. + //We won't use the mac2sfm table, we will do it manually + if ( bNeedToChangeLastChar && uIdx == unicode->length-1 ) + { + if ( unicode->chars[uIdx] == ' ' ) + { + unicode->chars[uIdx] = 0xf028; + continue; + } + + if ( unicode->chars[uIdx] == '.' ) + { + unicode->chars[uIdx] = 0xf029; + continue; + } + } + + + if ( unicode->chars[uIdx] < MAX_MAC2SFM ) + { + unicode->chars[uIdx] = mac2sfm[unicode->chars[uIdx]]; + } + } + + return 0; +} + +__unused static void print_dir_entry_name( uint32_t uLen, char* pcName, char* pcSearchName, bool* pbFound ) +{ + struct unistr255 sU255; + struct unistr255 sU2552; + memset( &sU255, 0, sizeof(struct unistr255)); + memset( &sU2552, 0, sizeof(struct unistr255)); + errno_t status = CONV_UTF8ToUnistr255( (uint8_t*)pcName, strlen(pcName), &sU255 ); + status |= CONV_UTF8ToUnistr255( (uint8_t*)pcSearchName, strlen(pcSearchName), &sU2552 ); + + if ( status != 0 ) { assert(0); } + + uLen = sU255.length; + + char pcNameToPrint[uLen+1]; + memset( pcNameToPrint, 0, sizeof(pcNameToPrint) ); + + uint16_t* puName = sU255.chars; + for ( uint32_t uIdx=0; uIdxdea_attrs.fa_type != psExpectedAttr->dea_attrs.fa_type) || // Comapre Type + (psNewDirListEntry->dea_attrs.fa_size != psExpectedAttr->dea_attrs.fa_size) || // Comapre Size + (psNewDirListEntry->dea_attrs.fa_nlink != psExpectedAttr->dea_attrs.fa_nlink) || // Compare Nlink + (psNewDirListEntry->dea_attrs.fa_mtime.tv_sec != psExpectedAttr->dea_attrs.fa_mtime.tv_sec) || // Comapre MTime + (psNewDirListEntry->dea_attrs.fa_ctime.tv_sec != psExpectedAttr->dea_attrs.fa_ctime.tv_sec) || // Comapre Ctime 
+ (psNewDirListEntry->dea_attrs.fa_atime.tv_sec != psExpectedAttr->dea_attrs.fa_atime.tv_sec) || // Comapre Atime + (psNewDirListEntry->dea_attrs.fa_birthtime.tv_sec != psExpectedAttr->dea_attrs.fa_birthtime.tv_sec) || // Comapre birthtime + (psNewDirListEntry->dea_attrs.fa_mtime.tv_nsec != psExpectedAttr->dea_attrs.fa_mtime.tv_nsec) || // Comapre MTime + (psNewDirListEntry->dea_attrs.fa_ctime.tv_nsec != psExpectedAttr->dea_attrs.fa_ctime.tv_nsec) || // Comapre Ctime + (psNewDirListEntry->dea_attrs.fa_atime.tv_nsec != psExpectedAttr->dea_attrs.fa_atime.tv_nsec) || // Comapre Atime + (psNewDirListEntry->dea_attrs.fa_birthtime.tv_nsec != psExpectedAttr->dea_attrs.fa_birthtime.tv_nsec) || // Comapre birthtime + (psNewDirListEntry->dea_attrs.fa_allocsize != psExpectedAttr->dea_attrs.fa_allocsize) ) + { + printf("HFSTest_CompareReadDir: failed.\n"); + + printf("HFSTest_CompareReadDir: expected- name [%s], type [%d], size [%llu], nlink [%u], allocsize [%llu].\n",UVFS_DIRENTRYATTR_NAMEPTR(psExpectedAttr), psExpectedAttr->dea_attrs.fa_type, psExpectedAttr->dea_attrs.fa_size, psExpectedAttr->dea_attrs.fa_nlink, psExpectedAttr->dea_attrs.fa_allocsize); + printf("HFSTest_CompareReadDir: expected- mtime [%ld.%ld], ctime [%ld.%ld], atime [%ld.%ld], btime [%ld.%ld] .\n",psExpectedAttr->dea_attrs.fa_mtime.tv_sec,psExpectedAttr->dea_attrs.fa_mtime.tv_nsec,psExpectedAttr->dea_attrs.fa_ctime.tv_sec,psExpectedAttr->dea_attrs.fa_ctime.tv_nsec, psExpectedAttr->dea_attrs.fa_atime.tv_sec, psExpectedAttr->dea_attrs.fa_atime.tv_nsec, psExpectedAttr->dea_attrs.fa_birthtime.tv_sec, psExpectedAttr->dea_attrs.fa_birthtime.tv_nsec); + + printf("HFSTest_CompareReadDir: got - name [%s], type [%d], size [%llu], nlink [%u], allocsize [%llu].\n",UVFS_DIRENTRYATTR_NAMEPTR(psNewDirListEntry), psNewDirListEntry->dea_attrs.fa_type, psNewDirListEntry->dea_attrs.fa_size, psNewDirListEntry->dea_attrs.fa_nlink, psNewDirListEntry->dea_attrs.fa_allocsize); + printf("HFSTest_CompareReadDir: got - mtime 
[%ld.%ld], ctime [%ld.%ld], atime [%ld.%ld], btime [%ld.%ld] .\n",psNewDirListEntry->dea_attrs.fa_mtime.tv_sec,psNewDirListEntry->dea_attrs.fa_mtime.tv_nsec,psNewDirListEntry->dea_attrs.fa_ctime.tv_sec,psNewDirListEntry->dea_attrs.fa_ctime.tv_nsec, psNewDirListEntry->dea_attrs.fa_atime.tv_sec,psNewDirListEntry->dea_attrs.fa_atime.tv_nsec, psNewDirListEntry->dea_attrs.fa_birthtime.tv_sec,psNewDirListEntry->dea_attrs.fa_birthtime.tv_nsec); + + bIsEqual = false; + } + } + else + { + UVFSDirEntry* psNewDirListEntry = ( UVFSDirEntry*) psReadEntry; + if ( (strcmp(UVFS_DIRENTRYATTR_NAMEPTR(psExpectedAttr),psNewDirListEntry->de_name)) || // Comapre Name + (psNewDirListEntry->de_filetype != psExpectedAttr->dea_attrs.fa_type)) // Comapre Type + { + bIsEqual = false; + } + } + + + + return bIsEqual; +} + +/* --------------------------------------------------------------------------------------------- */ +static void SetExpectedAttr(char* pcName, uint32_t uType, UVFSDirEntryAttr* psAttr); +static int ReadDirAttr(UVFSFileNode psNode, UVFSDirEntryAttr* psReadDirTestsData, uint32_t uDirEntries); +static int RemoveFolder(UVFSFileNode ParentNode,char* DirNameToRemove); +static int CreateNewFolder(UVFSFileNode ParentNode,UVFSFileNode* NewDirNode,char* NewDirName); +static int CreateHardLink(UVFSFileNode FromNode, UVFSFileNode ToDirNode, char* NewHardLinkName); +static int RemoveFile(UVFSFileNode ParentNode,char* FileNameToRemove); +static int CreateNewFile(UVFSFileNode ParentNode,UVFSFileNode* NewFileNode,char* NewFileName,uint64_t size); +static int read_directory_and_search_for_name( UVFSFileNode psNode, char* pcSearchName, bool* pbFound, UVFSDirEntryAttr* psReadDirTestsData, uint32_t uDirEntries ); +static int RenameFile(UVFSFileNode FromParentNode,UVFSFileNode FromNode,char* FromName, UVFSFileNode ToParentNode,UVFSFileNode ToNode,char* ToName); +/* --------------------------------------------------------------------------------------------- */ + +static int 
RemoveFolder(UVFSFileNode ParentNode,char* DirNameToRemove) +{ + int error =0; + + error = HFS_fsOps.fsops_rmdir(ParentNode, DirNameToRemove);; + + return error; +} + +static int RemoveFile(UVFSFileNode ParentNode,char* FileNameToRemove) +{ + int error =0; + + error = HFS_fsOps.fsops_remove( ParentNode, FileNameToRemove, NULL); + + return error; +} + +static int RenameFile(UVFSFileNode FromParentNode,UVFSFileNode FromNode,char* FromName, UVFSFileNode ToParentNode,UVFSFileNode ToNode,char* ToName) +{ + int error =0; + + error = HFS_fsOps.fsops_rename( FromParentNode, FromNode, FromName, ToParentNode, ToNode, ToName, 0); + + return error; +} + +static int CreateNewFile(UVFSFileNode ParentNode,UVFSFileNode* NewFileNode,char* NewFileName,uint64_t size) +{ + int error =0; + UVFSFileAttributes attrs = {0}; + + attrs.fa_validmask = UVFS_FA_VALID_MODE | UVFS_FA_VALID_SIZE; + attrs.fa_type = UVFS_FA_TYPE_FILE; + attrs.fa_mode = UVFS_FA_MODE_OTH(UVFS_FA_MODE_RWX)|UVFS_FA_MODE_GRP(UVFS_FA_MODE_RWX)|UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX);; + attrs.fa_size = size; + + error = HFS_fsOps.fsops_create(ParentNode, NewFileName, &attrs, NewFileNode); + + return error; +} + +static int CreateNewFolder(UVFSFileNode ParentNode,UVFSFileNode* NewDirNode,char* NewDirName) +{ + int error =0; + + UVFSFileAttributes attrs; + memset(&attrs,0,sizeof(UVFSFileAttributes)); + attrs.fa_validmask = UVFS_FA_VALID_MODE; + attrs.fa_type = UVFS_FA_TYPE_DIR; + attrs.fa_mode = UVFS_FA_MODE_OTH(UVFS_FA_MODE_RWX)|UVFS_FA_MODE_GRP(UVFS_FA_MODE_RWX)|UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX); + error = HFS_fsOps.fsops_mkdir(ParentNode, NewDirName, &attrs, NewDirNode); + + return error; +} + +static int CreateHardLink(UVFSFileNode FromNode, UVFSFileNode ToDirNode, char* NewHardLinkName) +{ + int error =0; + + UVFSFileAttributes sToDirAttrs; + UVFSFileAttributes sFromNodeAttrs; + memset(&sToDirAttrs,0,sizeof(UVFSFileAttributes)); + memset(&sFromNodeAttrs,0,sizeof(UVFSFileAttributes)); + + error = 
HFS_fsOps.fsops_link(FromNode, ToDirNode, NewHardLinkName, &sFromNodeAttrs, &sToDirAttrs); + + return error; +} + +static int read_directory_and_search_for_name( UVFSFileNode psNode, char* pcSearchName, bool* pbFound, UVFSDirEntryAttr* psReadDirTestsData, uint32_t uDirEntries ) +{ + if (pbFound) *pbFound = false; + + uint32_t uBufferSize = 1000; + uint8_t* puBuffer = malloc(uBufferSize*2); + if ( puBuffer == NULL ) + { + return ENOMEM; + } + memset(puBuffer, 0xff, uBufferSize*2); + + uint64_t uCookie = 0; + uint64_t uVerifier = UVFS_DIRCOOKIE_VERIFIER_INITIAL; + bool bConRead = true; + + uint32_t uDirsCounter = 0; + uint32_t uFilesCounter = 0; + uint32_t uLinksCounter = 0; + size_t outLen = 0; + uint32_t uDirIndex = 0; + int iReadDirERR = 0; + UVFSDirEntryAttr* psDirData = psReadDirTestsData; + + do { + uint32_t uBufCurOffset = 0; + + memset(puBuffer, 0, uBufferSize); + + iReadDirERR = HFS_fsOps.fsops_readdir (psNode, puBuffer, uBufferSize, uCookie, &outLen, &uVerifier); + // assert(0xffffffffffffffff == *(uint64_t*)(&puBuffer[100])); + if ( (iReadDirERR != 0 && iReadDirERR != UVFS_READDIR_EOF_REACHED) || outLen==0) + { + bConRead = false; + } + else + { + //Go over all entries in the list and check if we got to the end of the directory + bool bEndOfDirectoryList = false; + + while ( !bEndOfDirectoryList && iReadDirERR != UVFS_READDIR_EOF_REACHED ) + { + UVFSDirEntry* psNewDirListEntry = (UVFSDirEntry*) &puBuffer[uBufCurOffset]; + uCookie = psNewDirListEntry->de_nextcookie; + + //We found all the files in the root directory + if ( ( psNewDirListEntry->de_nextcookie == UVFS_DIRCOOKIE_EOF ) || ( psNewDirListEntry->de_reclen == 0 ) ) + { + bEndOfDirectoryList = true; + } + + print_dir_entry_name( psNewDirListEntry->de_namelen, psNewDirListEntry->de_name, pcSearchName, pbFound ); + switch ( psNewDirListEntry->de_filetype ) + { + case UVFS_FA_TYPE_DIR: + { + //printf("found dir: ID: %llu, named [%s], in offset [%u]\n",psNewDirListEntry->de_fileid, 
psNewDirListEntry->de_name, uBufCurOffset); + uDirsCounter++; + } + break; + case UVFS_FA_TYPE_FILE: + { + //printf("found file: ID: %llu, named [%s], in offset [%u]\n",psNewDirListEntry->de_fileid, psNewDirListEntry->de_name, uBufCurOffset); + uFilesCounter++; + } + break; + case UVFS_FA_TYPE_SYMLINK: + { + //printf("found link: ID: %llu, named [%s], in offset [%u]\n",psNewDirListEntry->de_fileid, psNewDirListEntry->de_name, uBufCurOffset); + uLinksCounter++; + } + break; + + default: + printf("Found Unkown file type %d, named [%s], Exiting\n", psNewDirListEntry->de_filetype, psNewDirListEntry->de_name); + bEndOfDirectoryList = true; + bConRead = false; + break; + } + + if (psDirData != NULL) + { + printf("Expected FileName = [%s], FileType = [%d].\n", UVFS_DIRENTRYATTR_NAMEPTR(psDirData), psDirData->dea_attrs.fa_type); + + //TBD - When getAttr will work need to change the compare to getAttr vs ReadDirAttr + if ( !HFSTest_CompareReadDir(psNewDirListEntry, psDirData, false) ) + { + iReadDirERR = EINVAL; + bConRead = false; + break; + } + assert(uDirIndexde_name)); + } + } + } while( bConRead && (iReadDirERR != UVFS_READDIR_EOF_REACHED) ); + + if ( puBuffer ) free( puBuffer ); + + if(iReadDirERR == UVFS_READDIR_EOF_REACHED) + iReadDirERR = 0; + + if ( psDirData ) + assert(uDirIndex==uDirEntries); + + return iReadDirERR; +} + +static int +ReadDirAttr( UVFSFileNode psNode, UVFSDirEntryAttr * psReadDirTestsData, uint32_t uDirEntries) +{ + uint32_t uBufferSize = 1000; + uint8_t* puBuffer = malloc(uBufferSize); + if ( puBuffer == NULL ) + { + return ENOMEM; + } + + uint64_t uCookie = 0; + uint64_t uVerifier = UVFS_DIRCOOKIE_VERIFIER_INITIAL; + bool bConRead = true; + size_t outLen = 0; + int iReadDirERR = 0; + uint32_t uDirIndex = 0; + UVFSDirEntryAttr* psReadDir = psReadDirTestsData; + + do { + + uint32_t uBufCurOffset = 0; + + memset(puBuffer, 0, uBufferSize); + + iReadDirERR = HFS_fsOps.fsops_readdirattr (psNode, puBuffer, uBufferSize, uCookie, &outLen, &uVerifier); + 
if ( (iReadDirERR != 0 && iReadDirERR != UVFS_READDIR_EOF_REACHED) || (outLen == 0) ) + { + bConRead = false; + } + else + { + //Go over all entries in the list and check if we got to the end of the directory + bool bEndOfDirectoryList = false; + + while ( !bEndOfDirectoryList && iReadDirERR != UVFS_READDIR_EOF_REACHED ) + { + UVFSDirEntryAttr* psNewDirListEntry = (UVFSDirEntryAttr*) &puBuffer[uBufCurOffset]; + uCookie = psNewDirListEntry->dea_nextcookie; + //We found all the files in the root directory + if ( ( psNewDirListEntry->dea_nextcookie == UVFS_DIRCOOKIE_EOF ) || ( psNewDirListEntry->dea_nextrec == 0 ) ) + { + bEndOfDirectoryList = true; + } + + printf("Found FileName = [%s], FileID = [%llu], FileSize = [%llu].\n", UVFS_DIRENTRYATTR_NAMEPTR(psNewDirListEntry), psNewDirListEntry->dea_attrs.fa_fileid, psNewDirListEntry->dea_attrs.fa_size); + + if (psReadDir != NULL) + { + printf("Expected FileName = [%s], FileType = [%d].\n", UVFS_DIRENTRYATTR_NAMEPTR(psReadDir), psReadDir->dea_attrs.fa_type); + + //TBD - When getAttr will work need to change the compare to getAttr vs ReadDirAttr + if ( !HFSTest_CompareReadDir((void*)psNewDirListEntry, psReadDir, true) ) + { + iReadDirERR = EINVAL; + bConRead = false; + break; + } + assert( uDirIndex < uDirEntries ); + uDirIndex++; + psReadDir = (UVFSDirEntryAttr*) ((void*) psReadDir + sizeof(UVFSDirEntryAttr) + MAX_UTF8_NAME_LENGTH); + } + + uBufCurOffset += UVFS_DIRENTRYATTR_RECLEN(psNewDirListEntry, strlen(UVFS_DIRENTRYATTR_NAMEPTR(psNewDirListEntry))); + } + } + } while( bConRead && (iReadDirERR != UVFS_READDIR_EOF_REACHED) ); + + if ( puBuffer ) + free( puBuffer ); + + if(iReadDirERR == UVFS_READDIR_EOF_REACHED) + iReadDirERR = 0; + + if (psReadDir != NULL) + assert( uDirIndex == uDirEntries ); + + return iReadDirERR; +} + +static int +GetAttrAndCompare(UVFSFileNode FileNode,UVFSFileAttributes* sInAttrs) +{ + int error =0; + UVFSFileAttributes sOutAttrs; + error = HFS_fsOps.fsops_getattr(FileNode, &sOutAttrs); + if 
(error) + { + printf("Failed in get attr with err [%d]\n",error); + return error; + } + if (sInAttrs->fa_validmask & UVFS_FA_VALID_SIZE) + if (sOutAttrs.fa_size != sInAttrs->fa_size) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_MODE) + if (sOutAttrs.fa_mode != sInAttrs->fa_mode) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_BSD_FLAGS) + if (sOutAttrs.fa_bsd_flags != sInAttrs->fa_bsd_flags) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_ATIME) + if (CMP_TIMES(sOutAttrs.fa_atime,sInAttrs->fa_atime)) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_MTIME) + if (CMP_TIMES(sOutAttrs.fa_atime,sInAttrs->fa_atime)) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_CTIME) + if (CMP_TIMES(sOutAttrs.fa_ctime, sInAttrs->fa_ctime)) + goto fail; + + if (sInAttrs->fa_validmask & UVFS_FA_VALID_BIRTHTIME) + if (CMP_TIMES(sOutAttrs.fa_birthtime, sInAttrs->fa_birthtime)) + goto fail; + + goto out; + +fail: + error = 1; +out: + if (error) printf("Failed in compare attr\n"); + return error; +} + +static int +SetAttrChangeSize(UVFSFileNode FileNode,uint64_t uNewSize) +{ + int error =0; + UVFSFileAttributes sInAttrs; + UVFSFileAttributes sOutAttrs; + memset(&sInAttrs,0,sizeof(UVFSFileAttributes)); + sInAttrs.fa_validmask |= UVFS_FA_VALID_SIZE; + sInAttrs.fa_size = uNewSize; + + error = HFS_fsOps.fsops_setattr( FileNode, &sInAttrs , &sOutAttrs ); + + error = GetAttrAndCompare(FileNode,&sInAttrs); + + return error; +} + +static int +SetAttrChangeMode(UVFSFileNode FileNode,uint32_t uNewMode) +{ + int error =0; + UVFSFileAttributes sInAttrs; + UVFSFileAttributes sOutAttrs; + memset(&sInAttrs,0,sizeof(UVFSFileAttributes)); + sInAttrs.fa_validmask |= UVFS_FA_VALID_MODE; + sInAttrs.fa_mode = uNewMode; + + error = HFS_fsOps.fsops_setattr( FileNode, &sInAttrs , &sOutAttrs ); + + return error; +} + +static int +SetAttrChangeUidGid(UVFSFileNode FileNode, uint32_t uNewUid, uint32_t uNewGid) +{ + int error =0; + UVFSFileAttributes 
sInAttrs; + UVFSFileAttributes sOutAttrs; + memset(&sInAttrs,0,sizeof(UVFSFileAttributes)); + sInAttrs.fa_validmask |= UVFS_FA_VALID_UID; + sInAttrs.fa_validmask |= UVFS_FA_VALID_GID; + sInAttrs.fa_uid = uNewUid; + sInAttrs.fa_gid = uNewGid; + + error = HFS_fsOps.fsops_setattr( FileNode, &sInAttrs , &sOutAttrs ); + + return error; +} + +static int +SetAttrChangeAtimeMtime(UVFSFileNode FileNode) +{ + int error =0; + UVFSFileAttributes sInAttrs; + UVFSFileAttributes sOutAttrs; + + error = HFS_fsOps.fsops_getattr(FileNode, &sOutAttrs); + if (error) + { + printf("Failed in get attr (1) with err [%d]\n",error); + return error; + } + + memset(&sInAttrs,0,sizeof(UVFSFileAttributes)); + sInAttrs.fa_validmask |= UVFS_FA_VALID_ATIME; + sInAttrs.fa_validmask |= UVFS_FA_VALID_MTIME; + sInAttrs.fa_atime.tv_sec = sOutAttrs.fa_atime.tv_sec + 90000000; + sInAttrs.fa_mtime.tv_sec = sOutAttrs.fa_mtime.tv_sec + 90000000; + + error = HFS_fsOps.fsops_setattr( FileNode, &sInAttrs , &sOutAttrs ); + + if (error) + { + printf("Failed to set attr to change atime and mtime err [%d]", error); + return error; + } + error = HFS_fsOps.fsops_getattr(FileNode, &sOutAttrs); + if (error) + { + printf("Failed in get attr (2) with err [%d]\n",error); + return error; + } + + if ( (sOutAttrs.fa_atime.tv_sec != sInAttrs.fa_atime.tv_sec) || (sOutAttrs.fa_mtime.tv_sec != sInAttrs.fa_mtime.tv_sec) ) + { + printf("Failed to update time!\n"); + error = 1; + } + return error; +} + +/*******************************************/ +/*******************************************/ +/*******************************************/ +// Predefined Tests START. 
+/*******************************************/ +/*******************************************/ +/*******************************************/ + +#define HFS_TEST_PREFIX "RUN_HFS_TESTS" +#define HFS_RUN_FSCK "RUN_FSCK" +#define HFS_DMGS_FOLDER "/Volumes/SSD_Shared/FS_DMGs/" +#define TEMP_DMG "/tmp/hfstester.dmg" +#define TEMP_DMG_SPARSE "/tmp/hfstester.dmg.sparseimage" +#define TEMP_DMG_BKUP "/tmp/hfstester_bkup.dmg" +#define TEMP_DMG_BKUP_SPARSE "/tmp/hfstester_bkup.dmg.sparseimage" +#define TEMP_DEV_PATH "/tmp/dev_path.txt" +#define TEMP_DEV_PATH2 "/tmp/dev_path2.txt" +#define TEMP_DEV_PATH3 "/tmp/dev_path3.txt" +#define CREATE_SPARSE_VOLUME "CREATE_SPARSE_VOLUME" +#define CREATE_HFS_DMG "CREATE_HFS_DMG" + +#define MAX_CMN_LEN (1024*2) + +typedef int (*test_hander_t)( UVFSFileNode RootNode ); + +char pcLastDevPathName[50] = {0}; +char pcDevPath[50] = {0}; +char pcDevNum[50] = {0}; +char gpcResultsFolder[256] = {0}; + +static int +HFSTest_PrepareEnv(TestData_S *psTestData ) +{ + int iErr = 0; + bool bMountedAlready = false; + char* pcCmd = malloc(MAX_CMN_LEN); + assert(pcCmd); + + // Remove old dmg if exist. + strcpy(pcCmd, "rm -rf "); + strcat(pcCmd, TEMP_DMG" "TEMP_DEV_PATH" "TEMP_DEV_PATH2" "TEMP_DMG_SPARSE); + #if HFS_CRASH_TEST + if (psTestData->eCrashID) { + strcat(pcCmd, " "TEMP_DMG_BKUP" "TEMP_DMG_BKUP_SPARSE); + } + #endif + printf("Execute %s:\n", pcCmd); + system(pcCmd); + + if (!strcmp(CREATE_SPARSE_VOLUME, psTestData->pcDMGPath)) { + // Create a spase volume + psTestData->bSparseImage = true; + strcpy(pcCmd, "hdiutil create -ov -size 20G -type SPARSE -layout NONE "); + strcat(pcCmd, TEMP_DMG); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + strcpy(pcCmd, "hdiutil attach -nomount "); + strcat(pcCmd, TEMP_DMG_SPARSE" > "TEMP_DEV_PATH); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + bMountedAlready = true; + + // Extract disk number (disk??) 
+ strcpy(pcCmd, "cat "TEMP_DEV_PATH" | sed 's/\\/dev\\/disk\\([0-9]*\\)/\\1/' | awk '{print $1}' > "TEMP_DEV_PATH2); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + FILE *psCat = fopen(TEMP_DEV_PATH2, "r"); + fgets(pcLastDevPathName, sizeof(pcLastDevPathName), psCat); + pclose(psCat); + pcLastDevPathName[strlen(pcLastDevPathName)-1] = '\0'; + + sprintf(pcCmd, "newfs_hfs -v SparsedVolume -J /dev/disk%s ", pcLastDevPathName); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + strcpy(pcDevPath, "/dev/rdisk"); + strcat(pcDevPath, pcLastDevPathName); + printf("%s\n", pcDevPath); + pcDevNum[0] = '\0'; + + } else if (!strcmp(CREATE_HFS_DMG, psTestData->pcDMGPath)) { + // No dmg filename provided. Create one: + strcpy(pcCmd, "hdiutil create -size 20G -fs HFS+ -volname TwentyGigDmg "); + strcat(pcCmd, TEMP_DMG); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + + } else if (psTestData->pcDMGPath[0] == '\0') { + // No dmg filename provided. Create one: + strcpy(pcCmd, "hdiutil create -size 20G -fs HFS+J -volname TwentyGigJournalDmg "); + strcat(pcCmd, TEMP_DMG); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + } else { + // use dmg from provided path + psTestData->bSparseImage = (strstr(psTestData->pcDMGPath, ".sparseimage") != NULL); + // Copy dmg to tmp folder + strcpy(pcCmd, "cp "); + strcat(pcCmd, psTestData->pcDMGPath); + strcat(pcCmd, " "); + if (psTestData->bSparseImage) { + strcat(pcCmd, TEMP_DMG_SPARSE); + } else { + strcat(pcCmd, TEMP_DMG); + } + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + } + + if (!bMountedAlready) { + // Attach DMG. 
+ strcpy(pcCmd, "hdiutil attach -nomount "); + if (psTestData->bSparseImage) { + strcat(pcCmd, TEMP_DMG_SPARSE); + } else { + strcat(pcCmd, TEMP_DMG); + } + strcat(pcCmd," > "); + strcat(pcCmd, TEMP_DEV_PATH); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + + // Do we have multiple partitions? + strcpy(pcCmd, "cat "TEMP_DEV_PATH" | grep Apple_HFS > "TEMP_DEV_PATH2); + printf("Execute %s:\n", pcCmd); + int iSinglePartition = system( pcCmd ); + + if (iSinglePartition) { + // Extract disk number (disk??) + strcpy(pcCmd, "cat "TEMP_DEV_PATH" | sed 's/\\/dev\\/disk\\([0-9]*\\)/\\1/' | awk '{print $1}' > "TEMP_DEV_PATH2); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + FILE *psCat = fopen(TEMP_DEV_PATH2, "r"); + fgets(pcLastDevPathName, sizeof(pcLastDevPathName), psCat); + pclose(psCat); + pcLastDevPathName[strlen(pcLastDevPathName)-1] = '\0'; + + // Generate the full path + pcDevNum[0] = '\0'; + strcpy(pcDevPath, "/dev/rdisk"); + strcat(pcDevPath, pcLastDevPathName); + printf("%s\n", pcDevPath); + + } else { // Multilpe partitions + // Extract disk number (disk??) + strcpy(pcCmd, "cat "TEMP_DEV_PATH" | grep Apple_HFS | sed 's/\\/dev\\/disk\\([0-9]*\\)s[0-9]*/\\1/' | awk '{print $1}' > "TEMP_DEV_PATH2); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + FILE *psCat = fopen(TEMP_DEV_PATH2, "r"); + fgets(pcLastDevPathName, sizeof(pcLastDevPathName), psCat); + pclose(psCat); + pcLastDevPathName[strlen(pcLastDevPathName)-1] = '\0'; + + // Extract s number (s??) 
+ strcpy(pcCmd, "cat "TEMP_DEV_PATH" | grep Apple_HFS | sed 's/\\/dev\\/disk[0-9]*s\\([0-9]*\\)/\\1/' | awk '{print $1}' > "TEMP_DEV_PATH3); + printf("Execute %s:\n", pcCmd); + iErr = system( pcCmd ); + if ( iErr != 0 ) + { + exit(-1); + } + psCat = fopen(TEMP_DEV_PATH3, "r"); + fgets(pcDevNum, sizeof(pcDevNum), psCat); + pclose(psCat); + pcDevNum[strlen(pcDevNum)-1] = '\0'; + + // Generate the full path + strcpy(pcDevPath, "/dev/rdisk"); + strcat(pcDevPath, pcLastDevPathName); + strcat(pcDevPath, "s"); + strcat(pcDevPath, pcDevNum); + printf("%s\n", pcDevPath); + } + + } + + // Open file. + printf("pcDevPath is %s\n", pcDevPath); + int iFD = open( pcDevPath, O_RDWR ); + if ( iFD < 0 ) + { + printf("Failed to open %s\n", pcDevPath); + exit(EBADF); + } + + free(pcCmd); + + return iFD; +} + +static void +HFSTest_DestroyEnv(__unused int iFD ) +{ + int iErr = 0; + char* pcCmd = malloc(MAX_CMN_LEN); + assert(pcCmd); + + // Detach DMG. + memset(pcCmd, 0, MAX_CMN_LEN); + strcat(pcCmd, "hdiutil detach /dev/disk"); + strcat(pcCmd, pcLastDevPathName); + printf("Execute %s:\n", pcCmd); + iErr = system(pcCmd); + if ( iErr != 0 ) + { + exit(-1); + } + + // Remove old dmg if exist. 
+ memset(pcCmd, 0, MAX_CMN_LEN); + strcat(pcCmd, "rm -rf "); + strcat(pcCmd, TEMP_DEV_PATH); + strcat(pcCmd, " "); + strcat(pcCmd, TEMP_DEV_PATH2); + printf("Execute %s:\n", pcCmd); + system(pcCmd); + + free(pcCmd); +} + +#if HFS_CRASH_TEST +void HFSTest_ClearCrashAbortFunctionArray(void) { + for(unsigned u=0; u /tmp/tmp.txt "); + printf("Execute %s:\n", pcMountCmd); + iErr = system(pcMountCmd); + printf("*** %s returned %d\n", pcMountCmd,iErr); + if (iErr) + return(iErr); + + char pcCatCmd[512] = {0}; + strcat( pcCatCmd, "cat /tmp/tmp.txt | grep '\\/dev\\/disk[0-9]*' | tr -d '\\t' | sed 's///' | sed 's/<\\/string>//' > /tmp/dev.txt"); + printf("Execute %s:\n", pcCatCmd); + iErr = system(pcCatCmd); + printf("returned %d\n", iErr); + if (iErr) + return(iErr); + + psMntd->pcMountedDev[0] = '\0'; + FILE *psCat = fopen("/tmp/dev.txt", "r"); + fscanf(psCat, "%s", psMntd->pcMountedDev); + fclose(psCat); + printf("pcMountedDev is %s\n", psMntd->pcMountedDev); + + strcpy( pcCatCmd, "cat /tmp/tmp.txt | grep '\\/Volumes' | tr -d '\\t' | sed 's///' | sed 's/<\\/string>//' > /tmp/vol.txt"); + printf("Execute %s:\n", pcCatCmd); + iErr = system(pcCatCmd); + printf("returned %d\n", iErr); + if (iErr) + return(iErr); + + psCat = fopen("/tmp/vol.txt", "r"); + strcpy(psMntd->pcMountedVol, "\""); + fscanf(psCat, "%[^\n]", &psMntd->pcMountedVol[1]); + strcat(psMntd->pcMountedVol, "\""); + fclose(psCat); + printf("pcMountedVol is %s\n", psMntd->pcMountedVol); + + return(iErr); +} + +int HFSTest_KextFindAll(MountedDrive_S *psMntd, char *pcSearchFile) { + int iErr = 0; + + char pcFindCmd[512] = {0}; + sprintf(pcFindCmd, "find %s | tee /tmp/file-list.txt", psMntd->pcMountedVol); + printf("Execute %s :\n", pcFindCmd); + iErr = system(pcFindCmd); + printf("returned %d\n", iErr); + if (iErr) + return(iErr); + + sprintf(pcFindCmd, "cat /tmp/file-list.txt | grep %s", pcSearchFile); + printf("Execute %s :\n", pcFindCmd); + iErr = system(pcFindCmd); + printf("returned %d\n", iErr); + if (iErr) + 
return(iErr); + + return(iErr); +} + +int HFSTest_KextCount(MountedDrive_S *psMntd, char *pcSearchFile, uint32_t *puCount) { + int iErr = 0; + + char pcFindCmd[512] = {0}; + sprintf(pcFindCmd, "find %s | tee /tmp/file-list.txt", psMntd->pcMountedVol); + printf("Execute %s :\n", pcFindCmd); + iErr = system(pcFindCmd); + printf("returned %d\n", iErr); + if (iErr) + return(iErr); + + sprintf(pcFindCmd, "cat /tmp/file-list.txt | grep %s | wc -l > /tmp/word-count.txt", pcSearchFile); + printf("Execute %s :\n", pcFindCmd); + iErr = system(pcFindCmd); + printf("returned %d\n", iErr); + + FILE *psCat = fopen("/tmp/word-count.txt", "r"); + fscanf(psCat, "%u", puCount); + fclose(psCat); + + return(iErr); +} + +int HFSTest_KextUnMount(MountedDrive_S *psMntd) { + int iErr = 0; + + char pcUnMountCmd[512] = {0}; + strcat( pcUnMountCmd, "hdiutil detach "); + strcat( pcUnMountCmd, psMntd->pcMountedDev ); + printf("Execute %s:\n", pcUnMountCmd); + iErr = system(pcUnMountCmd); + printf("returned %d\n", iErr); + + return(iErr); +} + +int HFSTest_RunFsck(void) { + int iErr = 0; + char pcFsckCmd[512] = {0}; + + strcat( pcFsckCmd, "/System/Library/Filesystems/hfs.fs/Contents/Resources/fsck_hfs -fd -D 0x22 /dev/disk"); + strcat( pcFsckCmd, pcLastDevPathName ); + + if (pcDevNum[0] != '\0') { + strcat( pcFsckCmd, "s" ); + strcat( pcFsckCmd, pcDevNum); + } + printf("Execute %s:\n", pcFsckCmd); + + iErr = system( pcFsckCmd ); + + if (iErr) { + printf( "*** Fsck CMD failed! 
(%d) \n", iErr); + } else { + printf( "*** Fsck CMD succeeded!\n"); + } + + return(iErr); +} + +int HFSTest_RestartEnv(__unused int iFD) { + // Restart FS for following tests + HFS_fsOps.fsops_fini(); + + int iErr = HFS_fsOps.fsops_init(); + printf("Init err [%d]\n", iErr); + if (iErr) { + printf("Can't re-init (%d).\n", iErr); + } + return(iErr); +} + +int HFSTest_FailTestOnCrashAbort(__unused void *psTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, __unused pthread_t bSyncerThread) { + + printf("**** HFSTest_FailTestOnCrashAbort: eAbort (%u) \"%s\", iFD %d, psNode %p ****\n", eAbort, ppcCrashAbortDesc[eAbort], iFD, psNode); + + if (eAbort != CRASH_ABORT_NONE) { + panic("We should never get here!\n"); + return(-1); + } + + close(iFD); + // Seek & destroy + HFSTest_DestroyEnv( iFD ); + + return(0); +} + +static int HFSTest_ConfirmTestFolderExists(UVFSFileNode RootNode ) { + bool bFound; + + printf("HFSTest_ConfirmTestFolderExists:\n"); + bFound = false; + char pcFolderName[256]; + for(unsigned u=0; u<5; u++) { + sprintf(pcFolderName, "TestFolder_%u", u); + read_directory_and_search_for_name( RootNode, pcFolderName, &bFound, NULL, 0 ); + if (!bFound) { + printf("Error: Can not find replayed dir! 
(%s)\n", pcFolderName); + return -1; + } else { + printf("dir %s found after journal replay.\n", pcFolderName); + } + } + + return 0; +} + +static int HFSTest_ConfirmTestFolderDoesntExists(UVFSFileNode RootNode ) { + bool bFound; + + printf("HFSTest_ConfirmTestFolderExists:\n"); + bFound = false; + read_directory_and_search_for_name( RootNode, "TestFolder", &bFound, NULL, 0 ); + if (bFound) { + printf("dir \"TestFolder\" found.\n"); + return -1; + } + + printf("As expected, \"TestFolder\" was not found.\n"); + + return 0; +} + +int HFSTest_ValidateImageOnMac(__unused TestData_S *psTestData, char *pcDmgFilename, char *pcSearchItem, bool bFindNotFind) { + int iErr = 0; + MountedDrive_S sMntd; + + // Validate image with fsck + Kext mount + // attach -nomount + iErr = HFSTest_KextMount(pcDmgFilename, &sMntd, false); + if (iErr) { + printf("Can't HFSTest_KextMount(false).\n"); + goto exit; + } + + // Run FSCK + iErr = HFSTest_RunFsck(); + if (iErr) { + printf("Can't HFSTest_RunFsck.\n"); + goto exit; + } + + // detach + iErr = HFSTest_KextUnMount(&sMntd); + if (iErr) { + printf("Can't HFSTest_KextUnMount.\n"); + goto exit; + } + + // Validate that we can mount + iErr = HFSTest_KextMount(pcDmgFilename, &sMntd, true); + if (iErr) { + printf("Can't HFSTest_KextMount(true).\n"); + goto exit; + } + + if (pcSearchItem) { + char pcSearchPath[512] = {0}; + strcpy(pcSearchPath, sMntd.pcMountedVol); + strcat(pcSearchPath, pcSearchItem); + printf("pcSearchPath is %s\n", pcSearchPath); + + iErr = HFSTest_KextFindAll(&sMntd, pcSearchPath); + printf("grep returned %d.\n", iErr); + if (bFindNotFind == false) { + // Make sure string was not found on drive + if (iErr == 256) { + iErr = 0; + } else { + iErr = 1; + } + } + if (iErr) { + goto exit; + } + + // Count SymLinks + uint32_t uNumOfSymLinks = 0; + strcpy(pcSearchPath, "TestSymLink_thread"); + iErr = HFSTest_KextCount(&sMntd, pcSearchPath, &uNumOfSymLinks); + printf("*** found %u SymLinks\n", uNumOfSymLinks); + + // Count Files + 
uint32_t uNumOfFiles = 0; + strcpy(pcSearchPath, "file_Thread_"); + iErr = HFSTest_KextCount(&sMntd, pcSearchPath, &uNumOfFiles); + printf("*** found %u files\n", uNumOfFiles); + + } + + iErr = HFSTest_KextUnMount(&sMntd); + if (iErr) { + goto exit; + } + +exit: + return(iErr); +} + +int HFSTest_SaveDMG(void *pvTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, __unused pthread_t pSyncerThread) { + int iErr = 0; + TestData_S *psTestData = pvTestData; + + printf("**** HFSTest_SaveDMG: eAbort (%u) \"%s\", psNode %p ****\n", eAbort, ppcCrashAbortDesc[eAbort], psNode); + + close(iFD); + // Seek & destroy + HFSTest_DestroyEnv( iFD ); + + // Create a snapshot of the dmg + char pcDmgFilename[256]; + strcpy(pcDmgFilename, psTestData->bSparseImage?TEMP_DMG_SPARSE:TEMP_DMG); + + char pcCpCmd[512] = {0}; + strcat( pcCpCmd, "cp "); + strcat( pcCpCmd, pcDmgFilename); + if (psTestData->bSparseImage) { + strcat( pcCpCmd, " "TEMP_DMG_BKUP_SPARSE); + } else { + strcat( pcCpCmd, " "TEMP_DMG_BKUP); + } + printf("Execute %s:\n", pcCpCmd); + iErr = system(pcCpCmd); + printf("returned %d\n", iErr); + if (iErr) { + goto exit; + } +exit: + return(iErr); +} + +int HFSTest_CrashAbortAtRandom(void *pvTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, __unused pthread_t pSyncerThread) { + int iErr = 0; + TestData_S *psTestData = pvTestData; + + printf("**** HFSTest_CrashAbortAtRandom: eAbort (%u) \"%s\", psNode %p ****\n", eAbort, ppcCrashAbortDesc[eAbort], psNode); + + close(iFD); + // Seek & destroy + HFSTest_DestroyEnv( iFD ); + + // Create a snapshot of the crashed dmg + char pcDmgFilename[256]; + strcpy(pcDmgFilename, psTestData->bSparseImage?TEMP_DMG_SPARSE:TEMP_DMG); + + char pcCpCmd[512] = {0}; + strcat( pcCpCmd, "cp "); + strcat( pcCpCmd, pcDmgFilename); + if (psTestData->bSparseImage) { + strcat( pcCpCmd, " "TEMP_DMG_BKUP_SPARSE); + } else { + strcat( pcCpCmd, " "TEMP_DMG_BKUP); + } + printf("Execute %s:\n", pcCpCmd); + iErr = system(pcCpCmd); + printf("returned 
%d\n", iErr); + if (iErr) { + goto exit; + } + + bool bFindNotFind = true; + if (eAbort == CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER) { + bFindNotFind = true; + } else { + bFindNotFind = false; + } + + iErr = HFSTest_ValidateImageOnMac(psTestData, pcDmgFilename, "/TestFolder", bFindNotFind); + if (iErr) { + goto exit; + } + +exit: + return(iErr); +} + +int HFSTest_CrashAbortOnMkDir(void *pvTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, __unused pthread_t pSyncerThread) { + int iErr = 0; + TestData_S *psTestData = pvTestData; + + printf("**** HFSTest_CrashAbortOnMkDir: eAbort (%u) \"%s\", psNode %p ****\n", eAbort, ppcCrashAbortDesc[eAbort], psNode); + + close(iFD); + // Seek & destroy + HFSTest_DestroyEnv( iFD ); + + // Create a snapshot of the crashed dmg + char pcDmgFilename[256]; + strcpy(pcDmgFilename, psTestData->bSparseImage?TEMP_DMG_SPARSE:TEMP_DMG); + + char pcCpCmd[512] = {0}; + strcat( pcCpCmd, "cp "); + strcat( pcCpCmd, pcDmgFilename); + if (psTestData->bSparseImage) { + strcat( pcCpCmd, " "TEMP_DMG_BKUP_SPARSE); + } else { + strcat( pcCpCmd, " "TEMP_DMG_BKUP); + } + printf("Execute %s:\n", pcCpCmd); + iErr = system(pcCpCmd); + printf("returned %d\n", iErr); + if (iErr) { + goto exit; + } + + bool bFindNotFind = true; + if (eAbort == CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER) { + printf("CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, expecting to find!\n"); + bFindNotFind = true; + } else { + bFindNotFind = false; + } + + iErr = HFSTest_ValidateImageOnMac(psTestData, pcDmgFilename, "/TestFolder", bFindNotFind); + if (iErr) { + goto exit; + } + +exit: + printf("HFSTest_CrashAbortOnMkDir returns %d\n", iErr); + return(iErr); +} + +int HFSTest_CrashAbort(void *pvTestData, CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, __unused pthread_t pSyncerThread) { + int iErr = 0; + TestData_S *psTestData = pvTestData; + + printf("**** HFSTest_CrashAbort: eAbort (%u) \"%s\", psNode %p ****\n", eAbort, ppcCrashAbortDesc[eAbort], psNode); + + close(iFD); + // 
Seek & destroy + HFSTest_DestroyEnv( iFD ); + + // Create a snapshot of the crashed dmg + char pcDmgFilename[256]; + strcpy(pcDmgFilename, psTestData->bSparseImage?TEMP_DMG_SPARSE:TEMP_DMG); + + char pcCpCmd[512] = {0}; + strcat( pcCpCmd, "cp "); + strcat( pcCpCmd, pcDmgFilename); + if (psTestData->bSparseImage) { + strcat( pcCpCmd, " "TEMP_DMG_BKUP_SPARSE); + } else { + strcat( pcCpCmd, " "TEMP_DMG_BKUP); + } + printf("Execute %s:\n", pcCpCmd); + iErr = system(pcCpCmd); + printf("returned %d\n", iErr); + if (iErr) { + goto exit; + } + + iErr = HFSTest_ValidateImageOnMac(psTestData, pcDmgFilename, NULL, false); + if (iErr) { + goto exit; + } + +exit: + return(iErr); +} +#endif + +static int +HFSTest_ScanID( UVFSFileNode RootNode ) +{ + int iErr = 0; + printf("HFSTest_ScanID\n"); + __block uint64_t uScanIDFileIDArray; + __block char* pcScanIDPath = malloc(sizeof(char)* MAX_UTF8_NAME_LENGTH); + __block char* pcTempPath; + __block int iFoundRoot = 0; + + //Creating the following path + // /TestFolder/TestFolder2/TestFolder3/TestFolder4/TestFolder5/file.txt + UVFSFileNode TestFolder = NULL; + UVFSFileNode TestFolder2 = NULL; + UVFSFileNode TestFolder3 = NULL; + UVFSFileNode TestFolder4 = NULL; + UVFSFileNode TestFolder5 = NULL; + UVFSFileNode TestFile1 = NULL; + + iErr = CreateNewFolder( RootNode, &TestFolder, "TestFolder"); + printf("CreateNewFolder TestFolder err [%d]\n", iErr); + if (iErr) goto exit; + + iErr = CreateNewFolder( TestFolder, &TestFolder2, "TestFolder2"); + printf("CreateNewFolder TestFolder2 err [%d]\n", iErr); + if (iErr) goto exit; + + iErr = CreateNewFolder( TestFolder2, &TestFolder3, "TestFolder3"); + printf("CreateNewFolder TestFolder3 err [%d]\n", iErr); + if (iErr) goto exit; + + iErr = CreateNewFolder( TestFolder3, &TestFolder4, "TestFolder4"); + printf("CreateNewFolder TestFolder4 err [%d]\n", iErr); + if (iErr) goto exit; + + iErr = CreateNewFolder( TestFolder4, &TestFolder5, "TestFolder5"); + printf("CreateNewFolder TestFolder5 err [%d]\n", 
iErr); + if (iErr) goto exit; + + //Create new file with size 0 + iErr = CreateNewFile(TestFolder5, &TestFile1, "file.txt",512); + printf("Create file.txt in TestFolder5 err [%d]\n", iErr); + if (iErr) goto exit; + + LIFileAttributes_t FileAttr; + iErr = HFS_fsOps.fsops_getattr( TestFile1, &FileAttr); + if (iErr) goto exit; + + HFS_fsOps.fsops_reclaim(TestFile1); + + memset(pcScanIDPath, 0, MAX_UTF8_NAME_LENGTH); + uScanIDFileIDArray = FileAttr.fa_fileid; + + do + { + iErr = HFS_fsOps.fsops_scanids(RootNode, 0, &uScanIDFileIDArray, 1, + ^(__unused unsigned int fileid_index, const UVFSFileAttributes *file_attrs, const char *file_name) { + iFoundRoot = (file_attrs->fa_parentid == file_attrs->fa_fileid); + uScanIDFileIDArray = file_attrs->fa_parentid; + size_t uTmpPathSize = strlen(pcScanIDPath) + 1; + pcTempPath = malloc(uTmpPathSize); + strlcpy(pcTempPath, pcScanIDPath, uTmpPathSize); + strlcpy(pcScanIDPath, file_name, MAX_UTF8_NAME_LENGTH); + + if (uTmpPathSize != 1) { //For the first time we don't want to set / + strcat(pcScanIDPath,"/"); + strcat(pcScanIDPath,pcTempPath); + } + + free(pcTempPath); + }); + printf("HFS_fsOps.fsops_scanids err [%d]\n", iErr); + if (iErr) goto exit; + } while (!iFoundRoot); + + if (strcmp(pcScanIDPath,"/TestFolder/TestFolder2/TestFolder3/TestFolder4/TestFolder5/file.txt")) + { + iErr = EFAULT; + printf("Found path to file [%s]\n", pcScanIDPath); + } + + // ********************* Add Hard Links: ************************ + uint32_t uOriginalFileSize = 500000; + UVFSFileNode psFile = NULL; + size_t iActuallyWrite = 0; + size_t iActuallyRead = 0; + void* pvOutBuf = malloc(uOriginalFileSize); + void* pvInBuf = malloc(uOriginalFileSize); + + if (pvOutBuf == NULL || pvInBuf == NULL) { + printf("ERROR: HFSTest_ScanID: can't malloc (%p, %p)\n", pvOutBuf, pvInBuf); + iErr = -1; + goto exit; + } + + uint64_t* puOutBuf = pvOutBuf; + uint64_t* puInBuf = pvInBuf; + + // Create the original file with size 500,000 Bytes + iErr = CreateNewFile( 
RootNode, &psFile, "original_file.txt", uOriginalFileSize); + if (iErr) { + printf("ERROR: CreateNewFile return %d\n", iErr); + iErr = -1; + goto exit; + } + + // lets write 10,000 Bytes with 0xCD + memset(pvOutBuf, 0, uOriginalFileSize); + memset(pvInBuf, 0, uOriginalFileSize); + + memset(pvOutBuf, 0xCD, uOriginalFileSize); + + iErr = HFS_fsOps.fsops_write( psFile, 0, uOriginalFileSize, pvOutBuf, &iActuallyWrite ); + if (iErr) { + printf("ERROR: fsops_write return %d\n", iErr); + goto exit; + } + + iErr = HFS_fsOps.fsops_read( psFile, 0, uOriginalFileSize, pvInBuf, &iActuallyRead ); + if (iErr) { + printf("ERROR: fsops_read return %d\n", iErr); + goto exit; + } + + // Lets test it... + for ( uint64_t uIdx=0; uIdx<(uOriginalFileSize/sizeof(uint64_t)); uIdx++ ) { + if (puInBuf[uIdx] != puOutBuf[uIdx] ) { + printf("ERROR: puInBuf[uIdx] != puOutBuf[uIdx]\n"); + iErr = -1; + goto exit; + } + } + + UVFSFileNode psDirectory = NULL; + iErr = CreateNewFolder(RootNode,&psDirectory,"dir"); + if (iErr) { + printf("ERROR: CreateNewFolder return %d\n", iErr); + goto exit; + } + iErr = CreateHardLink(psFile,RootNode,"first_link.txt"); + if (iErr) { + printf("ERROR: CreateHardLink return %d\n", iErr); + goto exit; + } + iErr = CreateHardLink(psFile,psDirectory,"second_link.txt"); + if (iErr) { + printf("ERROR: CreateHardLink return %d\n", iErr); + goto exit; + } + + UVFSFileNode psFirstLink = NULL; + iErr = HFS_fsOps.fsops_lookup( psDirectory, "second_link.txt", &psFirstLink ); + if (iErr) goto exit; + + iErr = HFS_fsOps.fsops_getattr( psFirstLink, &FileAttr); + if (iErr) goto exit; + + uScanIDFileIDArray = FileAttr.fa_fileid; + memset(pcScanIDPath, 0, MAX_UTF8_NAME_LENGTH); + + HFS_fsOps.fsops_reclaim( psFile ); + HFS_fsOps.fsops_reclaim( psFirstLink ); + HFS_fsOps.fsops_reclaim( psDirectory ); + + // Now run SanID on hardlinks: + do { + iErr = HFS_fsOps.fsops_scanids(RootNode, 0, &uScanIDFileIDArray, 1, + ^(__unused unsigned int fileid_index, const UVFSFileAttributes 
*file_attrs, const char *file_name) { + iFoundRoot = (file_attrs->fa_parentid == file_attrs->fa_fileid); + uScanIDFileIDArray = file_attrs->fa_parentid; + size_t uTmpPathSize = strlen(pcScanIDPath) + 1; + pcTempPath = malloc(uTmpPathSize); + strlcpy(pcTempPath, pcScanIDPath, uTmpPathSize); + strlcpy(pcScanIDPath, file_name, MAX_UTF8_NAME_LENGTH); + + if (uTmpPathSize != 1) { //For the first time we don't want to set / + strcat(pcScanIDPath,"/"); + strcat(pcScanIDPath,pcTempPath); + } + + free(pcTempPath); + }); + printf("HFS_fsOps.fsops_scanids err [%d]\n", iErr); + if (iErr) goto exit; + } while (!iFoundRoot); + + if (strcmp(pcScanIDPath,"/original_file.txt")) + { + iErr = EFAULT; + printf("Found path to file [%s]\n", pcScanIDPath); + } + +exit: + free(pcScanIDPath); + + if (TestFile1) { + RemoveFile(TestFolder5,"file.txt"); + } + if (TestFolder5) { + HFS_fsOps.fsops_reclaim(TestFolder5); + RemoveFolder(TestFolder4,"TestFolder5"); + } + if (TestFolder4) { + HFS_fsOps.fsops_reclaim(TestFolder4); + RemoveFolder(TestFolder3,"TestFolder4"); + } + if (TestFolder3) { + HFS_fsOps.fsops_reclaim(TestFolder3); + RemoveFolder(TestFolder2,"TestFolder3"); + } + if (TestFolder2) { + HFS_fsOps.fsops_reclaim(TestFolder2); + RemoveFolder(TestFolder,"TestFolder2"); + } + if (TestFolder) { + HFS_fsOps.fsops_reclaim(TestFolder); + RemoveFolder(RootNode,"TestFolder"); + } + + return iErr; +} + +static int +HFSTest_Create( UVFSFileNode RootNode ) +{ + int iErr = 0; + printf("HFSTest_Create\n"); + + UVFSFileNode TestFolder = NULL; + iErr = CreateNewFolder( RootNode, &TestFolder, "TestFolder"); + printf("CreateNewFolder err [%d]\n", iErr); + if (iErr) + return iErr; + + //Create new file with size 0 + UVFSFileNode TestFile1 = NULL; + CreateNewFile(TestFolder, &TestFile1, "TestFile",0); + printf("Create TestFile in TestFolder err [%d]\n", iErr); + if (iErr) + return iErr; + + //Create new file with size 512 + UVFSFileNode TestFile2 = NULL; + CreateNewFile(TestFolder, &TestFile2, 
"TestFile2",512); + printf("Create TestFile2 in TestFolder err [%d]\n", iErr); + if (iErr) + return iErr; + + uint32_t uEntrySize = sizeof(UVFSDirEntryAttr) + MAX_UTF8_NAME_LENGTH; + UVFSDirEntryAttr *psReadDirTestsData = malloc(2*uEntrySize); + if (psReadDirTestsData == NULL) + return ENOMEM; + + UVFSDirEntryAttr *psCurrentReadDirTestsData = psReadDirTestsData; + SetExpectedAttr("TestFile", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_getattr( TestFile1, &psCurrentReadDirTestsData->dea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("TestFile2", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_getattr( TestFile2, &psCurrentReadDirTestsData->dea_attrs ); + +// { +// {.pcTestName = "TestFile", .uTyppe = UVFS_FA_TYPE_FILE, .uSize = 0, .uNlink = 1, .uAllocatedSize = 0}, +// {.pcTestName = "TestFile2", .uTyppe = UVFS_FA_TYPE_FILE, .uSize = 512, .uNlink = 1, .uAllocatedSize = 4096}, +// }; + + iErr = ReadDirAttr(TestFolder, psReadDirTestsData, 2); + free(psReadDirTestsData); + printf("ReadDirAttr err [%d]\n", iErr); + if (iErr) + goto exit; + + // Remove File1 + iErr = RemoveFile(TestFolder,"TestFile"); + printf("Remove File TestFile from TestFolder err [%d]\n", iErr); + if (iErr) + goto exit; + + // Remove File2 + iErr = RemoveFile(TestFolder,"TestFile2"); + printf("Remove File TestFile2 from TestFolder err [%d]\n", iErr); + if (iErr) + goto exit; + + // Remove TestFolder + iErr = RemoveFolder(RootNode,"TestFolder"); + printf("Remove Folder TestFolder from Root err [%d]\n", iErr); + if (iErr) + goto exit; + +exit: + HFS_fsOps.fsops_reclaim(TestFolder); + HFS_fsOps.fsops_reclaim(TestFile1); + HFS_fsOps.fsops_reclaim(TestFile2); + return iErr; +} + +// This function tests the system behavior when deleting a very large defragmented file, +// which will cause the creation of a very large journal transaction and lots of BT buffers +static int 
HFSTest_DeleteAHugeDefragmentedFile_wJournal(UVFSFileNode RootNode) { + #define DHF_NUM_OF_FILES_TO_CREATE 1000 + #define DHF_HUGE_FILE_SIZE (15000000000ULL) // 15GBytes + #define DHF_SMALL_FILENAME "SmallFile" + #define DHF_HUGE_FILENAME "HugeFile" + + int iErr = 0; + + // Create many small files + char pcName[100] = {0}; + UVFSFileNode psNode; + for ( int i=0; ipsRootNode; + char pcName[100] = {0}; + char *pcFilenameFormat = "file_Thread_%u_OpCnt_%u_FileNum_%u_Len_%u.txt"; + + for(uint32_t uOpCnt=0; uOpCntuNumOfFiles; uNumOfFiles++) { + + UVFSFileNode psNode; + uint64_t uLen = psThrdData->uFileSize * uNumOfFiles; + sprintf(pcName, pcFilenameFormat, psThrdData->uThreadNum, uOpCnt, uNumOfFiles, uLen); + printf("Creating file %s\n", pcName); + iErr = CreateNewFile(psRootNode, &psNode, pcName, uLen); + if (iErr) { + printf("Failed creating file %s with iErr %d.\n", pcName,iErr); + goto exit; + } + HFS_fsOps.fsops_reclaim(psNode); + } + + // Create SymLinks + UVFSFileNode *pOutNode = malloc(sizeof(UVFSFileNode) * psThrdData->uNumOfSymLinks); + uint32_t uSymLinkMode = UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_GRP(UVFS_FA_MODE_R) | UVFS_FA_MODE_OTH(UVFS_FA_MODE_R | UVFS_FA_MODE_X); + for(uint32_t uNumOfSymLinks=0; uNumOfSymLinksuNumOfSymLinks; uNumOfSymLinks++) { + + // Create Symlink + char pcSymLinkName[256] = {0}; + sprintf(pcSymLinkName, "TestSymLink_thread_%u_op_%u_symlink_%u", psThrdData->uThreadNum, uOpCnt, uNumOfSymLinks); + uint32_t uSymLinkSize = psThrdData->uSymLinkSize; + char *pcSymLinkContent = malloc(uSymLinkSize); + memset(pcSymLinkContent, 0, uSymLinkSize); + uint32_t uStampLen = sprintf(pcSymLinkContent, "/just/to/check/that/symlink/works/properly/thread/%u/op/%u/symlink_%u",psThrdData->uThreadNum, uOpCnt, uNumOfSymLinks); + assert(uSymLinkSize >= uStampLen); + for(uint32_t uStampCount=1; uSymLinkSize>(uStampCount+1)*uStampLen+1; uStampCount++) { + memcpy(pcSymLinkContent + uStampCount*uStampLen, pcSymLinkContent, uStampLen); + } + 
assert(strlen(pcSymLinkContent) + 1 <= uSymLinkSize); + + UVFSFileAttributes sAttr = {0}; + sAttr.fa_validmask = UVFS_FA_VALID_MODE; + sAttr.fa_type = UVFS_FA_TYPE_SYMLINK; + sAttr.fa_mode = uSymLinkMode; + + pOutNode[uNumOfSymLinks] = NULL; + printf("Creating Symlink %s\n", pcSymLinkName); + iErr = HFS_fsOps.fsops_symlink(psRootNode, pcSymLinkName, pcSymLinkContent, &sAttr, &pOutNode[uNumOfSymLinks] ); + if ( iErr != 0 ) { + printf( "fsops_symlink failed with eror code : %d\n", iErr ); + goto exit; + } + free(pcSymLinkContent); + } + + // Verify Symlink content + for(uint32_t uNumOfSymLinks=0; uNumOfSymLinksuNumOfSymLinks; uNumOfSymLinks++) { + + uint32_t uSymLinkSize = psThrdData->uSymLinkSize; + char pcSymLinkName[256] = {0}; + sprintf(pcSymLinkName, "TestSymLink_thread_%u_op_%u_symlink_%u", psThrdData->uThreadNum, uOpCnt, uNumOfSymLinks); + char *pcSymLinkReadContent = malloc(uSymLinkSize); + memset(pcSymLinkReadContent, 0, uSymLinkSize); + size_t iActuallyRead; + + UVFSFileAttributes sOutAttr = {0}; + + printf("Reading Symlink %s\n", pcSymLinkName); + iErr = HFS_fsOps.fsops_readlink( pOutNode[uNumOfSymLinks], pcSymLinkReadContent, uSymLinkSize, &iActuallyRead, &sOutAttr ); + + if ( iErr != 0 ) { + printf( "fsops_readlink failed with eror code : %d\n", iErr ); + goto exit; + } + + char *pcSymLinkContent = malloc(uSymLinkSize); + memset(pcSymLinkContent, 0, uSymLinkSize); + uint32_t uStampLen = sprintf(pcSymLinkContent, "/just/to/check/that/symlink/works/properly/thread/%u/op/%u/symlink_%u",psThrdData->uThreadNum, uOpCnt, uNumOfSymLinks); + assert(uSymLinkSize >= uStampLen); + for(uint32_t uStampCount=1; uSymLinkSize>(uStampCount+1)*uStampLen+1; uStampCount++) { + memcpy(pcSymLinkContent + uStampCount*uStampLen, pcSymLinkContent, uStampLen); + } + + if ( memcmp( pcSymLinkContent, pcSymLinkReadContent, uSymLinkSize) != 0 ) { + printf( "Read bad symlink content\n" ); + iErr = -1; + goto exit; + } + + if ( sOutAttr.fa_mode != uSymLinkMode) { + printf( "Mode 
mismatch [%d != %d]\n", sOutAttr.fa_mode, uSymLinkMode); + iErr = -1; + goto exit; + } + + if ( sOutAttr.fa_type != UVFS_FA_TYPE_SYMLINK ) { + printf( "Type mismatch\n" ); + iErr = -1; + goto exit; + } + + HFS_fsOps.fsops_reclaim( pOutNode[uNumOfSymLinks] ); + free(pcSymLinkContent); + free(pcSymLinkReadContent); + } + free(pOutNode); + + // Remove files + for(uint32_t uNumOfFiles=0; uNumOfFilesuNumOfFiles; uNumOfFiles++) { + uint64_t uLen = psThrdData->uFileSize * uNumOfFiles; + sprintf(pcName, pcFilenameFormat, psThrdData->uThreadNum, uOpCnt, uNumOfFiles, uLen); + printf("Removing file %s\n", pcName); + iErr = RemoveFile(psRootNode, pcName); + if (iErr) { + printf("Failed deleting file %s with iErr %d.\n", pcName, iErr); + goto exit; + } + } + + // Remove SymLinks + for(uint32_t uNumOfSymLinks=0; uNumOfSymLinksuNumOfSymLinks; uNumOfSymLinks++) { + + char pcSymLinkName[256] = {0}; + sprintf(pcSymLinkName, "TestSymLink_thread_%u_op_%u_symlink_%u", psThrdData->uThreadNum, uOpCnt, uNumOfSymLinks); + + printf("Deleting Symlink %s\n", pcSymLinkName); + iErr = HFS_fsOps.fsops_remove(psRootNode, pcSymLinkName, NULL); + if ( iErr != 0 ) { + printf( "Failed to remove symlink %d\n", iErr ); + goto exit; + } + } + } +exit: + psThrdData->iRetVal = iErr; + return psThrdData; +} + +#if HFS_CRASH_TEST +static int MultiThreadedRW_wJournal_RandomCrash(UVFSFileNode psRootNode) { + int iErr = 0; + + pthread_attr_t sAttr; + pthread_attr_setdetachstate(&sAttr, PTHREAD_CREATE_JOINABLE); + pthread_attr_init(&sAttr); + pthread_t psExecThread[MTRW_NUM_OF_THREADS]; + RWThreadData_S pcThreadData[MTRW_NUM_OF_THREADS] = {{0}}; + for(uint32_t u = 0; u < MTRW_NUM_OF_THREADS; u++) { + pcThreadData[u].uThreadNum = u; + pcThreadData[u].psRootNode = psRootNode; + pcThreadData[u].uNumOfFiles = MTRW_NUM_OF_FILES*(u+1); + pcThreadData[u].uFileSize = MTRW_FILE_SIZE; + pcThreadData[u].uNumOfSymLinks = MTRW_NUM_OF_SYMLINKS*(u+1); + pcThreadData[u].uSymLinkSize = MTRW_SYMLINK_SIZE; + + iErr = 
pthread_create(&psExecThread[u], &sAttr, ReadWriteThread, &pcThreadData[u]); + if (iErr) { + printf("can't pthread_create\n"); + goto exit; + } + } + pthread_attr_destroy(&sAttr); + + time_t sTime; + srand((unsigned) time(&sTime)); + uint32_t uRandTime = 0; + if (guSyncerPeriod) { + uRandTime = rand() % (guSyncerPeriod * 150); + } else { + uRandTime = rand() % (15000); + } + printf("******* uRandTime is %u mS ******\n", uRandTime); + + uint32_t uAbortTime = uRandTime; // mSec + usleep(uAbortTime * 1000); + close(giFD); + printf("******* now: close(giFD) **************\n"); + + for(uint32_t u = 0; u < MTRW_NUM_OF_THREADS; u++) { + iErr = pthread_join(psExecThread[u], NULL); + if (iErr) { + printf("can't pthread_join\n"); + goto exit; + } + iErr = pcThreadData[u].iRetVal; + if (iErr) { + printf("Thread %u return error %d\n", u, iErr); + } + } + +exit: + return iErr; + +} +#endif + +static int HFSTest_MultiThreadedRW_wJournal(UVFSFileNode psRootNode) { + + int iErr = 0; + + pthread_attr_t sAttr; + pthread_attr_setdetachstate(&sAttr, PTHREAD_CREATE_JOINABLE); + pthread_attr_init(&sAttr); + pthread_t psExecThread[MTRW_NUM_OF_THREADS]; + RWThreadData_S pcThreadData[MTRW_NUM_OF_THREADS] = {{0}}; + for(uint32_t u = 0; u < MTRW_NUM_OF_THREADS; u++) { + pcThreadData[u].uThreadNum = u; + pcThreadData[u].psRootNode = psRootNode; + pcThreadData[u].uNumOfFiles = MTRW_NUM_OF_FILES*(u+1); + pcThreadData[u].uFileSize = MTRW_FILE_SIZE; + pcThreadData[u].uNumOfSymLinks = MTRW_NUM_OF_SYMLINKS; + pcThreadData[u].uSymLinkSize = MTRW_SYMLINK_SIZE/3; + + iErr = pthread_create(&psExecThread[u], &sAttr, ReadWriteThread, &pcThreadData[u]); + if (iErr) { + printf("can't pthread_create\n"); + goto exit; + } + } + pthread_attr_destroy(&sAttr); + + for(uint32_t u = 0; u < MTRW_NUM_OF_THREADS; u++) { + iErr = pthread_join(psExecThread[u], NULL); + if (iErr) { + printf("can't pthread_join\n"); + goto exit; + } + iErr = pcThreadData[u].iRetVal; + if (iErr) { + printf("Thread %u return error %d\n", 
u, iErr); + } + } + +exit: + return iErr; +} + +static int +HFSTest_Create1000Files( UVFSFileNode RootNode ) +{ + #define CREATE_NUM_OF_FILES (1000) + #define FILE_NAME "Iamjustasimplefile_%d" + + int iErr = 0; + + char pcName[100] = {0}; + UVFSFileNode psNode; + for ( int i=0; idea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("TestFile2", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_getattr( TestFile2, &psCurrentReadDirTestsData->dea_attrs ); + + iErr = ReadDirAttr(TestFolder, psReadDirTestsData, 2); + free(psReadDirTestsData); + printf("ReadDirAttr err [%d]\n", iErr); + if (iErr) + goto exit; + + + iErr = RenameFile(TestFolder, TestFile1, "TestFile", TestFolder, NULL, "TestFile3"); + printf("Rename TestFile to TestFile3 in same dir err [%d]\n", iErr); + if (iErr) + goto exit; + + iErr = RenameFile(TestFolder, TestFile2, "TestFile2", RootNode, NULL, "TestFile4"); + printf("Rename TestFile2 to TestFile4 in diff Dir err [%d]\n", iErr); + if (iErr) + goto exit; + + iErr = RenameFile(RootNode, TestFolder, "TestFolder", RootNode, NULL, "TestFolder5"); + printf("Rename Dir TestFolder to TestFolder5 err [%d]\n", iErr); + if (iErr) + goto exit; + + iErr = RenameFile(TestFolder, TestFile1, "TestFile3", RootNode, TestFile2, "TestFile4"); + printf("Rename TestFile3 to TestFile4 in diff Dir err [%d]\n", iErr); + if (iErr) + goto exit; + + + // Remove File2 + iErr = RemoveFile(RootNode,"TestFile4"); + printf("Remove File TestFile2 from TestFolder err [%d]\n", iErr); + if (iErr) + goto exit; + + // Remove TestFolder + iErr = RemoveFolder(RootNode,"TestFolder5"); + printf("Remove Folder TestFolder from Root err [%d]\n", iErr); + if (iErr) + goto exit; + +exit: + HFS_fsOps.fsops_reclaim(TestFolder); + HFS_fsOps.fsops_reclaim(TestFile1); + HFS_fsOps.fsops_reclaim(TestFile2); + return iErr; +} + +static int ScanDir(UVFSFileNode UVFSFolderNode, char** contain_str_array, 
char** end_with_str_array, struct timespec mTime) +{ + int err = 0; + + scandir_matching_request_t sMatchingCriteria = {0}; + UVFSFileAttributes smr_attribute_filter = {0}; + scandir_matching_reply_t sMatchingResult = {0}; + void* pvAttrBuf = malloc(sizeof(UVFSDirEntryAttr) + MAX_UTF8_NAME_LENGTH*sizeof(char)); + sMatchingResult.smr_entry = pvAttrBuf; + + sMatchingCriteria.smr_filename_contains = contain_str_array; + sMatchingCriteria.smr_filename_ends_with = end_with_str_array; + sMatchingCriteria.smr_attribute_filter = &smr_attribute_filter; + + if (mTime.tv_nsec != 0 || mTime.tv_sec != 0 ) + { + sMatchingCriteria.smr_attribute_filter->fa_validmask |= UVFS_FA_VALID_MTIME; + sMatchingCriteria.smr_attribute_filter->fa_mtime = mTime; + } + + bool bConRead = true; + + do + { + err = HFS_fsOps.fsops_scandir (UVFSFolderNode, &sMatchingCriteria, &sMatchingResult); + if ( err != 0 || ( sMatchingResult.smr_entry->dea_nextcookie == UVFS_DIRCOOKIE_EOF && sMatchingResult.smr_result_type == 0 ) ) + { + bConRead = false; + } + else + { + if ( sMatchingResult.smr_entry->dea_nextcookie == UVFS_DIRCOOKIE_EOF ) + { + bConRead = false; + } + printf("SearchDir Returned with status %d, FileName = [%s], M-Time sec:[%ld] nsec:[%ld].\n", sMatchingResult.smr_result_type, UVFS_DIRENTRYATTR_NAMEPTR(sMatchingResult.smr_entry),sMatchingResult.smr_entry->dea_attrs.fa_mtime.tv_sec,sMatchingResult.smr_entry->dea_attrs.fa_mtime.tv_nsec); + + sMatchingCriteria.smr_start_cookie = sMatchingResult.smr_entry->dea_nextcookie; + sMatchingCriteria.smr_verifier = sMatchingResult.smr_verifier; + } + + }while (bConRead); + + free(pvAttrBuf); + + return err; +} + +static int HFSTest_Corrupted2ndDiskImage(__unused UVFSFileNode RootNode ) +{ + int iErr = 0; + printf("HFSTest_Corrupted2ndDiskImage:\n"); + + UVFSFileNode TestFolder1 = NULL; + iErr = CreateNewFolder( RootNode, &TestFolder1, "StamFolder"); + printf("CreateNewFolder err [%d]\n", iErr); + if (iErr) goto exit; + + 
HFS_fsOps.fsops_reclaim(TestFolder1); + +exit: + return iErr; +} + +static int HFSTest_ScanDir(UVFSFileNode RootNode ) +{ + int iErr = 0; + UVFSFileNode TestFolder1 = NULL; + UVFSFileNode TestFolder2 = NULL; + UVFSFileNode TestFile1 = NULL; + + iErr = CreateNewFolder( RootNode, &TestFolder1, "D2"); + printf("CreateNewFolder err [%d]\n", iErr); + if (iErr) goto exit; + + iErr = CreateNewFolder( RootNode, &TestFolder2, "ÖÖ"); + printf("CreateNewFolder err [%d]\n", iErr); + if (iErr) goto exit; + + //Create new file with size 0 + iErr = CreateNewFile(RootNode, &TestFile1, "F🤪2",0); + printf("Create TestFile in TestFolder err [%d]\n", iErr); + if (iErr) goto exit; + + UVFSFileAttributes sOutAttrs; + iErr = HFS_fsOps.fsops_getattr(TestFile1, &sOutAttrs); + printf("fsops_getattr F🤪2 err [%d]\n",iErr); + if (iErr) goto exit; + + struct timespec mTime = {0}; + mTime.tv_nsec = sOutAttrs.fa_mtime.tv_nsec; + mTime.tv_sec = sOutAttrs.fa_mtime.tv_sec; + + char* name_contains_array[5] = {0}; + char* name_end_with_array[5] = {0}; + char Smile[5] = "🤪"; + char ContainLetter[2] = "d"; + char EndsWithLetter[2] = "2"; + char SpecialChar[3] = "ö"; + + name_contains_array[0] = (char*) Smile; + name_contains_array[1] = (char*) ContainLetter; + name_contains_array[2] = (char*) SpecialChar; + name_contains_array[3] = NULL; + + name_end_with_array[0] = (char*) EndsWithLetter; + name_end_with_array[1] = (char*) SpecialChar; + name_end_with_array[2] = NULL; + + iErr = ScanDir(RootNode, (char**) &name_contains_array, (char**) &name_end_with_array, mTime); + printf("ScanDir err [%d]\n",iErr); + + + HFS_fsOps.fsops_reclaim(TestFolder1); + HFS_fsOps.fsops_reclaim(TestFolder2); + HFS_fsOps.fsops_reclaim(TestFile1); + + // Remove File + iErr = RemoveFile(RootNode,"F🤪2"); + printf("Remove File F🤪2 from TestFolder err [%d]\n", iErr); + if (iErr) goto exit; + // Remove Folders + iErr = RemoveFolder(RootNode,"D2"); + printf("Remove Folder D1 from Root err [%d]\n", iErr); + if (iErr) goto exit; + + 
iErr = RemoveFolder(RootNode,"ÖÖ"); + printf("Remove Folder ÖÖ from Root err [%d]\n", iErr); + if (iErr) goto exit; +exit: + return iErr; +} + +static int HFSTest_RootFillUp( UVFSFileNode RootNode ) { + #define ROOT_FILL_UP_NUM_OF_FOLDERS 512 + #define ROOT_FILL_UP_NUM_OF_SYMLINKS 512 + UVFSFileNode pTestFolder[ROOT_FILL_UP_NUM_OF_FOLDERS] = {NULL}; + + int iErr = 0; + unsigned u = 0; + bool bFound; + + printf("HFSTest_RootFillUp\n"); + + // Create folders + for(u=0; u||??\\.txt", "file1.txt" }; + + for ( uint8_t uIdx=0; uIdxdea_nameoff = UVFS_DIRENTRYATTR_NAMEOFF; + memcpy (UVFS_DIRENTRYATTR_NAMEPTR(psAttr),pcName, strlen(pcName) + 1); + psAttr->dea_attrs.fa_type = uType; +} + +static int +HFSTest_ReadDir( UVFSFileNode RootNode ) +{ + int iErr = 0; + + uint32_t uEntrySize = sizeof(UVFSDirEntryAttr) + MAX_UTF8_NAME_LENGTH; + UVFSDirEntryAttr *psReadDirTestsData = malloc(6*uEntrySize); + if (psReadDirTestsData == NULL) + return ENOMEM; + + UVFSDirEntryAttr *psCurrentReadDirTestsData = psReadDirTestsData; + SetExpectedAttr(".", UVFS_FA_TYPE_DIR, psCurrentReadDirTestsData); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("..", UVFS_FA_TYPE_DIR, psCurrentReadDirTestsData); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr(".DS_Store", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("D1", UVFS_FA_TYPE_DIR, psCurrentReadDirTestsData); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("F1", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("L1", UVFS_FA_TYPE_SYMLINK, psCurrentReadDirTestsData); + +// +// {.pcTestName = ".", .uTyppe = 
UVFS_FA_TYPE_DIR}, +// {.pcTestName = "..", .uTyppe = UVFS_FA_TYPE_DIR}, +// {.pcTestName = ".DS_Store", .uTyppe = UVFS_FA_TYPE_FILE}, +// {.pcTestName = "D1", .uTyppe = UVFS_FA_TYPE_DIR}, +// {.pcTestName = "F1", .uTyppe = UVFS_FA_TYPE_FILE}, +// {.pcTestName = "L1", .uTyppe = UVFS_FA_TYPE_SYMLINK}, +// }; + + bool bFound; + UVFSFileNode MainDir = NULL; + iErr = HFS_fsOps.fsops_lookup( RootNode, "D1", &MainDir ); + printf("Lookup err [%d]\n", iErr); + if ( iErr ) + return iErr; + + iErr = read_directory_and_search_for_name( MainDir, "D1", &bFound, psReadDirTestsData, 6); + free(psReadDirTestsData); + // Reclaim main dir. + HFS_fsOps.fsops_reclaim(MainDir); + + return iErr; +} + +static int __used +HFSTest_ReadDirAttr( UVFSFileNode RootNode ) +{ + int iErr = 0; + +// struct ReadDirTestData_s psReadDirTestsData[] = { +// {.pcTestName = ".DS_Store", .uTyppe = UVFS_FA_TYPE_FILE, .uSize = 6148, .uNlink = 1, .uAllocatedSize = 8192}, +// {.pcTestName = "D1", .uTyppe = UVFS_FA_TYPE_DIR, .uSize = 0, .uNlink = 2, .uAllocatedSize = 0}, +// {.pcTestName = "F1", .uTyppe = UVFS_FA_TYPE_FILE, .uSize = 4, .uNlink = 1, .uAllocatedSize = 4096}, +// {.pcTestName = "L1", .uTyppe = UVFS_FA_TYPE_SYMLINK, .uSize = 23, .uNlink = 1, .uAllocatedSize = 4096}, +// }; + + UVFSFileNode MainDir = NULL; + iErr = HFS_fsOps.fsops_lookup( RootNode, "D1", &MainDir ); + printf("Lookup err [%d]\n", iErr); + if ( iErr ) + return iErr; + + uint32_t uEntrySize = sizeof(UVFSDirEntryAttr) + MAX_UTF8_NAME_LENGTH; + UVFSDirEntryAttr *psReadDirTestsData = malloc(4*uEntrySize); + if (psReadDirTestsData == NULL) + return ENOMEM; + + UVFSFileNode psVnode = NULL; + UVFSFileNode psVnode1 = NULL; + UVFSFileNode psVnode2 = NULL; + UVFSFileNode psVnode3 = NULL; + UVFSDirEntryAttr *psCurrentReadDirTestsData = psReadDirTestsData; + SetExpectedAttr(".DS_Store", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_lookup( MainDir, ".DS_Store", &psVnode ); + iErr = HFS_fsOps.fsops_getattr( psVnode, 
&psCurrentReadDirTestsData->dea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("D1", UVFS_FA_TYPE_DIR, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_lookup( MainDir, "D1", &psVnode1 ); + iErr = HFS_fsOps.fsops_getattr( psVnode1, &psCurrentReadDirTestsData->dea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("F1", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_lookup( MainDir, "F1", &psVnode2 ); + iErr = HFS_fsOps.fsops_getattr( psVnode2, &psCurrentReadDirTestsData->dea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("L1", UVFS_FA_TYPE_SYMLINK, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_lookup( MainDir, "L1", &psVnode3 ); + iErr = HFS_fsOps.fsops_getattr( psVnode3, &psCurrentReadDirTestsData->dea_attrs ); + + iErr = ReadDirAttr(MainDir, psReadDirTestsData, 4); + free(psReadDirTestsData); + // Reclaim main dir. 
+ HFS_fsOps.fsops_reclaim(MainDir); + HFS_fsOps.fsops_reclaim(psVnode); + HFS_fsOps.fsops_reclaim(psVnode1); + HFS_fsOps.fsops_reclaim(psVnode2); + HFS_fsOps.fsops_reclaim(psVnode3); + + return iErr; +} + +static int +HFSTest_ReadSymlink( UVFSFileNode RootNode ) +{ + void* pvBuf = malloc(200); + assert( pvBuf != NULL ); + memset( pvBuf, 0, 200 ); + char* pcSymLinkContent = "/just/for/check/that/symlink/work/properly"; + char* pcSymlinkFileName = "symlinkfile"; + UVFSFileNode outNode = NULL; + + int iErr = HFS_fsOps.fsops_lookup(RootNode, pcSymlinkFileName, &outNode); + if (iErr) + printf("Dir read failed, D2 wasn't found in Root"); + + // Verify Symlink content + size_t iActuallyRead; + UVFSFileAttributes sOutAttr = {0}; + iErr = HFS_fsOps.fsops_readlink( outNode, pvBuf, 200, &iActuallyRead, &sOutAttr ); + if ( iErr != 0 ) + { + printf( "fsops_readlink failed with eror code : %d\n", iErr ); + goto exit; + } + + if ( strcmp( pvBuf, pcSymLinkContent) != 0 ) + { + printf( "Read bad symlink content\n" ); + iErr = -1; + goto exit; + } + + HFS_fsOps.fsops_reclaim( outNode ); + +exit: + if (pvBuf) + free(pvBuf); + + return iErr; +} + +static int +HFSTest_Symlink( UVFSFileNode RootNode ) +{ + +#define SYMLINK_MODE \ +( UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX) | \ + UVFS_FA_MODE_GRP(UVFS_FA_MODE_R) | \ + UVFS_FA_MODE_OTH(UVFS_FA_MODE_R | UVFS_FA_MODE_X) ) + + void* pvBuf = malloc(200); + assert( pvBuf != NULL ); + memset( pvBuf, 0xff, 200 ); + char* pcSymLinkContent = "/just/for/check/that/symlink/work/properly"; + char* pcSymlinkFileName = "symlinkfile"; + UVFSFileAttributes sAttr = {0}; + sAttr.fa_validmask = UVFS_FA_VALID_MODE; + sAttr.fa_type = UVFS_FA_TYPE_SYMLINK; + sAttr.fa_mode = SYMLINK_MODE; + UVFSFileNode outNode = NULL; + + // Create Symlink. 
+ int iErr = HFS_fsOps.fsops_symlink( RootNode, pcSymlinkFileName, pcSymLinkContent, &sAttr, &outNode ); + if ( iErr != 0 ) + { + printf( "fsops_symlink failed with eror code : %d\n", iErr ); + goto exit; + } + + // Enable once vnode functionality will be merged. + + // Verify Symlink content + size_t iActuallyRead; + UVFSFileAttributes sOutAttr = {0}; + iErr = HFS_fsOps.fsops_readlink( outNode, pvBuf, 200, &iActuallyRead, &sOutAttr ); + if ( iErr != 0 ) + { + printf( "fsops_readlink failed with eror code : %d\n", iErr ); + goto exit; + } + + if ( strcmp( pvBuf, pcSymLinkContent) != 0 ) + { + printf( "Read bad symlink content\n" ); + iErr = -1; + goto exit; + } + + if ( sOutAttr.fa_mode != SYMLINK_MODE) + { + printf( "Mode mismatch [%d != %d]\n", sOutAttr.fa_mode, SYMLINK_MODE); + iErr = -1; + goto exit; + } + + if ( sOutAttr.fa_type != UVFS_FA_TYPE_SYMLINK ) + { + printf( "Type mismatch\n" ); + iErr = -1; + goto exit; + } + + + HFS_fsOps.fsops_reclaim( outNode ); + + // Remove link. + iErr = HFS_fsOps.fsops_remove( RootNode, pcSymlinkFileName, NULL); + if ( iErr != 0 ) + { + printf( "Failed to remove symlink %d\n", iErr ); + goto exit; + } + + bool bFound = false; + read_directory_and_search_for_name( RootNode, pcSymlinkFileName, &bFound, NULL, 0 ); + if ( bFound ) + { + printf( "Failed to remove symlink\n"); + iErr = -1; + goto exit; + } + +exit: + if (pvBuf) + free(pvBuf); + + return iErr; +} + +static int HFSTest_SymlinkOnFile( UVFSFileNode pRootNode ) { + // This test creates a file and a folder on root. + // It then tries to create a SymLink inside the folder and expects pass, + // and creates a SymLink inside a file and expects a failure. 
+ printf("HFSTest_SymlinkOnFile\n"); + + char *pcFolderName = "NewFolder"; + char *pcFileName = "NewFile.txt"; + uint32_t uFileLen = 985; + char *pcSymlinkFilename = "SymLinkFile"; + char *pcSymLinkContent = "/SymlinkContent"; + int iErr = 0; + UVFSFileNode pFolderNode = NULL; + UVFSFileNode pFileNode = NULL; + UVFSFileNode pSymLinkOnRootNode = NULL; + UVFSFileNode pSymLinkOnFolderNode = NULL; + UVFSFileNode pSymLinkOnFileNode = NULL; + + iErr = CreateNewFolder( pRootNode, &pFolderNode, pcFolderName); + printf("CreateNewFolder err [%d]\n", iErr); + if (iErr) { + printf("Error: CreateNewFolder failed.\n"); + return iErr; + } + + //Create new file with size 512 + CreateNewFile(pRootNode, &pFileNode, pcFileName, uFileLen); + printf("Create %s Len %u err [%d]\n", pcFileName, uFileLen, iErr); + if (iErr) { + printf("Error: CreateNewFile failed.\n"); + return iErr; + } + + bool bFound = false; + read_directory_and_search_for_name( pRootNode, pcFolderName, &bFound, NULL, 0); + if (!bFound) { + printf("Error: %s wasn't found in Root.\n", pcFolderName); + return -1; + } else { + printf("%s found in Root!\n", pcFolderName); + } + + read_directory_and_search_for_name( pRootNode, pcFileName, &bFound, NULL, 0); + if (!bFound) { + printf("Error: %s wasn't found in Root.\n", pcFileName); + return -1; + } else { + printf("%s found in Root!\n", pcFileName); + } + + UVFSFileAttributes sAttr = {0}; + sAttr.fa_validmask = UVFS_FA_VALID_MODE; + sAttr.fa_type = UVFS_FA_TYPE_SYMLINK; + sAttr.fa_mode = SYMLINK_MODE; + + // Create Symlink on root + iErr = HFS_fsOps.fsops_symlink( pRootNode, pcSymlinkFilename, pcSymLinkContent, &sAttr, &pSymLinkOnRootNode ); + if ( iErr != 0 ) { + printf( "fsops_symlink failed to create %s with eror code : %d\n", pcSymlinkFilename, iErr ); + return(iErr); + } + + // Create Symlink on folder + iErr = HFS_fsOps.fsops_symlink( pFolderNode, pcSymlinkFilename, pcSymLinkContent, &sAttr, &pSymLinkOnFolderNode ); + if ( iErr != 0 ) { + printf( "fsops_symlink failed 
to create %s inside %s with eror code : %d\n", pcSymlinkFilename, pcFolderName, iErr ); + return(iErr); + } + + // Create Symlink on file + iErr = HFS_fsOps.fsops_symlink( pFileNode, pcSymlinkFilename, pcSymLinkContent, &sAttr, &pSymLinkOnFileNode ); + if ( iErr == 0 ) { + printf( "fsops_symlink error: did not fail to create %s inside %s with eror code : %d\n", pcSymlinkFilename, pcFileName, iErr ); + return(-1); + } + + // cleanup + assert(pSymLinkOnFileNode == NULL); + HFS_fsOps.fsops_reclaim( pFileNode ); + HFS_fsOps.fsops_reclaim( pFolderNode ); + HFS_fsOps.fsops_reclaim( pSymLinkOnFolderNode ); + HFS_fsOps.fsops_reclaim( pSymLinkOnRootNode ); + + return 0; +} + +static int +HFSTest_SetAttr( UVFSFileNode RootNode ) +{ + int iErr = 0; + + UVFSFileNode Dir1 = NULL; + iErr = HFS_fsOps.fsops_lookup(RootNode, "D2", &Dir1); + if (iErr) + printf("Dir read failed, D2 wasn't found in Root"); + UVFSFileNode File1 = NULL; + iErr = HFS_fsOps.fsops_lookup(Dir1, "a.txt", &File1); + + if (iErr) + { + printf("File not found!\n"); + return -1; + } + + // Change file size + // Set Attr, make F1 larger + iErr = SetAttrChangeSize(File1,12*1024); + printf("SetAttrChangeSize to 12K err [%d]\n",iErr); + if (iErr) + { + return iErr; + } + + iErr = SetAttrChangeSize(File1,4*1024); + printf("SetAttrChangeSize to 4 err [%d]\n",iErr); + if (iErr) + { + return iErr; + } + + iErr = SetAttrChangeSize(File1,0*1024); + printf("SetAttrChangeSize to 0 err [%d]\n",iErr); + if (iErr) + { + return iErr; + } + + iErr = SetAttrChangeSize(File1,8*1024*1024); + printf("SetAttrChangeSize to 120MB err [%d]\n",iErr); + if (iErr) + { + return iErr; + } + + iErr = SetAttrChangeMode(File1, UVFS_FA_MODE_GRP(UVFS_FA_MODE_RWX) | UVFS_FA_MODE_USR(UVFS_FA_MODE_RWX)); + printf("Changed file mode to RO err[ %d]\n",iErr); + if (iErr) + { + return iErr; + } + + iErr = SetAttrChangeUidGid(File1, 222, 555); + + printf("Changed Uid and Gid err [%d]\n", iErr); + if (iErr) + { + return iErr; + } + + iErr = 
SetAttrChangeAtimeMtime(File1); + + printf("Changed Atime and Mtime err [%d]\n", iErr); + if (iErr) + { + return iErr; + } + + HFS_fsOps.fsops_reclaim(File1); + + HFS_fsOps.fsops_reclaim(Dir1); + + iErr = HFS_fsOps.fsops_lookup(RootNode, "D2", &Dir1); + if (iErr) + printf("Dir read failed, D2 wasn't found in Root"); + iErr = HFS_fsOps.fsops_lookup(Dir1, "a.txt", &File1); + if (iErr) + { + printf("File not found! (2)\n"); + return -1; + } + + iErr = SetAttrChangeAtimeMtime(File1); + + printf("Changed Atime and Mtime (2) err [%d]\n", iErr); + if (iErr) + { + return iErr; + } + + HFS_fsOps.fsops_reclaim(File1); + + HFS_fsOps.fsops_reclaim(Dir1); + + return iErr; +} + +static char* gpcFSAttrs [] = { + UVFS_FSATTR_PC_LINK_MAX, + UVFS_FSATTR_PC_NAME_MAX, + UVFS_FSATTR_PC_NO_TRUNC, + UVFS_FSATTR_PC_FILESIZEBITS, + UVFS_FSATTR_PC_XATTR_SIZE_BITS, + UVFS_FSATTR_BLOCKSIZE, + UVFS_FSATTR_IOSIZE, + UVFS_FSATTR_TOTALBLOCKS, + UVFS_FSATTR_BLOCKSFREE, + UVFS_FSATTR_BLOCKSAVAIL, + UVFS_FSATTR_BLOCKSUSED, + UVFS_FSATTR_CNAME, + UVFS_FSATTR_FSTYPENAME, + UVFS_FSATTR_FSSUBTYPE, + UVFS_FSATTR_VOLNAME, + UVFS_FSATTR_VOLUUID, + UVFS_FSATTR_CAPS_FORMAT, + UVFS_FSATTR_CAPS_INTERFACES, + UVFS_FSATTR_LAST_MTIME, + UVFS_FSATTR_MOUNT_TIME +}; + +static int +HFSTest_GetFSAttr( UVFSFileNode RootNode ) +{ + int iErr = 0; + size_t uLen = 512; + size_t uRetLen = 0; + UVFSFSAttributeValue* psAttrVal = (UVFSFSAttributeValue*)malloc(uLen); + assert( psAttrVal ); + + for ( uint32_t uIdx=0; uIdxfsa_bool? 
"true" : "false" ); + } + else if ( UVFS_FSATTR_IS_NUMBER( gpcFSAttrs[uIdx] ) ) + { + printf( "%llu", psAttrVal->fsa_number ); + } + else if ( UVFS_FSATTR_IS_OPAQUE( gpcFSAttrs[uIdx] ) ) + { + printf("0x"); + for ( uint32_t uOp=0; uOpfsa_opaque[uOp] ); + } + } + else if ( UVFS_FSATTR_IS_STRING( gpcFSAttrs[uIdx] ) ) + { + printf( "%s", psAttrVal->fsa_string ); + } + else + { + assert(0); + } + printf("].\n"); + } + +exit: + free(psAttrVal); + return (iErr); +} + +static int +HFSTest_WriteRead( UVFSFileNode RootNode ) +{ +#define FILENAME "NewFileForTest" +#define MAXFILESIZE (1024*1024*1024) + + int iErr = 0; + UVFSFileNode psFile = NULL; + size_t iActuallyWrite = 0; + size_t iActuallyRead = 0; + void* pvOutBuf = malloc(MAXFILESIZE); + void* pvInBuf = malloc(MAXFILESIZE); + assert( pvOutBuf != NULL && pvInBuf != NULL ); + uint64_t* puOutBuf = pvOutBuf; + uint64_t* puInBuf = pvInBuf; + + // Create new file with size 50,000 Bytes + assert( CreateNewFile( RootNode, &psFile, FILENAME, 50000 ) == 0 ); + + // lets write 10,000 Bytes with 0xCD + memset(pvOutBuf, 0, MAXFILESIZE); + memset(pvInBuf, 0, MAXFILESIZE); + + memset(pvOutBuf, 0xCD, 10000); + + assert( HFS_fsOps.fsops_write( psFile, 0, 10000, pvOutBuf, &iActuallyWrite ) == 0 ); + assert( HFS_fsOps.fsops_read( psFile, 0, 10000, pvInBuf, &iActuallyRead ) == 0 ); + + // Lets test it... + for ( uint64_t uIdx=0; uIdx<(MAXFILESIZE/sizeof(uint64_t)); uIdx++ ) + { + assert( puInBuf[uIdx] == puOutBuf[uIdx] ); + } + + // Lets extend the file to 100,000 Bytes... + memset(pvOutBuf+10000, 0xED, 90000); + assert( HFS_fsOps.fsops_write( psFile, 10000, 90000, pvOutBuf+10000, &iActuallyWrite ) == 0 ); + assert( HFS_fsOps.fsops_read( psFile, 0, 100000, pvInBuf, &iActuallyRead ) == 0 ); + + // Lets test it... 
+ for ( uint64_t uIdx=0; uIdx<(MAXFILESIZE/sizeof(uint64_t)); uIdx++ ) + { + assert( puInBuf[uIdx] == puOutBuf[uIdx] ); + } + + memset(pvOutBuf, 0, MAXFILESIZE); + memset(pvInBuf, 0, MAXFILESIZE); + assert( SetAttrChangeSize(psFile, 10000) == 0 ); + memset(pvOutBuf, 0xCD, 10000); + memset(pvOutBuf+20000, 0xBB, 10000); + + assert( HFS_fsOps.fsops_write( psFile, 20000, 10000, pvOutBuf+20000, &iActuallyWrite ) == 0 ); + assert( HFS_fsOps.fsops_read( psFile, 0, 30000, pvInBuf, &iActuallyRead ) == 0 ); + + // Lets test it... + for ( uint64_t uIdx=0; uIdx<(MAXFILESIZE/sizeof(uint64_t)); uIdx++ ) + { + assert( puInBuf[uIdx] == puOutBuf[uIdx] ); + } + + HFS_fsOps.fsops_reclaim( psFile ); + + goto exit; + +exit: + return iErr; +} + +static int +HFSTest_RandomIO( UVFSFileNode RootNode ) +{ +#define MAX_IO_SIZE (1024*1024) +#define MAX_IO_OFFSET (80*MAX_IO_SIZE) +#define TEST_RUN_TIME_SEC (30) + + int iErr = 0; + UVFSFileNode psFile; + static mach_timebase_info_data_t sTimebaseInfo; + mach_timebase_info(&sTimebaseInfo); + + void* pvWriteBuf = malloc(MAX_IO_SIZE); + void* pvReadBuf = malloc(MAX_IO_SIZE); + + int* puBuf = pvWriteBuf; + for ( uint64_t uIdx=0; uIdx<(MAX_IO_SIZE/sizeof(int)); uIdx++ ) + { + puBuf[uIdx] = rand(); + } + + iErr = CreateNewFile( RootNode, &psFile, "SimpleFile", 0 ); + assert(iErr == 0); + + uint64_t start = mach_absolute_time(); + uint64_t elapsedSec = 0; + // while( elapsedSec < TEST_RUN_TIME_SEC ) + for(uint32_t uWriteReadCnt=1000; uWriteReadCnt; uWriteReadCnt--) + { + uint64_t uNextIOSize = rand() % MAX_IO_SIZE; + uint64_t uNextIOOffset = rand() % MAX_IO_OFFSET; + + printf("uNextIOSize = %llu, uNextIOOffset = %llu\n", uNextIOSize, uNextIOOffset); + + size_t iActuallyWrite; + size_t iActuallyRead; + + iErr = HFS_fsOps.fsops_write( psFile, uNextIOOffset, uNextIOSize, pvWriteBuf, &iActuallyWrite ); + assert(iErr==0); + iErr = HFS_fsOps.fsops_read( psFile, uNextIOOffset, uNextIOSize, pvReadBuf, &iActuallyRead ); + assert(iErr==0); + + uint8_t* puRead = 
pvReadBuf; + uint8_t* puWrite = pvWriteBuf; + for ( uint64_t uIdx=0; uIdxdea_attrs ); + psCurrentReadDirTestsData = (UVFSDirEntryAttr *) ((void*) psCurrentReadDirTestsData + uEntrySize); + SetExpectedAttr("TestFile2.txt", UVFS_FA_TYPE_FILE, psCurrentReadDirTestsData); + iErr = HFS_fsOps.fsops_getattr( TestFile2, &psCurrentReadDirTestsData->dea_attrs ); + + printf("Read DIR attr:\n"); + iErr = ReadDirAttr(TestFolder, psReadDirTestsData, 2); + free(psReadDirTestsData); + printf("ReadDirAttr err [%d]\n", iErr); + if (iErr) { + goto exit; + } + + printf("Remove File1:\n"); + iErr = RemoveFile(TestFolder,"TestFile.txt"); + printf("Remove File TestFile from TestFolder err [%d]\n", iErr); + if (iErr) { + goto exit; + } + + printf("Remove File2:\n"); + iErr = RemoveFile(TestFolder,"TestFile2.txt"); + printf("Remove File TestFile2 from TestFolder err [%d]\n", iErr); + if (iErr) { + goto exit; + } + + printf("Remove TestFolder:\n"); + iErr = RemoveFolder(RootNode,"TestFolder"); + printf("Remove Folder TestFolder from Root err [%d]\n", iErr); + if (iErr) { + goto exit; + } + +exit: + HFS_fsOps.fsops_reclaim(TestFolder); + HFS_fsOps.fsops_reclaim(TestFile1); + HFS_fsOps.fsops_reclaim(TestFile2); + return iErr; +} + +static int __used +HFSTest_SetXattr( UVFSFileNode RootNode ) +{ + const char * pcAttr = "com.apple.test.set"; + const char * pcAttr2 = "com.apple.test.set2"; + char pcData[] = "This is attribute data"; + char pcData2[] = "This is attribute data 2"; + int iErr = 0; + + UVFSFileNode TestFile = NULL; + iErr = HFS_fsOps.fsops_lookup( RootNode, "kl_set.test", &TestFile ); + if ( iErr ) + { + printf("Lookup err [%d]\n", iErr); + return iErr; + } + + // Add Attribute - create + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData, strlen(pcData)+1, UVFSXattrHowCreate); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - create with failure + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData2, strlen(pcData2)+1, 
UVFSXattrHowCreate); + if ( iErr != EEXIST) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - set + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData2, strlen(pcData2)+1, UVFSXattrHowSet); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - replace + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData, strlen(pcData)+1, UVFSXattrHowReplace); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - replace with failure + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr2, pcData, strlen(pcData)+1, UVFSXattrHowReplace); + if ( iErr != ENOATTR ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - remove + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData, strlen(pcData)+1, UVFSXattrHowRemove); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - remove with failure + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr, pcData, strlen(pcData)+1, UVFSXattrHowRemove); + if ( iErr != ENOATTR ) + { + printf("SetAttr err [%d]\n", iErr); + goto out; + } + + // Get Attribute - check nothing left + size_t actual_size = INT32_MAX; + iErr = HFS_fsOps.fsops_listxattr(TestFile, NULL, 0, &actual_size); + if ( iErr || (actual_size != 0)) + { + printf("ListAttr err [%d]\n", iErr); + goto out; + } + + // Add Attribute - create extended attribute and check content + const char * pcAttr3 = "com.apple.test.set3"; + uint8_t *pBuffer = NULL; + uint8_t *pBufferRet = NULL; + + // Test more than one sector and ending outside boundary. 
+#define ATTR_EXT_SIZE (5000) + + pBuffer = malloc(ATTR_EXT_SIZE); + pBufferRet = malloc(ATTR_EXT_SIZE); + + if ( pBuffer == NULL || pBufferRet == NULL) + { + iErr = ENOMEM; + goto out; + } + + for (int i = 0; i < ATTR_EXT_SIZE; ++i) + { + pBuffer[i] = i % 256; + pBufferRet[i] = 0xff; + } + + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr3, pBuffer, ATTR_EXT_SIZE, UVFSXattrHowCreate); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out_mem; + } + + iErr = HFS_fsOps.fsops_getxattr(TestFile, pcAttr3, pBufferRet, ATTR_EXT_SIZE, &actual_size); + if ( iErr ) + { + printf("GetAttr err [%d]\n", iErr); + goto out_mem; + } + + // check data + assert(actual_size == ATTR_EXT_SIZE); + + for (int i = 0; i < ATTR_EXT_SIZE; ++i) + { + assert(pBuffer[i] == pBufferRet[i]); + } + + iErr = HFS_fsOps.fsops_setxattr(TestFile, pcAttr3, pBuffer, ATTR_EXT_SIZE, UVFSXattrHowRemove); + if ( iErr ) + { + printf("SetAttr err [%d]\n", iErr); + goto out_mem; + } + +out_mem: + free(pBuffer); + free(pBufferRet); + +out: + // Reclaim test file + HFS_fsOps.fsops_reclaim(TestFile); + + return iErr; +} + + +static int __used +HFSTest_ListXattr( UVFSFileNode RootNode ) +{ + int iErr = 0; + + UVFSFileNode TestFile = NULL; + iErr = HFS_fsOps.fsops_lookup( RootNode, "kl.test", &TestFile ); + if ( iErr ) + { + printf("Lookup err [%d]\n", iErr); + return iErr; + } + + // Get required size + size_t actual_size = 0; + iErr = HFS_fsOps.fsops_listxattr(TestFile, NULL, 0, &actual_size); + if ( iErr ) + { + printf("ListAttr err [%d]\n", iErr); + goto out; + } + + // Get Attributes + size_t size = actual_size; + char *pcBuffer = malloc(size); + if ( pcBuffer == NULL ) + { + iErr = ENOMEM; + goto out; + } + + actual_size = 0; + iErr = HFS_fsOps.fsops_listxattr(TestFile, pcBuffer, size, &actual_size); + if ( iErr ) + { + printf("ListAttr err [%d]\n", iErr); + goto mem_err; + } + + // Just check it + assert(actual_size == size); + + // Print Attributes Names + size_t attr_size = 0; + char 
*pcAttribues = pcBuffer; + + while (attr_size < size) + { + // Get required size + actual_size = 0; + iErr = HFS_fsOps.fsops_getxattr(TestFile, pcAttribues, NULL, 0, &actual_size); + if ( iErr ) + { + printf("GetAttr size err [%d]\n", iErr); + goto mem_err; + } + + // Get Attributes + size_t bufsize = actual_size+1; + char *pcAttrBuffer = malloc(bufsize); + if ( pcAttrBuffer == NULL ) + { + iErr = ENOMEM; + goto mem_err; + } + bzero(pcAttrBuffer, bufsize); + + HFS_fsOps.fsops_getxattr(TestFile, pcAttribues, pcAttrBuffer, bufsize, &actual_size); + if ( iErr ) + { + printf("GetAttr err [%d]\n", iErr); + free(pcAttrBuffer); + goto mem_err; + } + + printf("Found attribute '%s' : %s\n", pcAttribues, pcAttrBuffer); + + free(pcAttrBuffer); + + size_t curr_attr_size = strlen(pcAttribues) + 1; + + attr_size += curr_attr_size; + pcAttribues += curr_attr_size; + } + +mem_err: + free(pcBuffer); + +out: + // Reclaim test file + HFS_fsOps.fsops_reclaim(TestFile); + + return iErr; +} + +/* + * Tests List Struct. 
+ */ + +#if HFS_CRASH_TEST + #define ADD_TEST_WITH_CRASH_ABORT(testName, dmgPath, testHandler, CrashAbortType, CrashAbortCallback, CrashAbortCount) \ + { .pcTestName = testName, .pcDMGPath = dmgPath, .pfTestHandler = testHandler, \ + .eCrashID = CrashAbortType, .pAbortFunc = CrashAbortCallback, \ + .uCrashAbortCnt = CrashAbortCount \ + } +#endif + +#define ADD_TEST(testName, dmgPath, testHandler) \ +{ .pcTestName = testName, .pcDMGPath = dmgPath, .pfTestHandler = testHandler, \ +} + +#define ADD_TEST_NO_SYNC(testName, dmgPath, testHandler) \ +{ .pcTestName = testName, .pcDMGPath = dmgPath, .pfTestHandler = testHandler, \ +} + +TestData_S gsTestsData[] = { +#if 1 // Enable non-journal tests + ADD_TEST( "HFSTest_JustMount", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_JustMount ), + ADD_TEST( "HFSTest_ReadDefragmentFile", "/Volumes/SSD_Shared/FS_DMGs/HFSDeFragment.dmg", &HFSTest_ReadDefragmentFile ), + ADD_TEST( "HFSTest_RemoveDir", "/Volumes/SSD_Shared/FS_DMGs/HFSRemoveDir.dmg", &HFSTest_RemoveDir ), + ADD_TEST( "HFSTest_Remove", "/Volumes/SSD_Shared/FS_DMGs/HFSRemove.dmg", &HFSTest_Remove ), + ADD_TEST( "HFSTest_ReadDir", "/Volumes/SSD_Shared/FS_DMGs/HFSReadDir.dmg", &HFSTest_ReadDir ), + ADD_TEST( "HFSTest_ReadDirAttr", "/Volumes/SSD_Shared/FS_DMGs/HFSReadDir.dmg", &HFSTest_ReadDirAttr ), + ADD_TEST( "HFSTest_MakeDir", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_MakeDir ), + ADD_TEST( "HFSTest_SetAttr", "/Volumes/SSD_Shared/FS_DMGs/HFSSetAttr.dmg", &HFSTest_SetAttr ), + ADD_TEST( "HFSTest_ReadSymLink", "/Volumes/SSD_Shared/FS_DMGs/HFSReadSymLink.dmg", &HFSTest_ReadSymlink ), + ADD_TEST( "HFSTest_GetFSAttr", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_GetFSAttr ), + ADD_TEST( "HFSTest_Create", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_Create ), + ADD_TEST( "HFSTest_Symlink", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_Symlink ), + ADD_TEST( "HFSTest_SymlinkOnFile", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", 
&HFSTest_SymlinkOnFile ), + ADD_TEST( "HFSTest_Rename", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_Rename ), + ADD_TEST( "HFSTest_WriteRead", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_WriteRead ), + ADD_TEST( "HFSTest_RandomIO", "/Volumes/SSD_Shared/FS_DMGs/HFS100MB.dmg", &HFSTest_RandomIO ), + ADD_TEST( "HFSTest_Create1000Files", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_Create1000Files ), + ADD_TEST( "HFSTest_HardLink", "/Volumes/SSD_Shared/FS_DMGs/HFSHardLink.dmg", &HFSTest_HardLink ), + ADD_TEST( "HFSTest_CreateHardLink", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_CreateHardLink ), + ADD_TEST( "HFSTest_RenameToHardlink", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_RenameToHardlink ), + ADD_TEST( "HFSTest_SetXattr", "/Volumes/SSD_Shared/FS_DMGs/HFSXattr.dmg", &HFSTest_SetXattr ), + ADD_TEST( "HFSTest_ListXattr", "/Volumes/SSD_Shared/FS_DMGs/HFSXattr.dmg", &HFSTest_ListXattr ), + ADD_TEST( "HFSTest_RootFillUp", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_RootFillUp ), + ADD_TEST( "HFSTest_ScanDir", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_ScanDir ), + ADD_TEST( "HFSTest_MultiThreadedRW", CREATE_HFS_DMG, &HFSTest_MultiThreadedRW_wJournal ), + ADD_TEST_NO_SYNC( "HFSTest_ValidateUnmount", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_ValidateUnmount ), + ADD_TEST( "HFSTest_ScanID", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_ScanID ), +#endif +#if 1 // Enbale journal-tests + ADD_TEST( "HFSTest_OpenJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_OpenJournal ), + ADD_TEST( "HFSTest_WriteToJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_WriteToJournal ), + ADD_TEST( "HFSTest_JustMount_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_JustMount ), + ADD_TEST( "HFSTest_ReadDefragmentFile_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-DeFragment.dmg", &HFSTest_ReadDefragmentFile ), + ADD_TEST( "HFSTest_RemoveDir_wJournal", 
"/Volumes/SSD_Shared/FS_DMGs/HFSJ-RemoveDir.dmg", &HFSTest_RemoveDir ), + ADD_TEST( "HFSTest_Remove_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Remove.dmg", &HFSTest_Remove ), + ADD_TEST( "HFSTest_ReadDir_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-ReadDir.dmg", &HFSTest_ReadDir ), + ADD_TEST( "HFSTest_ReadDirAttr_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-ReadDir.dmg", &HFSTest_ReadDirAttr ), + ADD_TEST( "HFSTest_MakeDir_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_MakeDir ), + ADD_TEST( "HFSTest_SetAttr_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-SetAttr.dmg", &HFSTest_SetAttr ), + ADD_TEST( "HFSTest_ReadSymLink_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-ReadSymLink.dmg", &HFSTest_ReadSymlink ), + ADD_TEST( "HFSTest_GetFSAttr_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_GetFSAttr ), + ADD_TEST( "HFSTest_Create_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_Create ), + ADD_TEST( "HFSTest_Symlink_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_Symlink ), + ADD_TEST( "HFSTest_SymlinkOnFile", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", &HFSTest_SymlinkOnFile ), + ADD_TEST( "HFSTest_Rename_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_Rename ), + ADD_TEST( "HFSTest_WriteRead_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_WriteRead ), + ADD_TEST( "HFSTest_RandomIO_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-144MB.dmg", &HFSTest_RandomIO ), + ADD_TEST( "HFSTest_Create1000Files_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-EmptyLarge.dmg", &HFSTest_Create1000Files ), + ADD_TEST( "HFSTest_HardLink_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-HardLink.dmg", &HFSTest_HardLink ), + ADD_TEST( "HFSTest_CreateHardLink_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-EmptyLarge.dmg", &HFSTest_CreateHardLink ), + ADD_TEST( "HFSTest_RootFillUp_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-EmptyLarge.dmg", &HFSTest_RootFillUp ), + ADD_TEST( 
"HFSTest_MultiThreadedRW_wJournal", "", &HFSTest_MultiThreadedRW_wJournal ), + ADD_TEST( "HFSTest_DeleteAHugeDefragmentedFile_wJournal", "", &HFSTest_DeleteAHugeDefragmentedFile_wJournal ), + ADD_TEST( "HFSTest_CreateJournal_Sparse", CREATE_SPARSE_VOLUME, &HFSTest_OpenJournal ), + ADD_TEST( "HFSTest_MakeDirAndKeep_Sparse", CREATE_SPARSE_VOLUME, &HFSTest_MakeDirAndKeep ), + ADD_TEST( "HFSTest_CreateAndWriteToJournal_Sparse", CREATE_SPARSE_VOLUME, &HFSTest_WriteToJournal ), + ADD_TEST( "HFSTest_MultiThreadedRW_wJournal_Sparse", CREATE_SPARSE_VOLUME, &HFSTest_MultiThreadedRW_wJournal ), + ADD_TEST( "HFSTest_ScanDir", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_ScanDir ), + ADD_TEST_NO_SYNC( "HFSTest_ValidateUnmount_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_ValidateUnmount_wJournal ), + ADD_TEST( "HFSTest_Corrupted2ndDiskImage", "/Volumes/SSD_Shared/FS_DMGs/corrupted_80M.dmg.sparseimage", + &HFSTest_Corrupted2ndDiskImage ), + ADD_TEST( "HFSTest_ScanID", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", &HFSTest_ScanID ), + +#endif +#if HFS_CRASH_TEST + // The following 2 tests checks mount after unmount, no-journal + ADD_TEST_WITH_CRASH_ABORT( "HFSTest_OneSync", "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg", + &HFSTest_OneSync, CRASH_ABORT_RANDOM, HFSTest_SaveDMG, 0 ), + ADD_TEST( "HFSTest_ConfirmTestFolderExists", TEMP_DMG_BKUP, &HFSTest_ConfirmTestFolderExists ), + + // The following 2 tests checks mount after unmount with journal + ADD_TEST_WITH_CRASH_ABORT( "HFSTest_OneSync_wJournal", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_OneSync, CRASH_ABORT_RANDOM, HFSTest_SaveDMG, 0 ), + ADD_TEST( "HFSTest_ConfirmTestFolderExists", TEMP_DMG_BKUP, &HFSTest_ConfirmTestFolderExists ), + + ADD_TEST_WITH_CRASH_ABORT("HFSTest_OpenJournal_wCrashOnMakeDir", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_OpenJournal, CRASH_ABORT_MAKE_DIR, HFSTest_FailTestOnCrashAbort, 0), + + 
ADD_TEST_WITH_CRASH_ABORT("HFSTest_OpenJournal_wCrashAfterBlockData", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_OpenJournal, CRASH_ABORT_JOURNAL_AFTER_BLOCK_DATA, HFSTest_CrashAbort, 0), + + ADD_TEST_WITH_CRASH_ABORT("HFSTest_OpenJournal_wCrashAfterJournalData", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_OpenJournal, CRASH_ABORT_JOURNAL_AFTER_JOURNAL_DATA, HFSTest_CrashAbort, 0), + + ADD_TEST_WITH_CRASH_ABORT("HFSTest_OpenJournal_wCrashAfterJournalHeader", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_OpenJournal, CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, HFSTest_CrashAbort, 0), + + ADD_TEST_WITH_CRASH_ABORT("MultiThreadedRW_wJournal_RandomCrash", CREATE_SPARSE_VOLUME, + &MultiThreadedRW_wJournal_RandomCrash, CRASH_ABORT_RANDOM, HFSTest_CrashAbortAtRandom, 0), + + // The following 2 tests check journal replay, make sure the drive is mountable, and the created fonder DOES exist + ADD_TEST_WITH_CRASH_ABORT("HFSTest_MakeDirAndKeep_wCrashAfterJournalHeader", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_MakeDirAndKeep, CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, HFSTest_CrashAbortOnMkDir, 0), + ADD_TEST( "HFSTest_ConfirmTestFolderExists", TEMP_DMG_BKUP, &HFSTest_ConfirmTestFolderExists ), + + // The following 2 tests check journal replay, make sure the drive is mountable, and the created fonder does NOT exist + ADD_TEST_WITH_CRASH_ABORT("HFSTest_MakeDirAndKeep_wCrashAfterJournalData", "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + &HFSTest_MakeDirAndKeep, CRASH_ABORT_JOURNAL_AFTER_JOURNAL_DATA, HFSTest_CrashAbortOnMkDir, 0), + ADD_TEST( "HFSTest_ConfirmTestFolderDoesntExists", TEMP_DMG_BKUP, &HFSTest_ConfirmTestFolderDoesntExists ), + + // The following 2 tests check journal replay, make sure the drive is mountable, and the created fonder DOES exist + ADD_TEST_WITH_CRASH_ABORT("HFSTest_MakeDirAndKeep_wCrashAfterJournalHeader_Sparse", CREATE_SPARSE_VOLUME, + &HFSTest_MakeDirAndKeep, 
CRASH_ABORT_JOURNAL_AFTER_JOURNAL_HEADER, HFSTest_CrashAbortOnMkDir, 1), + ADD_TEST( "HFSTest_ConfirmTestFolderExists", TEMP_DMG_BKUP_SPARSE, &HFSTest_ConfirmTestFolderExists ), +#endif + +}; + +void *SyncerThread(void *pvArgs) { + + int iErr = 0; + TestData_S *psTestData = pvArgs; + + printf("Syncer Thread runs every %u mS\n", guSyncerPeriod); + + while(psTestData->bSyncerOn) { + usleep(guSyncerPeriod * 1000); + iErr = HFS_fsOps.fsops_sync(psTestData->psRootNode); + psTestData->uSyncerCount++; + if (iErr) { + printf("fsops_sync returned %d\n", iErr); + break; + } + } + + TesterThreadReturnStatus_S *psReturnStatus = malloc(sizeof(TesterThreadReturnStatus_S)); + assert(psReturnStatus); + memset(psReturnStatus, 0, sizeof(*psReturnStatus)); + + printf("Syncer returns %d\n", iErr); + + psReturnStatus->iErr = iErr; + + return((void*)psReturnStatus); +} + +static int KickOffSyncerThread(TestData_S *psTestData) { + + int iErr = 0; + + if (guSyncerPeriod == 0) { + goto exit; + } + psTestData->bSyncerOn = true; + + pthread_attr_t sAttr; + pthread_attr_init(&sAttr); + pthread_attr_setdetachstate(&sAttr, PTHREAD_CREATE_JOINABLE); + + iErr = pthread_create(&psTestData->sSyncerThread, &sAttr, SyncerThread, psTestData); + + pthread_attr_destroy(&sAttr); + +exit: + return(iErr); +} + +static int ShutdownSyncerThread(TestData_S *psTestData) { + + int iErr = 0; + TesterThreadReturnStatus_S *psReturnStatus = NULL; + + if (guSyncerPeriod == 0) { + goto exit; + } + + psTestData->bSyncerOn = false; + iErr = pthread_join(psTestData->sSyncerThread, (void*)&psReturnStatus); + if (iErr) { + printf("Error waiting for Syncer thread! 
%d\n", iErr); + goto exit; + } + + printf("Syncer Thead ran %u times (iErr %d)\n", psTestData->uSyncerCount, iErr); + + assert(psReturnStatus); + + iErr = psReturnStatus->iErr; + if (iErr) { + printf("Syncer thread returned iErr = %d\n", iErr); + goto exit; + } + +exit: + if (psReturnStatus) { + free(psReturnStatus); + } + + return(iErr); +} + +static int HFSTest_RunTest(TestData_S *psTestData) { + UVFSScanVolsRequest sScanVolsReq = {0}; + UVFSScanVolsReply sScanVolsReply = {0}; + int iErr = 0; + int iFD = HFSTest_PrepareEnv( psTestData ); + giFD = iFD; + + iErr = HFS_fsOps.fsops_taste( iFD ); + printf("Taste err [%d]\n",iErr); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + iErr = HFS_fsOps.fsops_scanvols( iFD, &sScanVolsReq, &sScanVolsReply ); + printf("ScanVols err [%d]\n", iErr); + if ( iErr ) + { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + UVFSFileNode RootNode = NULL; + iErr = HFS_fsOps.fsops_mount( iFD, sScanVolsReply.sr_volid, 0, NULL, &RootNode ); + printf("Mount err [%d]\n", iErr); + if ( iErr ) + { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + psTestData->psRootNode = RootNode; + iErr = KickOffSyncerThread(psTestData); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + // Execute the test + iErr = psTestData->pfTestHandler( RootNode ); + printf("Test [%s] finish with error code [%d]\n", psTestData->pcTestName, iErr); + #if HFS_CRASH_TEST + if (psTestData->eCrashID == CRASH_ABORT_NONE) + #endif + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + iErr = ShutdownSyncerThread(psTestData); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + iErr = HFS_fsOps.fsops_unmount(RootNode, UVFSUnmountHintNone); + printf("UnMount err [%d]\n", iErr); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + HFSTest_PrintCacheStats(); + + #if HFS_CRASH_TEST + if 
(psTestData->eCrashID != CRASH_ABORT_NONE) { + + // Execute post crash analysis + iErr = psTestData->pAbortFunc(psTestData, + gsCrashReport.eCrashID, + iFD, + gsCrashReport.psNode, + gsCrashReport.pSyncerThread); + + printf("Analysis [%s] finished with error code [%d]\n", psTestData->pcTestName, iErr); + + if (iErr) { + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + } else + #endif + { + // close file + close(iFD); + + //assert(gCacheStat.buf_cache_size == 0); + + // Run fsck + char pcFsckCmd[512] = {0}; + strcat( pcFsckCmd, "/System/Library/Filesystems/hfs.fs/Contents/Resources/fsck_hfs -fd /dev/disk"); + + strcat( pcFsckCmd, pcLastDevPathName ); + if (pcDevNum[0] != '\0') { + strcat( pcFsckCmd, "s" ); + strcat( pcFsckCmd, pcDevNum ); + } + printf("Execute %s\n", pcFsckCmd); + iErr = system( pcFsckCmd ); + if ( iErr ) + { + printf( "*** Fsck CMD failed! (%d)\n", iErr); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } else { + printf( "*** Fsck CMD succeeded!\n"); + } + + HFSTest_DestroyEnv( iFD ); + } + return(iErr); +} + +#if HFS_CRASH_TEST +int HFSTest_CrashAbort_Hanlder(CrashAbort_E eAbort, int iFD, UVFSFileNode psNode, pthread_t pSyncerThread) { + printf("HFSTest_CrashAbort_Hanlder (%u):\n", guCrashAbortCnt); + if (guCrashAbortCnt) { + guCrashAbortCnt--; + return(0); + } + + close(iFD); // prevent additional writes to media + if (pSyncerThread == pthread_self()) { + printf("Crash Abort on Syncer Thread!\n"); + } + + gsCrashReport.uCrashCount++; + gsCrashReport.eCrashID = eAbort; + gsCrashReport.iFD = iFD; + gsCrashReport.psNode = psNode; + gsCrashReport.pSyncerThread = pSyncerThread; + + return(0); +} +#endif + + +int hfs_tester_run_fsck(void) +{ + // Journaled + int iErr = HFS_fsOps.fsops_init(); + printf("Init err [%d]\n",iErr); + if (iErr) + exit(-1); + + TestData_S sTestData = { + .pcTestName = "hfs_tester_run_fsck", + .pcDMGPath = "/Volumes/SSD_Shared/FS_DMGs/HFSJ-Empty.dmg", + }; + + int iFD = HFSTest_PrepareEnv(&sTestData); + + iErr = 
HFS_fsOps.fsops_taste( iFD ); + printf("Taste err [%d]\n",iErr); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + iErr = HFS_fsOps.fsops_check(iFD, 0, NULL, QUICK_CHECK); + printf("Check err [%d]\n",iErr); + if (iErr) + exit(-1); + + close(iFD); + HFSTest_DestroyEnv( iFD ); + + // Non-Journaled + iErr = HFS_fsOps.fsops_init(); + printf("Init err [%d]\n",iErr); + if (iErr) + exit(-1); + + sTestData.pcDMGPath = "/Volumes/SSD_Shared/FS_DMGs/HFSEmpty.dmg"; + + iFD = HFSTest_PrepareEnv(&sTestData); + + iErr = HFS_fsOps.fsops_taste( iFD ); + printf("Taste err [%d]\n",iErr); + if ( iErr ) { + close(iFD); + HFSTest_DestroyEnv( iFD ); + return(iErr); + } + + iErr = HFS_fsOps.fsops_check(iFD, 0, NULL, QUICK_CHECK); + printf("Check err [%d]\n",iErr); + if (iErr) + exit(-1); + + close(iFD); + HFSTest_DestroyEnv( iFD ); + return iErr; +} + +int hfs_tester_run(uint32_t uFirstTest, uint32_t uLastTest) +{ + uint32_t uTestRan = 0; + int iErr = HFS_fsOps.fsops_init(); + printf("Init err [%d]\n",iErr); + if (iErr) + exit(-1); + + uint32_t uAvailTests = sizeof(gsTestsData)/sizeof(gsTestsData[0]); + if ((!uLastTest) || + (uLastTest > uAvailTests)) { + + uLastTest = uAvailTests; + } + + for ( unsigned uTestNum=uFirstTest; uTestNum < uLastTest ; uTestNum++ ) + { + printf("******************************************************************************************\n"); + printf("**** about to run test [%s] [%u] \n", gsTestsData[uTestNum].pcTestName, uTestNum); + printf("******************************************************************************************\n"); + + #if HFS_CRASH_TEST + HFSTest_ClearCrashAbortFunctionArray(); + + if (gsTestsData[uTestNum].eCrashID) { + // Inject Crach condition + CrashAbort_E eCrashID = gsTestsData[uTestNum].eCrashID; + printf( "Adding Crash-Abort Function at (%u), %s.\n", eCrashID, ppcCrashAbortDesc[eCrashID] ); + guCrashAbortCnt = gsTestsData[uTestNum].uCrashAbortCnt; + gpsCrashAbortFunctionArray[eCrashID] = 
HFSTest_CrashAbort_Hanlder; + memset(&gsCrashReport, 0, sizeof(gsCrashReport)); + } + #endif + + iErr = HFSTest_RunTest(&gsTestsData[uTestNum]); + + if (iErr) { + exit(-1); + } + uTestRan++; + } + + HFS_fsOps.fsops_fini(); + + printf("*** Run %u out of %u tests successfully\n", uTestRan, uAvailTests); + + return 0; +} + +/*******************************************/ +/*******************************************/ +/*******************************************/ +// Predefined Tests END. +/*******************************************/ +/*******************************************/ +/*******************************************/ +int main( int argc, const char * argv[] ) { + uint32_t uFirstTest = 0; + uint32_t uLastTest = 0; + + time_t sTimeStamp = time(NULL); + char pcTimeStamp[256] = {0}; + strcpy(pcTimeStamp, ctime(&sTimeStamp)); + pcTimeStamp[strlen(pcTimeStamp)-1] = '\0'; // remove \n + sprintf(gpcResultsFolder, "\"/tmp/%s\"", pcTimeStamp); + printf("*** gpcResultsFolder is %s\n", gpcResultsFolder); + + + if ((argc < 2) || (argc > 5)) + { + printf("Usage : livefiles_hfs_tester < dev-path / RUN_HFS_TESTS > [First Test] [Last Test] [Syncer Period (mS)]\n"); + exit(1); + } + + printf( "livefiles_hfs_tester %s (%u)\n", argv[1], uFirstTest ); + + if (argc >= 3) { + sscanf(argv[2], "%u", &uFirstTest); + } + + if (argc >= 4) { + sscanf(argv[3], "%u", &uLastTest); + } + + if (argc >= 5) { + sscanf(argv[4], "%u", &guSyncerPeriod); + } + + if ( strncmp(argv[1], HFS_TEST_PREFIX, strlen(HFS_TEST_PREFIX)) == 0 ) + { + int err = hfs_tester_run(uFirstTest, uLastTest); + printf("*** hfs_tester_run return status : %d ***\n", err); + if (err >= 256) err = -1; // exit code overflow + exit(err); + + } else if ( strncmp(argv[1], HFS_RUN_FSCK, strlen(HFS_RUN_FSCK)) == 0 ) + { + int err = hfs_tester_run_fsck(); + printf("*** hfs_tester_run_fsck return status : %d ***\n", err); + if (err >= 256) err = -1; // exit code overflow + exit(err); + } + + UVFSScanVolsRequest sScanVolsReq = {0}; + 
UVFSScanVolsReply sScanVolsReply = {0}; + int err = 0; + int fd = open( argv[1], O_RDWR ); + int uCycleCounter = 0; + + UVFSFileNode RootNode = NULL; + if(fd < 0) + { + printf("Failed to open [%s] errno %d\n", argv[1], errno); + return EBADF; + } + + do + { + + err = HFS_fsOps.fsops_init(); + printf("Init err [%d]\n",err); + if (err) break; + + err = HFS_fsOps.fsops_taste(fd); + printf("Taste err [%d]\n",err); + if (err) break; + + err = HFS_fsOps.fsops_scanvols(fd, &sScanVolsReq, &sScanVolsReply); + printf("ScanVols err [%d]\n",err); + if (err) break; + + err = HFS_fsOps.fsops_mount(fd, sScanVolsReply.sr_volid, 0, NULL, &RootNode); + printf("Mount err [%d]\n",err); + if (err) break; + + ReadDirAttr(RootNode,NULL, 0); + + UVFSFileNode D1_Node = NULL; + err = CreateNewFolder(RootNode,&D1_Node,"D1"); + printf("CreateNewFolder err [%d]\n",err); + if (err) break; + + bool bFound = false; + read_directory_and_search_for_name( RootNode, "D1", &bFound, NULL, 0); + if (!bFound) + { + printf("Dir read failed, D1 wasn't found in Root"); + break; + } + + HFS_fsOps.fsops_reclaim(D1_Node); + + // Remove D1 + err = RemoveFolder(RootNode,"D1"); + printf("Remove Folder D1 from Root err [%d]\n",err); + if (err) break; + + uCycleCounter++; + }while(uCycleCounter < TEST_CYCLE_COUNT); + + err = HFS_fsOps.fsops_unmount(RootNode, UVFSUnmountHintNone); + printf("UnMount err [%d]\n",err); + + return err; +} + + diff --git a/livefiles_hfs_plugin/livefiles_hfs_tester.entitlements b/livefiles_hfs_plugin/livefiles_hfs_tester.entitlements new file mode 100644 index 0000000..4396b7d --- /dev/null +++ b/livefiles_hfs_plugin/livefiles_hfs_tester.entitlements @@ -0,0 +1,10 @@ + + + + + com.apple.rootless.restricted-block-devices + + com.apple.private.security.disk-device-access + + + diff --git a/livefiles_hfs_plugin/livefiles_hfs_tester.h b/livefiles_hfs_plugin/livefiles_hfs_tester.h new file mode 100644 index 0000000..98bab18 --- /dev/null +++ b/livefiles_hfs_plugin/livefiles_hfs_tester.h @@ 
-0,0 +1,14 @@ +/* Copyright © 2017 Apple Inc. All rights reserved. + * + * livefiles_hfs_tester.h + * hfs + * + * Created by Yakov Ben Zaken on 31/12/2017. +*/ + +#ifndef livefiles_hfs_tester_h +#define livefiles_hfs_tester_h + +#include + +#endif /* livefiles_hfs_tester_h */ diff --git a/livefiles_hfs_plugin/scripts/CreateRelease.py b/livefiles_hfs_plugin/scripts/CreateRelease.py new file mode 100755 index 0000000..67704c9 --- /dev/null +++ b/livefiles_hfs_plugin/scripts/CreateRelease.py @@ -0,0 +1,73 @@ +#!/usr/bin/python + +import os, sys, shutil + +# Temporarily change folder to another folder and then go back +class Temp_Chdir: + def __init__(self, newDir): + self.prevDir = os.getcwd() + self.newDir = newDir + + def __enter__(self): + os.chdir(self.newDir) + + def __exit__(self, type, value, traceback): + os.chdir(self.prevDir) + +# Print and execute something +def execute(cmd): + print('Command to execute: ' + cmd) + ret = os.system(cmd) + if (ret != 0): raise Exception('Shell command failed with error code {}'.format(ret)) + return ret + +if ( len(sys.argv) != 4 ): + raise Exception('Usage : python CreateRelease.py ') + + +PROJECT_ROOT_FOLDER = sys.argv[1] +REL = sys.argv[2] +CONF = sys.argv[3] +if CONF not in ['Debug', 'Release']: + raise Exception('Bad confuguration {}'.format(CONF)) + +print('Creating HFSPlugin Project [{}] Release [{}]...'.format(PROJECT_ROOT_FOLDER, REL)) +SVN_URL = 'http://iliha1-svn01.euro.apple.com/svn/Integration/S4E/production/Files/hfs_plugin' + +shutil.rmtree('/tmp/tmp_HFSPlugin', ignore_errors=True) + +with Temp_Chdir('/tmp'): + execute('svn co {} tmp_HFSPlugin --depth immediates'.format(SVN_URL)) + os.makedirs('tmp_HFSPlugin/{}'.format(REL)) + + +targetsInfo = { + 'iOS' : { + 'sdk' : 'iphoneos.internal', + 'buildDir' : 'build/{}-iphoneos/'.format(CONF) + }, + 'OSX' : { + 'sdk' : 'macosx.internal', + 'buildDir' : 'build/{}/'.format(CONF) + } +} + +with Temp_Chdir(PROJECT_ROOT_FOLDER): + + for target, info in targetsInfo.items(): 
+ execute('mkdir -p /tmp/tmp_HFSPlugin/{}/{}'.format(REL, target)) + execute('xcodebuild -target livefiles_hfs -sdk {} -configuration {}'.format(info['sdk'], CONF)) + shutil.copy('{}/livefiles_hfs.dylib'.format(info['buildDir']), '/tmp/tmp_HFSPlugin/{}/{}/'.format(REL, target)) + + execute('echo xcodebuild configuration : {} > /tmp/tmp_HFSPlugin/{}/release_notes.txt'.format(CONF, REL)) + execute('echo Source branch : `git rev-parse --abbrev-ref HEAD` >> /tmp/tmp_HFSPlugin/{}/release_notes.txt'.format(REL)) + execute('echo Last commit Hash : `git rev-parse HEAD` >> /tmp/tmp_HFSPlugin/{}/release_notes.txt'.format(REL)) + execute('echo "\n\n In Order to checkout the source files execute :\n\tgit checkout `git rev-parse --abbrev-ref HEAD` \n\tgit checkout `git rev-parse HEAD`" >> /tmp/tmp_HFSPlugin/{}/release_notes.txt'.format(REL)) + +## Add to SVN: +with Temp_Chdir('/tmp/tmp_HFSPlugin'): + execute('svn add {}'.format(REL)) + execute('svn ci -m "Added release {}"'.format(REL)) + +print('All set! 
{} HFSPlugin release is ready'.format(REL)) + diff --git a/mount_hfs/mount_hfs.c b/mount_hfs/mount_hfs.c index afd8c3e..9e8fcff 100644 --- a/mount_hfs/mount_hfs.c +++ b/mount_hfs/mount_hfs.c @@ -54,9 +54,9 @@ /* Sensible wrappers over the byte-swapping routines */ #include "hfs_endian.h" -#if !TARGET_OS_EMBEDDED -#include "optical.h" -#endif +#if TARGET_OS_OSX +#include "optical.h" //only include optical headers on Macs +#endif //osx #include @@ -456,11 +456,13 @@ main(argc, argv) int do_rekey = 0; int tmp_mntflags = 0; -#if TARGET_OS_EMBEDDED + +#if TARGET_OS_IPHONE mntflags = MNT_NOATIME; -#else +#else // !TARGET_OS_IPHONE mntflags = 0; -#endif +#endif // TARGET_OS_IPHONE + encp = NULL; (void)memset(&args, '\0', sizeof(struct hfs_mount_args)); @@ -648,7 +650,7 @@ main(argc, argv) #endif -#if !TARGET_OS_EMBEDDED +#if TARGET_OS_OSX /* * We shouldn't really be calling up to other layers, but * an exception was made in this case to fix the situation diff --git a/mount_hfs/mount_hfs.osx.entitlements b/mount_hfs/mount_hfs.osx.entitlements index 0288bfa..fb35d7f 100644 --- a/mount_hfs/mount_hfs.osx.entitlements +++ b/mount_hfs/mount_hfs.osx.entitlements @@ -2,7 +2,7 @@ - com.apple.rootless.install + com.apple.rootless.restricted-block-devices diff --git a/newfs_hfs/makehfs.c b/newfs_hfs/makehfs.c index 9f7b5e0..724a43d 100644 --- a/newfs_hfs/makehfs.c +++ b/newfs_hfs/makehfs.c @@ -194,15 +194,15 @@ void SETOFFSET (void *buffer, UInt16 btNodeSize, SInt16 recOffset, SInt16 vecOff #define ROUNDUP(x, u) (((x) % (u) == 0) ? (x) : ((x)/(u) + 1) * (u)) -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE #define ENCODING_TO_BIT(e) \ ((e) < 48 ? (e) : 0) -#else +#else // !TARGET_OS_IPHONE #define ENCODING_TO_BIT(e) \ ((e) < 48 ? (e) : \ ((e) == kCFStringEncodingMacUkrainian ? 48 : \ ((e) == kCFStringEncodingMacFarsi ? 
49 : 0))) -#endif +#endif // TARGET_OS_IPHONE #ifdef DEBUG_BUILD @@ -1371,7 +1371,7 @@ WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector, WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer); } -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE static int get_dev_uuid(const char *disk_name, char *dev_uuid_str, int dev_uuid_len) { @@ -1416,7 +1416,7 @@ clear_journal_dev(const char *dev_name) close(fd); return 0; } -#endif /* !TARGET_OS_EMBEDDED */ +#endif /* !(TARGET_OS_IPHONE) */ static int @@ -1430,36 +1430,43 @@ WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector, memset(buffer, 0xdb, driveInfo->physSectorSize); memset(jibp, 0, sizeof(JournalInfoBlock)); -#if !TARGET_OS_EMBEDDED - if (dp->journalDevice) { - char uuid_str[64]; - - if (get_dev_uuid(dp->journalDevice, uuid_str, sizeof(uuid_str)) == 0) { - strlcpy((char *)&jibp->reserved[0], uuid_str, sizeof(jibp->reserved)); - - // we also need to blast out some zeros to the journal device - // in case it had a file system on it previously. that way - // it's "initialized" in the sense that the previous contents - // won't get mounted accidently. if this fails we'll bail out. - if (clear_journal_dev(dp->journalDevice) != 0) { - return -1; - } - } else { - printf("FAILED to get the device uuid for device %s\n", dp->journalDevice); - strlcpy((char *)&jibp->reserved[0], "NO-DEV-UUID", sizeof(jibp->reserved)); - return -1; - } - } else { -#endif + //initialize it to start in FS jibp->flags = kJIJournalInFSMask; -#if !TARGET_OS_EMBEDDED - } -#endif + +#if TARGET_OS_OSX + if (dp->journalDevice) { + //if not in FS, then un-set flags + jibp->flags = 0; + + char uuid_str[64]; + + if (get_dev_uuid(dp->journalDevice, uuid_str, sizeof(uuid_str)) == 0) { + strlcpy((char *)&jibp->reserved[0], uuid_str, sizeof(jibp->reserved)); + + // we also need to blast out some zeros to the journal device + // in case it had a file system on it previously. 
that way + // it's "initialized" in the sense that the previous contents + // won't get mounted accidently. if this fails we'll bail out. + if (clear_journal_dev(dp->journalDevice) != 0) { + return -1; + } + } else { + printf("FAILED to get the device uuid for device %s\n", dp->journalDevice); + strlcpy((char *)&jibp->reserved[0], "NO-DEV-UUID", sizeof(jibp->reserved)); + return -1; + } + } +#endif // TARGET_OS_OSX + jibp->flags |= kJIJournalNeedInitMask; - if (NEWFS_HFS_DEBUG && dp->journalBlock) + + if (NEWFS_HFS_DEBUG && dp->journalBlock) { journalBlock = dp->journalBlock; - else + } + else { journalBlock = header->journalInfoBlock + 1; + } + jibp->offset = ((UInt64) journalBlock) * header->blockSize; jibp->size = dp->journalSize; diff --git a/tests/cases/test-cas-bsdflags.c b/tests/cases/test-cas-bsdflags.c new file mode 100644 index 0000000..d9d0e93 --- /dev/null +++ b/tests/cases/test-cas-bsdflags.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "hfs-tests.h" +#include "test-utils.h" +#include "disk-image.h" + +//TEST(cas_bsdflags) + +static bool +cas_bsd_flags(int fd, uint32_t expected_flags, uint32_t new_flags) +{ + struct fsioc_cas_bsdflags cas; + + cas.expected_flags = expected_flags; + cas.new_flags = new_flags; + cas.actual_flags = ~0; /* poison */ + + assert_no_err(ffsctl(fd, FSIOC_CAS_BSDFLAGS, &cas, 0)); + return (cas.expected_flags == cas.actual_flags); +} + +int run_cas_bsdflags(__unused test_ctx_t *ctx) +{ + disk_image_t *di = disk_image_get(); + struct stat sb; + int fd; + + char *file; + asprintf(&file, "%s/cas_bsdflags.data", di->mount_point); + + assert_with_errno((fd = open(file, + O_CREAT | O_RDWR | O_TRUNC, 0666)) >= 0); + + assert_no_err(fchflags(fd, UF_HIDDEN)); + assert_no_err(fstat(fd, &sb)); + assert(sb.st_flags == UF_HIDDEN); + + assert(cas_bsd_flags(fd, 0, UF_NODUMP) == false); + assert_no_err(fstat(fd, &sb)); + 
assert(sb.st_flags == UF_HIDDEN); + + assert(cas_bsd_flags(fd, UF_HIDDEN, UF_NODUMP) == true); + assert_no_err(fstat(fd, &sb)); + assert(sb.st_flags == UF_NODUMP); + + assert(cas_bsd_flags(fd, UF_NODUMP, 0) == true); + assert_no_err(fstat(fd, &sb)); + assert(sb.st_flags == 0); + + close(fd); + assert_no_err(unlink(file)); + free(file); + + return 0; +} + diff --git a/tests/cases/test-chflags.c b/tests/cases/test-chflags.c index d2c73a1..3f48859 100644 --- a/tests/cases/test-chflags.c +++ b/tests/cases/test-chflags.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -88,4 +88,4 @@ int run_chflags(__unused test_ctx_t *ctx) return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-class-roll.c b/tests/cases/test-class-roll.c index df097ab..f909985 100644 --- a/tests/cases/test-class-roll.c +++ b/tests/cases/test-class-roll.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -10,6 +10,8 @@ #include #import #include +#include +#include #include "hfs-tests.h" #include "test-utils.h" @@ -223,4 +225,4 @@ int run_class_roll(__unused test_ctx_t *ctx) return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // TARGET_OS_IPHONE & !SIM diff --git a/tests/cases/test-dir-link.c b/tests/cases/test-dir-link.c index 857707b..cdc58d2 100644 --- a/tests/cases/test-dir-link.c +++ b/tests/cases/test-dir-link.c @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -99,4 +99,4 @@ int run_dir_link(__unused test_ctx_t *ctx) return 0; } -#endif // !TARGET_OS_EMBEDDED +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-dprotect.c b/tests/cases/test-dprotect.c index 75d2c28..12e191a 100644 --- a/tests/cases/test-dprotect.c +++ b/tests/cases/test-dprotect.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) 
#include #include @@ -66,4 +66,4 @@ int run_dprotect(__unused test_ctx_t *ctx) return 0; } -#endif +#endif // TARGET_OS_IPHONE & !SIM diff --git a/tests/cases/test-external-jnl.c b/tests/cases/test-external-jnl.c index f293ffb..6b86236 100644 --- a/tests/cases/test-external-jnl.c +++ b/tests/cases/test-external-jnl.c @@ -8,7 +8,7 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -75,4 +75,4 @@ int run_external_jnl(__unused test_ctx_t *ctx) return 0; } -#endif +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-file-too-big.m b/tests/cases/test-file-too-big.m index 72af252..aa58142 100644 --- a/tests/cases/test-file-too-big.m +++ b/tests/cases/test-file-too-big.m @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -93,4 +93,4 @@ int run_file_too_big(__unused test_ctx_t *ctx) return 0; } -#endif // !TARGET_OS_EMBEDDED +#endif // !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-fsinfo-cprotect.c b/tests/cases/test-fsinfo-cprotect.c index 0730c94..0ac2afe 100644 --- a/tests/cases/test-fsinfo-cprotect.c +++ b/tests/cases/test-fsinfo-cprotect.c @@ -6,7 +6,7 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -101,4 +101,4 @@ int run_fsinfo_cprotect(__unused test_ctx_t *ctx) return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // TARGET_OS_IPHONE & !SIM diff --git a/tests/cases/test-fsync.c b/tests/cases/test-fsync.c index b8e9440..ac41b27 100644 --- a/tests/cases/test-fsync.c +++ b/tests/cases/test-fsync.c @@ -34,7 +34,7 @@ int run_fsync(__unused test_ctx_t *ctx) disk_image_t *di = NULL; const char *tstdir; -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) struct statfs sfs; bool hfs_root; @@ -47,7 +47,7 @@ int run_fsync(__unused test_ctx_t *ctx) hfs_root = true; tstdir = "/tmp"; } -#else // !TARGET_OS_EMBEDDED +#else // !TARGET_OS_IPHONE & !SIM di = disk_image_get(); 
tstdir = di->mount_point; #endif diff --git a/tests/cases/test-get-volume-create-time.c b/tests/cases/test-get-volume-create-time.c new file mode 100644 index 0000000..9722c5c --- /dev/null +++ b/tests/cases/test-get-volume-create-time.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * fsctl HFSIOC_GET_VOL_CREATE_TIME does not return volume cteate time. + */ +#include + +#include "../../core/hfs_fsctl.h" +#include "hfs-tests.h" +#include "test-utils.h" +#include "disk-image.h" + +/* + * Just as a good measure we add this check so that compilation does + * not break when compiled against older hfs_fsctl.h which did not + * include HFSIOC_GET_VOL_CREATE_TIME. + */ +#if !defined(HFSIOC_GET_VOL_CREATE_TIME) +#define HFSIOC_GET_VOL_CREATE_TIME _IOR('h', 4, time_t) +#endif + +TEST(get_volume_create_time) + +int run_get_volume_create_time(__unused test_ctx_t *ctx) +{ + disk_image_t *di; + time_t vol_create_time; + + di = disk_image_get(); + /* + * Volume create date is stored inside volume header in localtime. The + * date is stored as 32-bit integer containing the number of seconds + * since midnight, January 1, 1904. We can safely assume that create + * date set for the volume will not be epoch. 
+ */ + vol_create_time = 0; + assert_no_err(fsctl(di->mount_point, HFSIOC_GET_VOL_CREATE_TIME, + &vol_create_time, 0)); + if (!vol_create_time) + assert_fail("fcntl HFSIOC_GET_VOL_CREATE_TIME failed to set " + "volume create time."); + return 0; +} diff --git a/tests/cases/test-getattrlist-dprotect.m b/tests/cases/test-getattrlist-dprotect.m index 8ee93b3..f24b3bf 100644 --- a/tests/cases/test-getattrlist-dprotect.m +++ b/tests/cases/test-getattrlist-dprotect.m @@ -12,7 +12,7 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -209,4 +209,4 @@ int run_getattrlist_dprotect(__unused test_ctx_t *ctx) return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-getattrlist.c b/tests/cases/test-getattrlist.c index ae879d4..e7fd9d4 100644 --- a/tests/cases/test-getattrlist.c +++ b/tests/cases/test-getattrlist.c @@ -102,7 +102,7 @@ int run_getattrlist(__unused test_ctx_t *ctx) uint32_t expected = al.commonattr ^ ATTR_CMN_EXTENDED_SECURITY; -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) if (hfs_root == true) { assert(attrs.protection_class == 3); } diff --git a/tests/cases/test-hard-links.m b/tests/cases/test-hard-links.m index 5702561..d2a46a2 100644 --- a/tests/cases/test-hard-links.m +++ b/tests/cases/test-hard-links.m @@ -112,7 +112,7 @@ static void run(void) assert_no_err(unlink(file2)); -#if !TARGET_OS_EMBEDDED +#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) // Same as the last sequence but with directory hard links assert_no_err(mkdir(dir1, 0777)); @@ -224,7 +224,7 @@ static void run(void) assert(!systemx("/sbin/fsck_hfs", SYSTEMX_QUIET, "-ld", mut_vol_device, NULL)); -#endif +#endif //!(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) return; } diff --git a/tests/cases/test-invalid-ranges.m b/tests/cases/test-invalid-ranges.m index 8e85f5b..7a8b44e 100644 --- a/tests/cases/test-invalid-ranges.m +++ 
b/tests/cases/test-invalid-ranges.m @@ -151,7 +151,7 @@ int run_invalid_ranges(__unused test_ctx_t *ctx) assert_no_err(munmap(p, 1024 * ps * 2)); -#if !TARGET_OS_EMBEDDED +#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) disk_image_t *di2 = disk_image_create(DISK_IMAGE, &(disk_image_opts_t){ .size = 100 * 1024 * 1024 }); @@ -331,7 +331,7 @@ int run_invalid_ranges(__unused test_ctx_t *ctx) assert_no_err(close(fd)); -#endif +#endif //!(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) // Test for fd = open(file, O_CREAT | O_RDWR, 0666); diff --git a/tests/cases/test-journal-toggle.c b/tests/cases/test-journal-toggle.c index d1e713e..8359415 100755 --- a/tests/cases/test-journal-toggle.c +++ b/tests/cases/test-journal-toggle.c @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -62,4 +62,4 @@ int run_journal_toggle(__unused test_ctx_t *ctx) return 0; } -#endif // !TARGET_OS_EMBEDDED \ No newline at end of file +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-key-roll.c b/tests/cases/test-key-roll.c index a8b517a..74dedda 100644 --- a/tests/cases/test-key-roll.c +++ b/tests/cases/test-key-roll.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -1307,4 +1307,4 @@ int run_key_roll(__unused test_ctx_t *ctx) return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // TARGET_OS_IPHONE & !SIM diff --git a/tests/cases/test-list-ids.c b/tests/cases/test-list-ids.c index 47c8090..a7166c8 100644 --- a/tests/cases/test-list-ids.c +++ b/tests/cases/test-list-ids.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "hfs-tests.h" #include "test-utils.h" @@ -180,4 +182,4 @@ exit: return 0; } -#endif // TARGET_OS_EMBEDDED +#endif // (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-map-private.m 
b/tests/cases/test-map-private.m index bd70baf..c1e19e2 100644 --- a/tests/cases/test-map-private.m +++ b/tests/cases/test-map-private.m @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -158,4 +158,4 @@ int run_map_private(__unused test_ctx_t *ctx) return 0; } -#endif +#endif // !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-move-data-extents.c b/tests/cases/test-move-data-extents.c index 8e4750f..25bbfbb 100644 --- a/tests/cases/test-move-data-extents.c +++ b/tests/cases/test-move-data-extents.c @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -206,4 +206,4 @@ int run_move_data_extents(__unused test_ctx_t *ctx) return 0; } -#endif +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-quotas.c b/tests/cases/test-quotas.c index 8f9d776..c6fa71f 100644 --- a/tests/cases/test-quotas.c +++ b/tests/cases/test-quotas.c @@ -8,6 +8,8 @@ #include +#if !TARGET_OS_IPHONE + #include #include #include @@ -39,7 +41,7 @@ int run_quotas(__unused test_ctx_t *ctx) { disk_image_t *di = disk_image_create(QUOTA_DMG, &(disk_image_opts_t) { -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) .mount_point = QUOTA_DMG_MOUNT_POINT, #endif .size = 64 * 1024 * 1024, @@ -112,3 +114,4 @@ int run_quotas(__unused test_ctx_t *ctx) return 0; } +#endif // !TARGET_OS_EMBEDDED diff --git a/tests/cases/test-resize.m b/tests/cases/test-resize.m index a9d085b..1ae22ce 100644 --- a/tests/cases/test-resize.m +++ b/tests/cases/test-resize.m @@ -1,6 +1,6 @@ #include -#if !TARGET_OS_EMBEDDED +#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -76,4 +76,4 @@ int run_resize(__unused test_ctx_t *ctx) return 0; } -#endif // !TARGET_OS_EMBEDDED +#endif // !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-scan-range-size.c b/tests/cases/test-scan-range-size.c index 10c5391..7ea56b7 100644 --- 
a/tests/cases/test-scan-range-size.c +++ b/tests/cases/test-scan-range-size.c @@ -6,7 +6,7 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -63,4 +63,4 @@ int run_scan_range_size(__unused test_ctx_t *ctx) { return 0; } -#endif // !TARGET_OS_EMBEDDED +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-secluded-rename.c b/tests/cases/test-secluded-rename.c index 9c2715a..4d0be69 100644 --- a/tests/cases/test-secluded-rename.c +++ b/tests/cases/test-secluded-rename.c @@ -1,6 +1,6 @@ #include -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include #include @@ -63,4 +63,4 @@ int run_secluded_rename(__unused test_ctx_t *ctx) return 0; } -#endif +#endif // (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/cases/test-set-protection-class.c b/tests/cases/test-set-protection-class.c index 8898493..55751f3 100644 --- a/tests/cases/test-set-protection-class.c +++ b/tests/cases/test-set-protection-class.c @@ -27,7 +27,7 @@ int run_set_protection_class(__unused test_ctx_t *ctx) { const char *tstdir; -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) // The root file system needs to be HFS struct statfs sfs; @@ -38,7 +38,7 @@ int run_set_protection_class(__unused test_ctx_t *ctx) } tstdir = "/tmp"; -#else // !TARGET_OS_EMBEDDED +#else // !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) disk_image_t *di = disk_image_get(); tstdir = di->mount_point; #endif @@ -139,7 +139,7 @@ int run_set_protection_class(__unused test_ctx_t *ctx) assert_with_errno((fd = open(path, O_RDWR)) >= 0); -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) assert_no_err(fcntl(fd, F_SETPROTECTIONCLASS, 2)); #endif diff --git a/tests/cases/test-sparse-dev.c b/tests/cases/test-sparse-dev.c index c33a53f..0d2fc99 100644 --- a/tests/cases/test-sparse-dev.c +++ b/tests/cases/test-sparse-dev.c @@ -8,7 +8,7 @@ #include -#if !TARGET_OS_EMBEDDED +#if !TARGET_OS_IPHONE #include #include @@ -55,4 +55,4 @@ int 
run_sparse_dev(__unused test_ctx_t *ctx) return 0; } -#endif // !TARGET_OS_EMBEDDED +#endif // !TARGET_OS_IPHONE diff --git a/tests/cases/test-throttled-io.c b/tests/cases/test-throttled-io.c index 2bf0c14..f76cdc5 100644 --- a/tests/cases/test-throttled-io.c +++ b/tests/cases/test-throttled-io.c @@ -1,3 +1,7 @@ +#include + +#if !TARGET_OS_BRIDGE + #include #include #include @@ -13,55 +17,204 @@ #include #include #include +#include #include "hfs-tests.h" #include "test-utils.h" #include "disk-image.h" +#define TEST_PATH "/tmp/throttled-io.sparseimage" +#define MOUNT_POINT "/tmp/throttled_io" + TEST(throttled_io) static disk_image_t *gDI; static char *gFile1, *gFile2, *gFile3; -static pid_t gPID = 0; static void *gBuf; static const size_t gBuf_size = 64 * 1024 * 1024; -static void start_background_io(void) +static pid_t bg_io_pid = 0; +static size_t BG_IOSIZE = (4U << 10); // BG-IO buffer size 4KiB +static off_t BG_MAX_FILESIZE = (1ULL << 30); // Max BG-IO File-size 1GiB + +// +// A worker function called from the background-io child process. First it +// attempts to open file at path `gFile1` ( a new file is created if one does +// does not exist). If the file is opened successfully, the file is written +// continiously, wrapping around when write offset is greater or equal to +// `BG_MAX_FILESIZE`. +// +errno_t background_io_worker(void) { - char *of; - asprintf(&of, "of=%s", gFile1); - - assert_no_err(posix_spawn(&gPID, "/bin/dd", NULL, NULL, - (char *[]){ "/bin/dd", "if=/dev/random", - of, NULL }, - NULL)); + int fd; + off_t offset; + char *buffer; + + // + // Open the file at path `gFile1`, create a new file if one does not + // exists. + // + fd = open(gFile1, O_RDWR|O_TRUNC); + if (fd == -1 && errno == ENOENT) { + + fd = creat(gFile1, 0666); + if (fd == -1) { + fprintf(stderr, "Failed to create file: %s\n", + gFile1); + return errno; + } + close(fd); + fd = open(gFile1, O_RDWR); + } + + // + // Return errno if we could not open the file. 
+ // + if (fd == -1) { + fprintf(stderr, "Failed to open file: %s\n", gFile1); + return errno; + } + + // + // Allocate the write buffer on-stack such that we don't have to free + // it explicitly. + // + buffer = alloca(BG_IOSIZE); + if (!buffer) + return ENOMEM; + (void)memset(buffer, -1, BG_IOSIZE); + + offset = 0; + while (true) { + ssize_t written; + + written = pwrite(fd, buffer, BG_IOSIZE, offset); + if (written == -1) { + return errno; + } + + offset += written; + if (offset >= BG_MAX_FILESIZE) { + offset = 0; + } + + // + // Voluntarily relinquish cpu to allow the throttled process to + // schedule after every 128 MiB of write, else the test can + // take very long time and timeout. Sleep half a second after + // we have written 128 MiB. + // + if (!(offset % (off_t)(BG_IOSIZE * 1024 * 32))) { + usleep(500000); + } + + // + // Just in case the test times-out for some reason and parent + // terminates without killing this background-io process, let + // us poll this child process's parent. If the parent process + // has died then to ensure cleanup we return from this child + // process. + // + if (getppid() == 1) { + return ETIMEDOUT; + } + } + + // + // Should not come here. + // + return 0; } -static void end_background_io(void) +// +// Start a continious background-io process, if successful the pid of the +// background-io is cached in `bg_io_pid'. +// +static void start_background_io_process(void) { - if ( gPID != 0 ) - { - kill(gPID, SIGKILL); - int stat; - wait(&stat); - gPID = 0; + pid_t child_pid; + + child_pid = fork(); + switch(child_pid) { + + case -1: + assert_fail("Failed to spawn background-io " + "process, error %d, %s.\n", errno, + strerror(errno)); + case 0: { + int child_ret; + + child_ret = background_io_worker(); + _exit(child_ret); + } + default: + bg_io_pid = child_pid; } } -static int run_test1(void) +// +// Kill the background-io process if it was started. 
The background-io process +// should perform IO continuously and should not exit normally. If the +// background-io exited normally, the error is reported. +// +static void kill_background_io_process(void) { + int child_status; + pid_t child_pid; - // Kick off another process to ensure we get throttled - start_background_io(); + if (!bg_io_pid) + return; - assert_no_err(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, - IOPOL_THROTTLE)); + kill(bg_io_pid, SIGKILL); + do { + child_pid = waitpid(bg_io_pid, &child_status, WUNTRACED); + } while (child_pid == -1 && errno == EINTR); + + if (child_pid == -1 && errno != ECHILD) { + assert_fail("Failed to wait for child pid: %ld, error %d, " + "%s.\n", (long)bg_io_pid, errno, + strerror(errno)); + } + + if (WIFEXITED(child_status)) { + int error; + + error = WEXITSTATUS(child_status); + if (error) { + assert_fail("background-io exited with error %d, " + "%s.\n", error, strerror(error)); + } + } + + bg_io_pid = 0; +} +static int run_test1(void) +{ int fd, fd2; + int orig_io_policy; + + // + // Kick off another process to ensure we get throttled. + // + start_background_io_process(); + + // + // Cache the set IO policy of this process. + // + orig_io_policy = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS); + assert_with_errno(orig_io_policy != -1); + + // + // Set new IO policy for this process. + // + assert_no_err(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, + IOPOL_THROTTLE)); + assert_with_errno((fd = open("/dev/random", O_RDONLY)) >= 0); - assert_with_errno((fd2 = open(gFile2, - O_RDWR | O_CREAT | O_TRUNC, 0666)) >= 0); + assert_with_errno((fd2 = open(gFile2, O_RDWR | O_CREAT | O_TRUNC, + 0666)) >= 0); assert_no_err(fcntl(fd2, F_SINGLE_WRITER, 1)); assert_no_err(fcntl(fd2, F_NOCACHE, 1)); @@ -95,9 +248,17 @@ static int run_test1(void) assert_no_err (close(fd)); assert_no_err (close(fd2)); - - end_background_io(); + // + // Kill the background IO process. 
+ // + kill_background_io_process(); + + // + // Restore the orig. IO policy. + // + assert_no_err(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, + orig_io_policy)); return 0; } @@ -139,9 +300,27 @@ static void test2_thread(void) static int run_test2(void) { - start_background_io(); - - int fd = open(gFile3, O_RDWR | O_CREAT | O_TRUNC, 0666); + int fd; + int orig_io_policy; + + // + // Kick off another process to ensure we get throttled. + // + start_background_io_process(); + + // + // Cache the set IO policy of this process. + // + orig_io_policy = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS); + assert_with_errno(orig_io_policy != -1); + + // + // Set new IO policy for this process. + // + assert_no_err(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, + IOPOL_THROTTLE)); + + fd = open(gFile3, O_RDWR | O_CREAT | O_TRUNC, 0666); assert(fd >= 0); assert_no_err(fcntl(fd, F_SINGLE_WRITER, 1)); @@ -170,15 +349,24 @@ static int run_test2(void) pthread_join(thread, NULL); assert_no_err (close(fd)); - - end_background_io(); + + // + // Kill the background IO process. + // + kill_background_io_process(); + + // + // Restore the orig. IO policy. 
+ // + assert_no_err(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, + orig_io_policy)); return 0; } static bool clean_up(void) { - end_background_io(); + kill_background_io_process(); unlink(gFile1); unlink(gFile2); @@ -193,8 +381,11 @@ static bool clean_up(void) int run_throttled_io(__unused test_ctx_t *ctx) { - - gDI = disk_image_get(); + + gDI = disk_image_create(TEST_PATH, &(disk_image_opts_t){ + .size = 8 GB, + .mount_point = MOUNT_POINT + }); asprintf(&gFile1, "%s/throttled_io.1", gDI->mount_point); asprintf(&gFile2, "%s/throttled_io.2", gDI->mount_point); @@ -214,3 +405,5 @@ int run_throttled_io(__unused test_ctx_t *ctx) return res; } + +#endif // !TARGET_OS_BRIDGE diff --git a/tests/cases/test-transcode.m b/tests/cases/test-transcode.m index 70ca87a..4d38860 100644 --- a/tests/cases/test-transcode.m +++ b/tests/cases/test-transcode.m @@ -8,7 +8,7 @@ #import -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #import #import diff --git a/tests/disk-image.m b/tests/disk-image.m index 4940be4..7a37064 100644 --- a/tests/disk-image.m +++ b/tests/disk-image.m @@ -23,7 +23,7 @@ #include "test-utils.h" #include "systemx.h" -#if TARGET_OS_EMBEDDED +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #include "dmg.dat" @@ -179,6 +179,9 @@ disk_image_t *disk_image_create(const char *path, disk_image_opts_t *opts) free(line); fclose(fp); + // Ensure we have a mount point + assert(opts->mount_point); + // Mount it char *mkdir_args[4] = { "mkdir", "-p", (char *)opts->mount_point, NULL }; assert_no_err(posix_spawn(&pid, "/bin/mkdir", NULL, NULL, mkdir_args, NULL)); @@ -243,7 +246,7 @@ disk_image_t *disk_image_get(void) return di; } -#else // !TARGET_OS_EMBEDDED +#else // !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) bool disk_image_cleanup(disk_image_t *di) { @@ -492,4 +495,4 @@ disk_image_t *disk_image_get(void) return di; } -#endif // TARGET_OS_EMBEDDED +#endif // (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) diff --git a/tests/gen-test-plist.sh 
b/tests/gen-test-plist.sh index 054ec9a..b65f3a7 100755 --- a/tests/gen-test-plist.sh +++ b/tests/gen-test-plist.sh @@ -28,6 +28,17 @@ EOF set -e set -o pipefail +# The following change is taken directly from the XCBuild changes made for APFS in +# the commit cf61eef74b8 +if [ "$CURRENT_ARCH" = undefined_arch ]; then + # Xcode's New Build System, XCBuild, doesn't define CURRENT_ARCH anymore for script + # targets. It does define ARCHS though, which is the complete list of architectures + # being built for the platform. Since we don't really expect to have a different list + # of tests for different architectures of the same platform, it should be safe to just + # use the first one on the list for purposes of this script + CURRENT_ARCH=${ARCHS%% *} +fi + # Look for any files containing the TEST macro. Then # push those through the preprocessor (which should # filter out any that aren't applicable to the targeted -- 2.45.2