From 51e135ce018217de5c809f4ac236ef6b6d87ef97 Mon Sep 17 00:00:00 2001
From: Apple <opensource@apple.com>
Date: Tue, 29 Oct 2013 00:03:35 +0000
Subject: [PATCH] hfs-226.1.1.tar.gz

---
 CopyHFSMeta/DeviceWrapper.c | 19 +-
 CopyHFSMeta/SparseBundle.c | 113 +-
 CopyHFSMeta/main.c | 2 +-
 .../English.lproj}/InfoPlist.strings | 0
 {hfs_util => fs}/Info.plist | 67 +-
 fsck_hfs/cache.c | 1519 ++++
 fsck_hfs/cache.h | 257 +
 fsck_hfs/dfalib/BTree.c | 1806 +++++
 fsck_hfs/dfalib/BTree.h | 412 ++
 fsck_hfs/dfalib/BTreeAllocate.c | 543 ++
 fsck_hfs/dfalib/BTreeMiscOps.c | 581 ++
 fsck_hfs/dfalib/BTreeNodeOps.c | 1034 +++
 fsck_hfs/dfalib/BTreePrivate.h | 430 ++
 fsck_hfs/dfalib/BTreeScanner.c | 385 +
 fsck_hfs/dfalib/BTreeScanner.h | 75 +
 fsck_hfs/dfalib/BTreeTreeOps.c | 1844 +++++
 fsck_hfs/dfalib/BlockCache.c | 410 ++
 fsck_hfs/dfalib/CaseFolding.h | 478 ++
 fsck_hfs/dfalib/CatalogCheck.c | 2118 ++++++
 fsck_hfs/dfalib/CheckHFS.h | 79 +
 fsck_hfs/dfalib/DecompData.h | 263 +
 fsck_hfs/dfalib/DecompDataEnums.h | 71 +
 fsck_hfs/dfalib/DecompMakeData.c | 586 ++
 fsck_hfs/dfalib/FixDecompsNotes.txt | 190 +
 fsck_hfs/dfalib/HardLinkCheck.c | 1322 ++++
 fsck_hfs/dfalib/SAllocate.c | 1561 ++++
 fsck_hfs/dfalib/SBTree.c | 621 ++
 fsck_hfs/dfalib/SCatalog.c | 268 +
 fsck_hfs/dfalib/SControl.c | 1585 ++++
 fsck_hfs/dfalib/SDevice.c | 287 +
 fsck_hfs/dfalib/SExtents.c | 1835 +++++
 fsck_hfs/dfalib/SKeyCompare.c | 515 ++
 fsck_hfs/dfalib/SRebuildBTree.c | 1226 +++
 fsck_hfs/dfalib/SRepair.c | 6541 +++++++++++++++++
 fsck_hfs/dfalib/SRuntime.h | 420 ++
 fsck_hfs/dfalib/SStubs.c | 203 +
 fsck_hfs/dfalib/SUtils.c | 2735 +++++++
 fsck_hfs/dfalib/SVerify1.c | 4514 ++++++++++++
 fsck_hfs/dfalib/SVerify2.c | 1811 +++++
 fsck_hfs/dfalib/Scavenger.h | 1451 ++++
 fsck_hfs/dfalib/VolumeBitmapCheck.c | 1490 ++++
 fsck_hfs/dfalib/dirhardlink.c | 1537 ++++
 fsck_hfs/dfalib/fsck_journal.c | 582 ++
 fsck_hfs/dfalib/fsck_journal.h | 108 +
 fsck_hfs/dfalib/hfs_endian.c | 1119 +++
 fsck_hfs/dfalib/hfs_endian.h | 97 +
 fsck_hfs/dfalib/uuid.c | 65 +
 fsck_hfs/docs/fsck_gui_interface_design.rtf | 881 +++
 fsck_hfs/fsck_debug.c | 106 +
 fsck_hfs/fsck_debug.h | 78 +
 fsck_hfs/fsck_hfs.8 | 250 +
 fsck_hfs/fsck_hfs.c | 1041 +++
 fsck_hfs/fsck_hfs.h | 73 +
 fsck_hfs/fsck_hfs_msgnums.h | 196 +
 fsck_hfs/fsck_hfs_strings.c | 213 +
 fsck_hfs/fsck_keys.h | 75 +
 fsck_hfs/fsck_messages.c | 1162 +++
 fsck_hfs/fsck_messages.h | 165 +
 fsck_hfs/fsck_msgnums.h | 70 +
 fsck_hfs/fsck_strings.c | 77 +
 fsck_hfs/utilities.c | 1024 +++
 fstyp_hfs/fstyp_hfs.8 | 40 +
 fstyp_hfs/fstyp_hfs.c | 203 +
 hfs.xcconfig | 17 +
 hfs.xcodeproj/project.pbxproj | 1112 ++-
 .../contents.xcworkspacedata | 7 +
 .../xcshareddata/WorkspaceSettings.xcsettings | 8 +
 .../xcschemes/All_MacOSX.xcscheme | 62 +
 .../xcshareddata/xcschemes/All_iOS.xcscheme | 62 +
 .../xcschemes/CopyHFSMeta.xcscheme | 89 +
 .../xcshareddata/xcschemes/fsck_hfs.xcscheme | 89 +
 .../xcschemes/fsck_makestrings.xcscheme | 89 +
 .../xcshareddata/xcschemes/hfs.fs.xcscheme | 62 +
 .../xcshareddata/xcschemes/hfs_util.xcscheme | 89 +
 .../xcshareddata/xcschemes/mount_hfs.xcscheme | 89 +
 .../xcshareddata/xcschemes/newfs_hfs.xcscheme | 89 +
 .../xcschemes/newfs_hfs_debug.xcscheme | 89 +
 hfs_util/PkgInfo | 1 -
 hfs_util/hfs.util.8 | 1 -
 hfs_util/hfsutil_jnl.c | 501 +-
 hfs_util/version.plist | 10 -
 mount_hfs/hfs_endian.h | 57 +
 mount_hfs/mount_hfs.8 | 106 +
 mount_hfs/mount_hfs.c | 865 +++
 mount_hfs/optical.c | 146 +
 mount_hfs/optical.h | 28 +
 newfs_hfs/hfs_endian.c | 172 +
 newfs_hfs/hfs_endian.h | 78 +
 newfs_hfs/makehfs.c | 2151 ++++
 newfs_hfs/newfs_hfs.8 | 177 +
newfs_hfs/newfs_hfs.c | 1458 ++++ newfs_hfs/newfs_hfs.h | 259 + 92 files changed, 58392 insertions(+), 400 deletions(-) rename {hfs_util => fs/English.lproj}/InfoPlist.strings (100%) rename {hfs_util => fs}/Info.plist (81%) create mode 100644 fsck_hfs/cache.c create mode 100644 fsck_hfs/cache.h create mode 100644 fsck_hfs/dfalib/BTree.c create mode 100644 fsck_hfs/dfalib/BTree.h create mode 100644 fsck_hfs/dfalib/BTreeAllocate.c create mode 100644 fsck_hfs/dfalib/BTreeMiscOps.c create mode 100644 fsck_hfs/dfalib/BTreeNodeOps.c create mode 100644 fsck_hfs/dfalib/BTreePrivate.h create mode 100755 fsck_hfs/dfalib/BTreeScanner.c create mode 100755 fsck_hfs/dfalib/BTreeScanner.h create mode 100644 fsck_hfs/dfalib/BTreeTreeOps.c create mode 100644 fsck_hfs/dfalib/BlockCache.c create mode 100644 fsck_hfs/dfalib/CaseFolding.h create mode 100644 fsck_hfs/dfalib/CatalogCheck.c create mode 100644 fsck_hfs/dfalib/CheckHFS.h create mode 100644 fsck_hfs/dfalib/DecompData.h create mode 100644 fsck_hfs/dfalib/DecompDataEnums.h create mode 100644 fsck_hfs/dfalib/DecompMakeData.c create mode 100644 fsck_hfs/dfalib/FixDecompsNotes.txt create mode 100755 fsck_hfs/dfalib/HardLinkCheck.c create mode 100644 fsck_hfs/dfalib/SAllocate.c create mode 100644 fsck_hfs/dfalib/SBTree.c create mode 100644 fsck_hfs/dfalib/SCatalog.c create mode 100644 fsck_hfs/dfalib/SControl.c create mode 100644 fsck_hfs/dfalib/SDevice.c create mode 100644 fsck_hfs/dfalib/SExtents.c create mode 100644 fsck_hfs/dfalib/SKeyCompare.c create mode 100755 fsck_hfs/dfalib/SRebuildBTree.c create mode 100644 fsck_hfs/dfalib/SRepair.c create mode 100644 fsck_hfs/dfalib/SRuntime.h create mode 100644 fsck_hfs/dfalib/SStubs.c create mode 100644 fsck_hfs/dfalib/SUtils.c create mode 100644 fsck_hfs/dfalib/SVerify1.c create mode 100644 fsck_hfs/dfalib/SVerify2.c create mode 100644 fsck_hfs/dfalib/Scavenger.h create mode 100644 fsck_hfs/dfalib/VolumeBitmapCheck.c create mode 100644 fsck_hfs/dfalib/dirhardlink.c create mode 100644 fsck_hfs/dfalib/fsck_journal.c create mode 100644 fsck_hfs/dfalib/fsck_journal.h create mode 100755 fsck_hfs/dfalib/hfs_endian.c create mode 100755 fsck_hfs/dfalib/hfs_endian.h create mode 100644 fsck_hfs/dfalib/uuid.c create mode 100644 fsck_hfs/docs/fsck_gui_interface_design.rtf create mode 100644 fsck_hfs/fsck_debug.c create mode 100644 fsck_hfs/fsck_debug.h create mode 100644 fsck_hfs/fsck_hfs.8 create mode 100644 fsck_hfs/fsck_hfs.c create mode 100644 fsck_hfs/fsck_hfs.h create mode 100644 fsck_hfs/fsck_hfs_msgnums.h create mode 100644 fsck_hfs/fsck_hfs_strings.c create mode 100644 fsck_hfs/fsck_keys.h create mode 100644 fsck_hfs/fsck_messages.c create mode 100644 fsck_hfs/fsck_messages.h create mode 100644 fsck_hfs/fsck_msgnums.h create mode 100644 fsck_hfs/fsck_strings.c create mode 100644 fsck_hfs/utilities.c create mode 100644 fstyp_hfs/fstyp_hfs.8 create mode 100644 fstyp_hfs/fstyp_hfs.c create mode 100644 hfs.xcconfig create mode 100644 hfs.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 hfs.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/All_MacOSX.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/All_iOS.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/CopyHFSMeta.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/fsck_hfs.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/fsck_makestrings.xcscheme create mode 100644 
hfs.xcodeproj/xcshareddata/xcschemes/hfs.fs.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/hfs_util.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/mount_hfs.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs.xcscheme create mode 100644 hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs_debug.xcscheme delete mode 100644 hfs_util/PkgInfo delete mode 100644 hfs_util/version.plist create mode 100644 mount_hfs/hfs_endian.h create mode 100644 mount_hfs/mount_hfs.8 create mode 100644 mount_hfs/mount_hfs.c create mode 100644 mount_hfs/optical.c create mode 100644 mount_hfs/optical.h create mode 100644 newfs_hfs/hfs_endian.c create mode 100644 newfs_hfs/hfs_endian.h create mode 100644 newfs_hfs/makehfs.c create mode 100644 newfs_hfs/newfs_hfs.8 create mode 100644 newfs_hfs/newfs_hfs.c create mode 100644 newfs_hfs/newfs_hfs.h diff --git a/CopyHFSMeta/DeviceWrapper.c b/CopyHFSMeta/DeviceWrapper.c index 54f62ec..e9bf6be 100644 --- a/CopyHFSMeta/DeviceWrapper.c +++ b/CopyHFSMeta/DeviceWrapper.c @@ -46,24 +46,37 @@ writeExtent(struct IOWrapper *context, DeviceInfo_t *devp, off_t start, off_t le { const size_t bufSize = 1024 * 1024; struct DeviceWrapperContext *ctx = (struct DeviceWrapperContext*)context->context; - uint8_t buffer[bufSize]; + uint8_t *buffer = NULL; + ssize_t retval = 0; off_t total = 0; if (debug) printf("Writing extent <%lld, %lld> to device %s", start, len, ctx->pathname); + buffer = malloc(bufSize); + if (buffer == NULL) { + warn("%s(%s): Could not allocate %zu bytes for buffer", __FILE__, __FUNCTION__, bufSize); + retval = -1; + goto done; + } + while (total < len) { ssize_t nread; size_t amt = MIN(bufSize, len - total); + // XXX - currently, DeviceWrapepr isn't used, but it needs to deal wit unaligned I/O when it is. nread = pread(devp->fd, buffer, amt, start + total); if (nread == -1) { warn("Cannot read from device at offset %lld", start + total); - return -1; + retval = -1; + goto done; } (void)pwrite(ctx->fd, (char*)buffer, nread, start + total); bp(nread); total += nread; } - return 0; +done: + if (buffer) + free(buffer); + return retval; } /* diff --git a/CopyHFSMeta/SparseBundle.c b/CopyHFSMeta/SparseBundle.c index 1ac5a68..a7c5668 100644 --- a/CopyHFSMeta/SparseBundle.c +++ b/CopyHFSMeta/SparseBundle.c @@ -5,6 +5,7 @@ #include <fcntl.h> #include <err.h> #include <errno.h> +#include <unistd.h> #include <sys/stat.h> #include <sys/fcntl.h> #include <removefile.h> @@ -61,18 +62,43 @@ static const char *bundlePrototype = "</plist>\n"; /* - * Do a per-volume sync. We use this just before updating the progress file, so - * that any changes -- data and metadata -- will have made it to disk, without - * causing a sync of every mounted volume. - * + * Perform a (potentially) unaligned read from a given input device. */ - -static void -sync_volume(const char *path) { - int full_sync = FSCTL_SYNC_FULLSYNC | FSCTL_SYNC_WAIT; - - (void)fsctl(path, FSCTL_SYNC_VOLUME, &full_sync, 0); - return; +static ssize_t +UnalignedRead(DeviceInfo_t *devp, void *buffer, size_t size, off_t offset) +{ + ssize_t nread = -1; + size_t readSize = ((size + devp->blockSize - 1) / devp->blockSize) * devp->blockSize; + off_t baseOffset = (offset / devp->blockSize) * devp->blockSize; + size_t off = offset - baseOffset; + char *tmpbuf = NULL; + + if ((baseOffset == offset) && (readSize == size)) { + /* + * The read is already properly aligned, so call pread. 
+ */ + return pread(devp->fd, buffer, size, offset); + } + + tmpbuf = malloc(readSize); + if (!tmpbuf) { + goto done; + } + + nread = pread(devp->fd, tmpbuf, readSize, baseOffset); + if (nread == -1) { + goto done; + } + + nread -= off; + if (nread > (ssize_t)size) { + nread = size; + } + memcpy(buffer, tmpbuf + off, nread); + +done: + free(tmpbuf); + return nread; } /* @@ -148,19 +174,29 @@ doSparseWrite(IOWrapper_t *context, off_t offset, void *buffer, size_t len) int fd; if (ctx->cfd == -1 || ctx->cBandNum != bandNum) { - if (ctx->cfd != -1) - close(ctx->cfd); - asprintf(&bandName, "%s/bands/%x", ctx->pathname, bandNum); - fd = open(bandName, O_WRONLY | O_CREAT, 0666); - if (fd == -1) { - warn("Cannot open band file %s for offset %llu", bandName, offset + written); - retval = -1; - goto done; - } - free(bandName); - bandName = NULL; - ctx->cfd = fd; - ctx->cBandNum = bandNum; + if (ctx->cfd != -1) { + close(ctx->cfd); + } + asprintf(&bandName, "%s/bands/%x", ctx->pathname, bandNum); + fd = open(bandName, O_WRONLY | O_CREAT, 0666); + if (fd == -1) { + warn("Cannot open band file %s for offset %llu", bandName, offset + written); + retval = -1; + goto done; + } + /* + * When we create a new band file, we sync the volume + * it's on, so that we can ensure that the band file is present + * on disk. (Otherwise, with a crash, we can end up with the + * data not where we expected.) In this case, however, we probably + * don't need to wait for it -- just start the sync. + */ + fsync_volume_np(fd, 0); + fcntl(fd, F_NOCACHE, 1); + free(bandName); + bandName = NULL; + ctx->cfd = fd; + ctx->cBandNum = bandNum; } else { fd = ctx->cfd; } @@ -172,6 +208,8 @@ doSparseWrite(IOWrapper_t *context, off_t offset, void *buffer, size_t len) retval = -1; goto done; } + // Sync the data out. 
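
[Editor's note] The UnalignedRead helper above rounds the requested offset down and the requested size up to the device block size, then copies the interesting slice out of the oversized read. A minimal standalone sketch of just that arithmetic, assuming a hypothetical 512-byte device block size and made-up request values, is:

#include <stdio.h>
#include <stdint.h>

/* Sketch only: mirrors the rounding UnalignedRead performs for one request. */
int
main(void)
{
	const uint64_t blockSize = 512;	/* assumed device block size */
	uint64_t offset = 1000;		/* hypothetical unaligned request */
	uint64_t size = 4096;

	uint64_t baseOffset = (offset / blockSize) * blockSize;			/* 512 */
	uint64_t readSize = ((size + blockSize - 1) / blockSize) * blockSize;	/* 4096 */
	uint64_t off = offset - baseOffset;					/* 488 */

	/* The aligned pread() covers [baseOffset, baseOffset + readSize); the caller's
	 * data starts at byte `off` of the temporary buffer, and UnalignedRead copies
	 * at most (nread - off) bytes out of it, capped at `size`. */
	printf("pread %llu bytes at %llu, then copy from temp buffer offset %llu\n",
	    (unsigned long long)readSize, (unsigned long long)baseOffset,
	    (unsigned long long)off);
	return 0;
}
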
+ fsync(fd); written += nwritten; } retval = written; @@ -188,30 +226,44 @@ static ssize_t WriteExtentToSparse(struct IOWrapper * context, DeviceInfo_t *devp, off_t start, off_t len, void (^bp)(off_t)) { const size_t bufSize = 1024 * 1024; - uint8_t buffer[bufSize]; + uint8_t *buffer = NULL; + ssize_t retval = 0; off_t total = 0; if (debug) printf("Writing extent <%lld, %lld>\n", start, len); + buffer = malloc(bufSize); + if (buffer == NULL) { + warn("%s(%s): Could not allocate %zu bytes for buffer", __FILE__, __FUNCTION__, bufSize); + retval = -1; + goto done; + } + while (total < len) { ssize_t nread; ssize_t nwritten; size_t amt = MIN(bufSize, len - total); - nread = pread(devp->fd, buffer, amt, start + total); + nread = UnalignedRead(devp, buffer, amt, start + total); if (nread == -1) { warn("Cannot read from device at offset %lld", start + total); - return -1; + retval = -1; + break; } if (nread < amt) { warnx("Short read from source device -- got %zd, expected %zd", nread, amt); } nwritten = doSparseWrite(context, start + total, buffer, nread); - if (nwritten == -1) - return -1; + if (nwritten == -1) { + retval = -1; + break; + } bp(nread); total += nread; } if (debug) printf("\twrote %lld\n", total); - return 0; +done: + if (buffer) + free(buffer); + return retval; } static const CFStringRef kBandSizeKey = CFSTR("band-size"); @@ -341,7 +393,6 @@ SetProgress(struct IOWrapper *context, off_t prog) } else { fp = fopen(progFile, "w"); if (fp) { - sync_volume(ctx->pathname); (void)fprintf(fp, "%llu\n", prog); fclose(fp); } diff --git a/CopyHFSMeta/main.c b/CopyHFSMeta/main.c index 235f0e8..2c5b69b 100644 --- a/CopyHFSMeta/main.c +++ b/CopyHFSMeta/main.c @@ -126,7 +126,7 @@ OpenDevice(const char *devname) errx(kBadExit, "device name `%s' does not fit pattern", devname); } // Only use an exclusive open if we're not debugging. - fd = open(dev.devname, O_RDWR | (debug ? 0 : O_EXLOCK)); + fd = open(dev.devname, O_RDONLY | (debug ? 
0 : O_EXLOCK)); if (fd == -1) { err(kBadExit, "cannot open raw device %s", dev.devname); } diff --git a/hfs_util/InfoPlist.strings b/fs/English.lproj/InfoPlist.strings similarity index 100% rename from hfs_util/InfoPlist.strings rename to fs/English.lproj/InfoPlist.strings diff --git a/hfs_util/Info.plist b/fs/Info.plist similarity index 81% rename from hfs_util/Info.plist rename to fs/Info.plist index 1a61a37..18bcc30 100644 --- a/hfs_util/Info.plist +++ b/fs/Info.plist @@ -1,6 +1,6 @@ <?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd"> -<plist version="0.9"> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> <dict> <key>CFBundleDevelopmentRegion</key> <string>English</string> @@ -13,11 +13,13 @@ <key>CFBundlePackageType</key> <string>fs </string> <key>CFBundleShortVersionString</key> - <string>1.6.1</string> + <string>$HFS_VERSION</string> <key>CFBundleSignature</key> <string>????</string> + <key>CFBundleGetInfoString</key> + <string>$HFS_COPYRIGHT_INFO_STRING</string> <key>CFBundleVersion</key> - <string>1.6</string> + <string>$HFS_VERSION</string> <key>FSMediaTypes</key> <dict> <key>Apple_Boot</key> @@ -32,7 +34,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>1000</integer> <key>autodiskmount</key> @@ -50,7 +52,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>1000</integer> <key>autodiskmount</key> @@ -68,7 +70,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>1000</integer> </dict> @@ -84,7 +86,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>1000</integer> </dict> @@ -100,7 +102,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>1000</integer> </dict> @@ -116,7 +118,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>2000</integer> </dict> @@ -132,7 +134,7 @@ <key>FSProbeArguments</key> <string>-p</string> <key>FSProbeExecutable</key> - <string>../../hfs.util</string> + <string>hfs.util</string> <key>FSProbeOrder</key> <integer>2000</integer> </dict> @@ -144,7 +146,7 @@ <key>FSMountArguments</key> <string></string> <key>FSMountExecutable</key> - <string>../../../../../../sbin/mount_hfs</string> + <string>mount_hfs</string> <key>FSName</key> <string>Mac OS Standard</string> <key>FSSubType</key> @@ -154,11 +156,11 @@ <key>FSXMLOutputArgument</key> <string>-x</string> <key>FSRepairExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSVerificationArguments</key> <string>-n</string> <key>FSVerificationExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> </dict> <key>HFS+</key> <dict> @@ -167,7 +169,7 @@ <key>FSFormatContentMask</key> <string>Apple_HFS</string> <key>FSFormatExecutable</key> - 
<string>../../../../../../sbin/newfs_hfs</string> + <string>newfs_hfs</string> <key>FSFormatMinimumSize</key> <integer>524288</integer> <key>FSFormatMaximumSize</key> @@ -175,7 +177,7 @@ <key>FSMountArguments</key> <string></string> <key>FSMountExecutable</key> - <string>../../../../../../sbin/mount_hfs</string> + <string>mount_hfs</string> <key>FSName</key> <string>Mac OS Extended</string> <key>FSSubType</key> @@ -185,11 +187,11 @@ <key>FSXMLOutputArgument</key> <string>-x</string> <key>FSRepairExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSVerificationArguments</key> <string>-fn</string> <key>FSVerificationExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> </dict> <key>Journaled HFS+</key> <dict> @@ -198,7 +200,7 @@ <key>FSFormatContentMask</key> <string>Apple_HFS</string> <key>FSFormatExecutable</key> - <string>../../../../../../sbin/newfs_hfs</string> + <string>newfs_hfs</string> <key>FSFormatMinimumSize</key> <integer>9437184</integer> <key>FSFormatMaximumSize</key> @@ -206,7 +208,7 @@ <key>FSMountArguments</key> <string></string> <key>FSMountExecutable</key> - <string>../../../../../../sbin/mount_hfs</string> + <string>mount_hfs</string> <key>FSName</key> <string>Mac OS Extended (Journaled)</string> <key>FSSubType</key> @@ -216,16 +218,15 @@ <key>FSXMLOutputArgument</key> <string>-x</string> <key>FSRepairExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSVerificationArguments</key> <string>-fn</string> <key>FSVerificationExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSLiveVerificationArguments</key> <string>-l</string> <key>FSCoreStorageEncryptionName</key> - <string>Mac OS Extended (Journaled, Encrypted)</string> - + <string>Mac OS Extended (Journaled, Encrypted)</string> </dict> <key>Case-sensitive HFS+</key> <dict> @@ -234,7 +235,7 @@ <key>FSFormatContentMask</key> <string>Apple_HFSX</string> <key>FSFormatExecutable</key> - <string>../../../../../../sbin/newfs_hfs</string> + <string>newfs_hfs</string> <key>FSFormatMinimumSize</key> <integer>4194304</integer> <key>FSFormatMaximumSize</key> @@ -242,7 +243,7 @@ <key>FSMountArguments</key> <string></string> <key>FSMountExecutable</key> - <string>../../../../../../sbin/mount_hfs</string> + <string>mount_hfs</string> <key>FSName</key> <string>Mac OS Extended (Case-sensitive)</string> <key>FSSubType</key> @@ -252,11 +253,11 @@ <key>FSXMLOutputArgument</key> <string>-x</string> <key>FSRepairExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSVerificationArguments</key> <string>-fn</string> <key>FSVerificationExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> </dict> <key>Case-sensitive Journaled HFS+</key> <dict> @@ -265,7 +266,7 @@ <key>FSFormatContentMask</key> <string>Apple_HFSX</string> <key>FSFormatExecutable</key> - <string>../../../../../../sbin/newfs_hfs</string> + <string>newfs_hfs</string> <key>FSFormatMinimumSize</key> <integer>9437184</integer> <key>FSFormatMaximumSize</key> @@ -273,7 +274,7 @@ <key>FSMountArguments</key> <string></string> <key>FSMountExecutable</key> - <string>../../../../../../sbin/mount_hfs</string> + <string>mount_hfs</string> <key>FSName</key> <string>Mac OS Extended (Case-sensitive, Journaled)</string> <key>FSSubType</key> @@ -283,15 +284,15 @@ <key>FSXMLOutputArgument</key> <string>-x</string> 
<key>FSRepairExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSVerificationArguments</key> <string>-fn</string> <key>FSVerificationExecutable</key> - <string>../../../../../../sbin/fsck_hfs</string> + <string>fsck_hfs</string> <key>FSLiveVerificationArguments</key> <string>-l</string> <key>FSCoreStorageEncryptionName</key> - <string>Mac OS Extended (Case-sensitive, Journaled, Encrypted)</string> + <string>Mac OS Extended (Case-sensitive, Journaled, Encrypted)</string> </dict> </dict> </dict> diff --git a/fsck_hfs/cache.c b/fsck_hfs/cache.c new file mode 100644 index 0000000..fdd5649 --- /dev/null +++ b/fsck_hfs/cache.c @@ -0,0 +1,1519 @@ +/* + * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include <errno.h> +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/uio.h> +#include <unistd.h> +#include <string.h> + +#include "fsck_hfs.h" +#include "cache.h" + +#define true 1 +#define false 0 + +#define CACHE_DEBUG 0 + +/* + * CacheAllocBlock + * + * Allocate an unused cache block. + */ +void *CacheAllocBlock (Cache_t *cache); + +/* + * CacheFreeBlock + * + * Release an active cache block. + */ +static int +CacheFreeBlock( Cache_t *cache, Tag_t *tag ); + +/* + * CacheLookup + * + * Obtain a cache block. If one already exists, it is returned. Otherwise a + * new one is created and inserted into the cache. + */ +int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag); + +/* + * CacheRawRead + * + * Perform a direct read on the file. + */ +int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf); + +/* + * CacheRawWrite + * + * Perform a direct write on the file. + */ +int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf); + +/* + * CacheFlushRange + * + * Flush, and optionally remove, all cache blocks that intersect + * a given range. + */ +static int +CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove); + +/* + * LRUInit + * + * Initializes the LRU data structures. + */ +static int LRUInit (LRU_t *lru); + +/* + * LRUDestroy + * + * Shutdown the LRU. + * + * NOTE: This is typically a no-op, since the cache manager will be clearing + * all cache tags. + */ +static int LRUDestroy (LRU_t *lru); + +/* + * LRUHit + * + * Registers data activity on the given node. If the node is already in the + * LRU, it is moved to the front. Otherwise, it is inserted at the front. + * + * NOTE: If the node is not in the LRU, we assume that its pointers are NULL. 
+ */ +static int LRUHit (LRU_t *lru, LRUNode_t *node, int age); + +/* + * LRUEvict + * + * Chooses a buffer to release. + * + * TODO: Under extreme conditions, it should be possible to release the buffer + * of an actively referenced cache buffer, leaving the tag behind as a + * placeholder. This would be required for implementing 2Q-LRU + * replacement. + */ +static int LRUEvict (LRU_t *lru, LRUNode_t *node); + +/* + * CalculateCacheSizes + * + * Determine the cache size values that should be used to initialize the cache. + * If the requested value does not validate according to the conditions described + * below, it is adjusted. + * + * If no input values are provided, use default values for cache size + * and cache block size. + * + * Cache size should be - + * a. greater than or equal to minimum cache size + * b. less than or equal to maximum cache size. The maximum cache size + * is limited by the maximum value that can be allocated using malloc + * or mmap (maximum value for size_t) + * c. multiple of cache block size + * + * Returns: void + * *calcBlockSize: the size of the blocks in the cache + * *calcTotalBlocks: the number of blocks in the cache + */ +void CalculateCacheSizes(uint64_t cacheSize, uint32_t *calcBlockSize, uint32_t *calcTotalBlocks, char cache_debug) +{ + uint32_t blockSize = DefaultCacheBlockSize; + const size_t max_size_t = ~0; /* Maximum value represented by size_t */ + + /* Simple case - no user cache size, use default values */ + if (!cacheSize) { + *calcBlockSize = DefaultCacheBlockSize; + *calcTotalBlocks = DefaultCacheBlocks; + goto out; + } + + /* User provided cache size - check with minimum and maximum values */ + if (cacheSize < MinCacheSize) { + cacheSize = MinCacheSize; + } + if (cacheSize > max_size_t || + cacheSize > MaxCacheSize) { + if (cache_debug) { + printf ("\tCache size should be greater than %uM and less than %luM\n", MinCacheSize/(1024*1024), max_size_t/(1024*1024)); + } + cacheSize = MaxCacheSize; + } + + /* Cache size should be multiple of cache block size */ + if (cacheSize % blockSize) { + if (cache_debug) { + printf ("\tCache size should be multiple of cache block size (currently %uK)\n", blockSize/1024); + } + cacheSize = (cacheSize / blockSize) * blockSize; + } + + *calcBlockSize = blockSize; + *calcTotalBlocks = cacheSize / blockSize; + +out: + return; +} + +/* + * CacheInit + * + * Initializes the cache for use. If preTouch is non-zero, the cache memory will + * be iterated through, with one byte per page touched. (This is to ensure that + * the memory is actually created, and is used to avoid deadlocking due to swapping + * during a live verify of the boot volume.) 
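
[Editor's note] The preTouch behavior described in the CacheInit comment above (touching one byte per page so the memory is materialized up front, avoiding swapping during a live verify) can be illustrated in isolation. A minimal sketch, assuming a plain anonymous mapping rather than the real cache allocation path:

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch: force every page of an anonymous mapping to be backed immediately,
 * the same way CacheInit does when preTouch is non-zero. */
static void *
alloc_pretouched(size_t length)
{
	unsigned char *base = mmap(NULL, length, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	size_t pageSize = (size_t)getpagesize();
	for (unsigned char *p = base; p < base + length; p += pageSize)
		*p = 0;	/* one write per page is enough to fault it in */

	return base;
}
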
+ */ +int CacheInit (Cache_t *cache, int fdRead, int fdWrite, uint32_t devBlockSize, + uint32_t cacheBlockSize, uint32_t cacheTotalBlocks, uint32_t hashSize, int preTouch) +{ + void ** temp; + uint32_t i; + Buf_t * buf; + + memset (cache, 0x00, sizeof (Cache_t)); + + cache->FD_R = fdRead; + cache->FD_W = fdWrite; + cache->DevBlockSize = devBlockSize; + /* CacheFlush requires cleared cache->Hash */ + cache->Hash = (Tag_t **) calloc( 1, (sizeof (Tag_t *) * hashSize) ); + cache->HashSize = hashSize; + cache->BlockSize = cacheBlockSize; + + /* Allocate the cache memory */ + /* Break out of the loop on success, or when the proposed cache is < MinCacheSize */ + while (1) { + cache->FreeHead = mmap (NULL, + cacheTotalBlocks * cacheBlockSize, + PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, + -1, + 0); + if (cache->FreeHead == (void *)-1) { + if ((cacheTotalBlocks * cacheBlockSize) <= MinCacheSize) { + if (debug) + printf("\tTried to allocate %dK, minimum is %dK\n", + (cacheTotalBlocks * cacheBlockSize) / 1024, + MinCacheSize / 1024); + break; + } + if (debug) + printf("\tFailed to allocate %uK for cache; trying %uK\n", + (cacheTotalBlocks * cacheBlockSize) / 1024, + (cacheTotalBlocks * cacheBlockSize / 2) / 1024); + CalculateCacheSizes((cacheTotalBlocks * cacheBlockSize) / 2, &cacheBlockSize, &cacheTotalBlocks, debug); + continue; + } else { + if (debug) { + printf ("\tUsing cacheBlockSize=%uK cacheTotalBlock=%u cacheSize=%uK.\n", cacheBlockSize/1024, cacheTotalBlocks, (cacheBlockSize/1024) * cacheTotalBlocks); + } + break; + } + } + if (cache->FreeHead == (void*)-1) { +#if CACHE_DEBUG + printf("%s(%d): FreeHead = -1\n", __FUNCTION__, __LINE__); +#endif + return (ENOMEM); + } + + + /* If necessary, touch a byte in each page */ + if (preTouch) { + size_t pageSize = getpagesize(); + unsigned char *ptr = (unsigned char *)cache->FreeHead; + unsigned char *end = ptr + (cacheTotalBlocks * cacheBlockSize); + while (ptr < end) { + *ptr = 0; + ptr += pageSize; + } + } + + /* Initialize the cache memory free list */ + temp = cache->FreeHead; + for (i = 0; i < cacheTotalBlocks - 1; i++) { + *temp = ((char *)temp + cacheBlockSize); + temp = (void **)((char *)temp + cacheBlockSize); + } + *temp = NULL; + cache->FreeSize = cacheTotalBlocks; + + buf = (Buf_t *)malloc(sizeof(Buf_t) * MAXBUFS); + if (buf == NULL) { +#if CACHE_DEBUG + printf("%s(%d): malloc(%zu) failed\n", __FUNCTION__, __LINE__, sizeof(Buf_t) * MAXBUFS); +#endif + return (ENOMEM); + } + + memset (&buf[0], 0x00, sizeof (Buf_t) * MAXBUFS); + for (i = 1 ; i < MAXBUFS ; i++) { + (&buf[i-1])->Next = &buf[i]; + } + cache->FreeBufs = &buf[0]; + +#if CACHE_DEBUG + printf( "%s - cacheTotalBlocks %d cacheBlockSize %d hashSize %d \n", + __FUNCTION__, cacheTotalBlocks, cacheBlockSize, hashSize ); + printf( "%s - cache memory %d \n", __FUNCTION__, (cacheTotalBlocks * cacheBlockSize) ); +#endif + + return (LRUInit (&cache->LRU)); +} + + +/* + * CacheDestroy + * + * Shutdown the cache. 
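
[Editor's note] CacheInit threads its free list directly through the mmap'd cache memory: the first word of every free block holds a pointer to the next free block, so no separate bookkeeping array is needed. A small sketch of the same idea over a generic arena (the function name is illustrative, not part of the source):

#include <stddef.h>

/* Sketch: thread a singly linked free list through a flat arena by storing
 * the "next" pointer in the first bytes of each free block, as CacheInit
 * does for the cache memory. Assumes totalBlocks is at least 1. */
static void *
thread_free_list(void *arena, size_t blockSize, size_t totalBlocks)
{
	void **block = arena;
	for (size_t i = 0; i < totalBlocks - 1; i++) {
		*block = (char *)block + blockSize;	/* point at the next block */
		block = (void **)((char *)block + blockSize);
	}
	*block = NULL;		/* last block terminates the list */
	return arena;		/* head of the free list */
}

Popping a block is then just the pointer swap done in CacheAllocBlock: take FreeHead and replace it with the pointer stored at the start of that block.
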
+ */ +int CacheDestroy (Cache_t *cache) +{ + CacheFlush( cache ); + +#if CACHE_DEBUG + /* Print cache report */ + printf ("Cache Report:\n"); + printf ("\tRead Requests: %d\n", cache->ReqRead); + printf ("\tWrite Requests: %d\n", cache->ReqWrite); + printf ("\tDisk Reads: %d\n", cache->DiskRead); + printf ("\tDisk Writes: %d\n", cache->DiskWrite); + printf ("\tSpans: %d\n", cache->Span); +#endif + /* Shutdown the LRU */ + LRUDestroy (&cache->LRU); + + /* I'm lazy, I'll come back to it :P */ + return (EOK); +} + +/* + * CacheRead + * + * Reads a range of bytes from the cache, returning a pointer to a buffer + * containing the requested bytes. + * + * NOTE: The returned buffer may directly refer to a cache block, or an + * anonymous buffer. Do not make any assumptions about the nature of + * the returned buffer, except that it is contiguous. + */ +int CacheRead (Cache_t *cache, uint64_t off, uint32_t len, Buf_t **bufp) +{ + Tag_t * tag; + Buf_t * searchBuf; + Buf_t * buf; + uint32_t coff = (off % cache->BlockSize); + uint64_t cblk = (off - coff); + int error; + + /* Check for conflicts with other bufs */ + searchBuf = cache->ActiveBufs; + while (searchBuf != NULL) { + if ((searchBuf->Offset >= off) && (searchBuf->Offset < off + len)) { +#if CACHE_DEBUG + printf ("ERROR: CacheRead: Deadlock\n"); +#endif + return (EDEADLK); + } + + searchBuf = searchBuf->Next; + } + + /* get a free buffer */ + if ((buf = cache->FreeBufs) == NULL) { +#if CACHE_DEBUG + printf ("ERROR: CacheRead: no more bufs!\n"); +#endif + return (ENOBUFS); + } + cache->FreeBufs = buf->Next; + *bufp = buf; + + /* Clear the buf structure */ + buf->Next = NULL; + buf->Prev = NULL; + buf->Flags = 0; + buf->Offset = off; + buf->Length = len; + buf->Buffer = NULL; + + /* If this is unaligned or spans multiple cache blocks */ + if ((cblk / cache->BlockSize) != ((off + len - 1) / cache->BlockSize)) { + buf->Flags |= BUF_SPAN; + } + /* Fetch the first cache block */ + error = CacheLookup (cache, cblk, &tag); + if (error != EOK) { +#if CACHE_DEBUG + printf ("ERROR: CacheRead: CacheLookup error %d\n", error); +#endif + return (error); + } + + /* If we live nicely inside a cache block */ + if (!(buf->Flags & BUF_SPAN)) { + /* Offset the buffer into the cache block */ + buf->Buffer = tag->Buffer + coff; + + /* Bump the cache block's reference count */ + tag->Refs++; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, 0); + + /* Otherwise, things get ugly */ + } else { + uint32_t boff; /* Offset into the buffer */ + uint32_t blen; /* Space to fill in the buffer */ + uint32_t temp; + + /* Allocate a temp buffer */ + buf->Buffer = (void *)malloc (len); + if (buf->Buffer == NULL) { +#if CACHE_DEBUG + printf ("ERROR: CacheRead: No Memory\n"); +#endif + return (ENOMEM); + } + + /* Blit the first chunk into the buffer */ + boff = cache->BlockSize - coff; + blen = len - boff; +#if CACHE_DEBUG + printf("INFO: memcpy(%p, %p + %u, %u)\n", buf->Buffer, tag->Buffer, coff, boff); +#endif + memcpy (buf->Buffer, tag->Buffer + coff, boff); + + /* Bump the cache block's reference count */ + tag->Refs++; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, 0); + + /* Next cache block */ + cblk += cache->BlockSize; + + /* Read data a cache block at a time */ + while (blen) { + /* Fetch the next cache block */ + error = CacheLookup (cache, cblk, &tag); + if (error != EOK) { + /* Free the allocated buffer */ + free (buf->Buffer); + buf->Buffer = NULL; + + /* Release all the held tags */ + cblk 
-= cache->BlockSize; + while (!boff) { + if (CacheLookup (cache, cblk, &tag) != EOK) { + fprintf (stderr, "CacheRead: Unrecoverable error\n"); + exit (-1); + } + tag->Refs--; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, 0); + } + + return (error); + } + + /* Blit the cache block into the buffer */ + temp = ((blen > cache->BlockSize) ? cache->BlockSize : blen); +#if CACHE_DEBUG + printf ("INFO: memcpy(%p + %u, %p, %u)\n", buf->Buffer, boff, tag->Buffer, temp); +#endif + memcpy (buf->Buffer + boff, + tag->Buffer, + temp); + + /* Update counters */ + boff += temp; + blen -= temp; + tag->Refs++; + + /* Advance to the next cache block */ + cblk += cache->BlockSize; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, 0); + } + + /* Count the spanned access */ + cache->Span++; + } + + /* Attach to head of active buffers list */ + if (cache->ActiveBufs != NULL) { + buf->Next = cache->ActiveBufs; + buf->Prev = NULL; + + cache->ActiveBufs->Prev = buf; + + } else { + cache->ActiveBufs = buf; + } + + /* Update counters */ + cache->ReqRead++; + return (EOK); +} + +/* + * XXX + * All of the uses of kLockWrite need to be audited for + * when the journal replay is writing. + */ +/* + * CacheWrite + * + * Writes a buffer through the cache. + */ +int CacheWrite ( Cache_t *cache, Buf_t *buf, int age, uint32_t writeOptions ) +{ + Tag_t * tag; + uint32_t coff = (buf->Offset % cache->BlockSize); + uint64_t cblk = (buf->Offset - coff); + int error; + + /* Fetch the first cache block */ + error = CacheLookup (cache, cblk, &tag); + if (error != EOK) return (error); + + /* If the buffer was a direct reference */ + if (!(buf->Flags & BUF_SPAN)) { + /* Commit the dirty block */ + if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 ) + { + /* Copy flags to tag */ + tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite)); + } + else + { + error = CacheRawWrite (cache, + tag->Offset, + cache->BlockSize, + tag->Buffer); + if (error != EOK) return (error); + } + + /* Release the reference */ + if ((writeOptions & kLockWrite) == 0) + tag->Refs--; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + + /* Otherwise, we do the ugly thing again */ + } else { + uint32_t boff; /* Offset into the buffer */ + uint32_t blen; /* Space to fill in the buffer */ + uint32_t temp; + + /* Blit the first chunk back into the cache */ + boff = cache->BlockSize - coff; + blen = buf->Length - boff; + memcpy (tag->Buffer + coff, buf->Buffer, boff); + + /* Commit the dirty block */ + if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 ) + { + /* flag this for lazy write */ + tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite)); + } + else + { + error = CacheRawWrite (cache, + tag->Offset, + cache->BlockSize, + tag->Buffer); + if (error != EOK) return (error); + } + + /* Release the cache block reference */ + if ((writeOptions & kLockWrite) == 0) + tag->Refs--; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + + /* Next cache block */ + cblk += cache->BlockSize; + + /* Write data a cache block at a time */ + while (blen) { + /* Fetch the next cache block */ + error = CacheLookup (cache, cblk, &tag); + /* We must go through with the write regardless */ + + /* Blit the next buffer chunk back into the cache */ + temp = ((blen > cache->BlockSize) ? 
cache->BlockSize : blen); + memcpy (tag->Buffer, + buf->Buffer + boff, + temp); + + /* Commit the dirty block */ + if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 ) + { + /* flag this for lazy write */ + tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite)); + } + else + { + error = CacheRawWrite (cache, + tag->Offset, + cache->BlockSize, + tag->Buffer); + if (error != EOK) return (error); + } + + /* Update counters */ + boff += temp; + blen -= temp; + if ((writeOptions & kLockWrite) == 0) + tag->Refs--; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + /* And go to the next cache block */ + cblk += cache->BlockSize; + } + + /* Release the anonymous buffer */ + free (buf->Buffer); + } + + /* Detach the buffer */ + if (buf->Next != NULL) + buf->Next->Prev = buf->Prev; + if (buf->Prev != NULL) + buf->Prev->Next = buf->Next; + if (cache->ActiveBufs == buf) + cache->ActiveBufs = buf->Next; + + /* Clear the buffer and put it back on free list */ + memset (buf, 0x00, sizeof (Buf_t)); + buf->Next = cache->FreeBufs; + cache->FreeBufs = buf; + + /* Update counters */ + cache->ReqWrite++; + + return (EOK); +} + +/* + * CacheRelease + * + * Releases a clean buffer. + * + * NOTE: We don't verify whether it's dirty or not. + */ +int CacheRelease (Cache_t *cache, Buf_t *buf, int age) +{ + Tag_t * tag; + uint32_t coff = (buf->Offset % cache->BlockSize); + uint64_t cblk = (buf->Offset - coff); + int error; + + /* Fetch the first cache block */ + error = CacheLookup (cache, cblk, &tag); + if (error != EOK) { +#if CACHE_DEBUG + printf ("ERROR: CacheRelease: CacheLookup error\n"); +#endif + return (error); + } + + /* If the buffer was a direct reference */ + if (!(buf->Flags & BUF_SPAN)) { + /* Release the reference */ + if ((tag->Flags & kLockWrite) == 0) { + tag->Refs--; + } + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + + /* Otherwise, we do the ugly thing again */ + } else { + uint32_t blen; /* Space to fill in the buffer */ + + /* Blit the first chunk back into the cache */ + blen = buf->Length - cache->BlockSize + coff; + + /* Release the cache block reference */ + if ((tag->Flags & kLockWrite) == 0) { + tag->Refs--; + } + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + + /* Next cache block */ + cblk += cache->BlockSize; + + /* Release cache blocks one at a time */ + while (blen) { + /* Fetch the next cache block */ + error = CacheLookup (cache, cblk, &tag); + /* We must go through with the write regardless */ + + /* Update counters */ + blen -= ((blen > cache->BlockSize) ? cache->BlockSize : blen); + if ((tag->Flags & kLockWrite) == 0) + tag->Refs--; + + /* Kick the node into the right queue */ + LRUHit (&cache->LRU, (LRUNode_t *)tag, age); + /* Advance to the next block */ + cblk += cache->BlockSize; + } + + /* Release the anonymous buffer */ + free (buf->Buffer); + } + + /* Detach the buffer */ + if (buf->Next != NULL) + buf->Next->Prev = buf->Prev; + if (buf->Prev != NULL) + buf->Prev->Next = buf->Next; + if (cache->ActiveBufs == buf) + cache->ActiveBufs = buf->Next; + + /* Clear the buffer and put it back on free list */ + memset (buf, 0x00, sizeof (Buf_t)); + buf->Next = cache->FreeBufs; + cache->FreeBufs = buf; + + return (EOK); +} + +/* + * CacheRemove + * + * Disposes of a particular buffer. 
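
[Editor's note] CacheRead and CacheWrite above both start by computing the offset within the first cache block (coff) and the block-aligned start (cblk), and take the "ugly" BUF_SPAN path only when the first and last requested byte fall in different cache blocks. A worked sketch of that span test, assuming the default 32 KiB cache block size:

#include <stdint.h>
#include <stdio.h>

/* Sketch: decide whether a request needs the BUF_SPAN (multi-block) path. */
static int
request_spans_blocks(uint64_t off, uint32_t len, uint32_t blockSize)
{
	uint32_t coff = (uint32_t)(off % blockSize);	/* offset within the first block */
	uint64_t cblk = off - coff;			/* first cache block offset */

	return (cblk / blockSize) != ((off + len - 1) / blockSize);
}

int
main(void)
{
	const uint32_t blockSize = 32 * 1024;	/* DefaultCacheBlockSize */

	/* 512 bytes at offset 0x4000 stays inside block 0 ... */
	printf("%d\n", request_spans_blocks(0x4000, 512, blockSize));	/* 0 */
	/* ... but 512 bytes at offset 0x7f80 crosses into block 1. */
	printf("%d\n", request_spans_blocks(0x7f80, 512, blockSize));	/* 1 */
	return 0;
}
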
+ */ +int CacheRemove (Cache_t *cache, Tag_t *tag) +{ + int error; + + /* Make sure it's not busy */ + if (tag->Refs) return (EBUSY); + + /* Detach the tag */ + if (tag->Next != NULL) + tag->Next->Prev = tag->Prev; + if (tag->Prev != NULL) + tag->Prev->Next = tag->Next; + else + cache->Hash[tag->Offset % cache->HashSize] = tag->Next; + + /* Make sure the head node doesn't have a back pointer */ + if ((cache->Hash[tag->Offset % cache->HashSize] != NULL) && + (cache->Hash[tag->Offset % cache->HashSize]->Prev != NULL)) { +#if CACHE_DEBUG + printf ("ERROR: CacheRemove: Corrupt hash chain\n"); +#endif + } + + /* Release it's buffer (if it has one) */ + if (tag->Buffer != NULL) + { + error = CacheFreeBlock (cache, tag); + if ( EOK != error ) + return( error ); + } + + /* Zero the tag (for easy debugging) */ + memset (tag, 0x00, sizeof (Tag_t)); + + /* Free the tag */ + free (tag); + + return (EOK); +} + +/* + * CacheEvict + * + * Only dispose of the buffer, leave the tag intact. + */ +int CacheEvict (Cache_t *cache, Tag_t *tag) +{ + int error; + + /* Make sure it's not busy */ + if (tag->Refs) return (EBUSY); + + /* Release the buffer */ + if (tag->Buffer != NULL) + { + error = CacheFreeBlock (cache, tag); + if ( EOK != error ) + return( error ); + } + tag->Buffer = NULL; + + return (EOK); +} + +/* + * CacheAllocBlock + * + * Allocate an unused cache block. + */ +void *CacheAllocBlock (Cache_t *cache) +{ + void * temp; + + if (cache->FreeHead == NULL) + return (NULL); + if (cache->FreeSize == 0) + return (NULL); + + temp = cache->FreeHead; + cache->FreeHead = *((void **)cache->FreeHead); + cache->FreeSize--; + + return (temp); +} + +/* + * CacheFreeBlock + * + * Release an active cache block. + */ +static int +CacheFreeBlock( Cache_t *cache, Tag_t *tag ) +{ + int error; + + if ( (tag->Flags & kLazyWrite) != 0 ) + { + /* this cache block has been marked for lazy write - do it now */ + error = CacheRawWrite( cache, + tag->Offset, + cache->BlockSize, + tag->Buffer ); + if ( EOK != error ) + { +#if CACHE_DEBUG + printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error ); +#endif + return ( error ); + } + tag->Flags &= ~kLazyWrite; + } + + if ((tag->Flags & kLockWrite) == 0) + { + *((void **)tag->Buffer) = cache->FreeHead; + cache->FreeHead = (void **)tag->Buffer; + cache->FreeSize++; + } + return( EOK ); +} + + +/* + * CacheFlush + * + * Write out any blocks that are marked for lazy write. + */ +int +CacheFlush( Cache_t *cache ) +{ + int error; + int i; + Tag_t * myTagPtr; + + for ( i = 0; i < cache->HashSize; i++ ) + { + myTagPtr = cache->Hash[ i ]; + + while ( NULL != myTagPtr ) + { + if ( (myTagPtr->Flags & kLazyWrite) != 0 ) + { + /* this cache block has been marked for lazy write - do it now */ + error = CacheRawWrite( cache, + myTagPtr->Offset, + cache->BlockSize, + myTagPtr->Buffer ); + if ( EOK != error ) + { +#if CACHE_DEBUG + printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error ); +#endif + return( error ); + } + myTagPtr->Flags &= ~kLazyWrite; + } + myTagPtr = myTagPtr->Next; + } /* while */ + } /* for */ + + return( EOK ); + +} /* CacheFlush */ + + +/* + * RangeIntersect + * + * Return true if the two given ranges intersect. 
+ * + */ +static int +RangeIntersect(uint64_t start1, uint64_t len1, uint64_t start2, uint64_t len2) +{ + uint64_t end1 = start1 + len1 - 1; + uint64_t end2 = start2 + len2 - 1; + + if (end1 < start2 || start1 > end2) + return 0; + else + return 1; +} + + +/* + * CacheFlushRange + * + * Flush, and optionally remove, all cache blocks that intersect + * a given range. + */ +static int +CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove) +{ + int error; + int i; + Tag_t *currentTag, *nextTag; + + for ( i = 0; i < cache->HashSize; i++ ) + { + currentTag = cache->Hash[ i ]; + + while ( NULL != currentTag ) + { + /* Keep track of the next block, in case we remove the current block */ + nextTag = currentTag->Next; + + if ( currentTag->Flags & kLazyWrite && + RangeIntersect(currentTag->Offset, cache->BlockSize, start, len)) + { + error = CacheRawWrite( cache, + currentTag->Offset, + cache->BlockSize, + currentTag->Buffer ); + if ( EOK != error ) + { +#if CACHE_DEBUG + printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error ); +#endif + return error; + } + currentTag->Flags &= ~kLazyWrite; + + if ( remove && ((currentTag->Flags & kLockWrite) == 0)) + CacheRemove( cache, currentTag ); + } + + currentTag = nextTag; + } /* while */ + } /* for */ + + return EOK; +} /* CacheFlushRange */ + +/* Function: CacheCopyDiskBlocks + * + * Description: Perform direct disk block copy from from_offset to to_offset + * of given length. + * + * The function flushes the cache blocks intersecting with disk blocks + * belonging to from_offset. Invalidating the disk blocks belonging to + * to_offset from the cache would have been sufficient, but its block + * start and end might not lie on cache block size boundary. Therefore we + * flush the disk blocks belonging to to_offset on the disk . + * + * The function performs raw read and write on the disk of cache block size, + * with exception of last operation. + * + * Note that the data written to disk does not exist in cache after + * this function. This function however ensures that if the device + * offset being read/written on disk existed in cache, it is invalidated and + * written to disk before performing any read/write operation. + * + * Input: + * 1. cache - pointer to cache. + * 2. from_offset - disk offset to copy from. + * 3. to_offset - disk offset to copy to. + * 4. len - length in bytes to be copied. Note that this length should be + * a multiple of disk block size, else read/write will return error. + * + * Output: + * zero (EOK) on success. + * On failure, non-zero value. + * Known error values: + * ENOMEM - insufficient memory to allocate intermediate copy buffer. 
+ * EINVAL - the length of data to read/write is not multiple of + * device block size, or + * the device offset is not multiple of device block size, or + * ENXIO - invalid disk offset + */ +int CacheCopyDiskBlocks (Cache_t *cache, uint64_t from_offset, uint64_t to_offset, uint32_t len) +{ + int i; + int error; + char *tmpBuffer = NULL; + uint32_t ioReqCount; + uint32_t numberOfBuffersToWrite; + + /* Return error if length of data to be written on disk is + * less than the length of the buffer to be written, or + * disk offsets are not multiple of device block size + */ + if ((len % cache->DevBlockSize) || + (from_offset % cache->DevBlockSize) || + (to_offset % cache->DevBlockSize)) { + error = EINVAL; + goto out; + } + + /* Flush contents of from_offset on the disk */ + error = CacheFlushRange(cache, from_offset, len, 1); + if (error != EOK) goto out; + + /* Flush contents of to_offset on the disk */ + error = CacheFlushRange(cache, to_offset, len, 1); + if (error != EOK) goto out; + + /* Allocate temporary buffer for reading/writing, currently + * set to block size of cache. + */ + tmpBuffer = malloc(cache->BlockSize); + if (!tmpBuffer) { +#if CACHE_DEBUG + printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)cache->BlockSize); +#endif + error = ENOMEM; + goto out; + } + + ioReqCount = cache->BlockSize; + numberOfBuffersToWrite = (len + ioReqCount - 1) / ioReqCount; + + for (i=0; i<numberOfBuffersToWrite; i++) { + if (i == (numberOfBuffersToWrite - 1)) { + /* last buffer */ + ioReqCount = len - (i * cache->BlockSize); + } + + /* Read data */ + error = CacheRawRead (cache, from_offset, ioReqCount, tmpBuffer); + if (error != EOK) goto out; + + /* Write data */ + error = CacheRawWrite (cache, to_offset, ioReqCount, tmpBuffer); + if (error != EOK) goto out; + +#if 0 + printf ("%s: Copying %d bytes from %qd to %qd\n", __FUNCTION__, ioReqCount, from_offset, to_offset); +#endif + + /* Increment offsets with data read/written */ + from_offset += ioReqCount; + to_offset += ioReqCount; + } + +out: + if (tmpBuffer) { + free (tmpBuffer); + } + return error; +} + +/* Function: CacheWriteBufferToDisk + * + * Description: Write data on disk starting at given offset for upto write_len. + * The data from given buffer upto buf_len is written to the disk starting + * at given offset. If the amount of data written on disk is greater than + * the length of buffer, all the remaining data is written as zeros. + * + * If no buffer is provided or if length of buffer is zero, the function + * writes zeros on disk from offset upto write_len bytes. + * + * The function requires the length of buffer is either equal to or less + * than the data to be written on disk. It also requires that the length + * of data to be written on disk is a multiple of device block size. + * + * Note that the data written to disk does not exist in cache after + * this function. This function however ensures that if the device + * offset being written on disk existed in cache, it is invalidated and + * written to disk before performing any read/write operation. + * + * Input: + * 1. cache - pointer to cache + * 2. offset - offset on disk to write data of buffer + * 3. buffer - pointer to data to be written on disk + * 4. len - length of buffer to be written on disk. + * + * Output: + * zero (EOK) on success. + * On failure, non-zero value. + * Known error values: + * ENOMEM - insufficient memory to allocate intermediate copy buffer. 
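
[Editor's note] CacheCopyDiskBlocks and CacheWriteBufferToDisk operate directly on the device and only use the cache to flush and invalidate any overlapping blocks first. A hedged usage sketch follows; the offsets, lengths, and function name are illustrative only, and it assumes both helpers are declared in cache.h and that 64 KiB is a multiple of the device block size in use:

#include <sys/types.h>
#include "cache.h"

/* Sketch: relocate one device-block-aligned region and write a short header
 * followed by zero fill into another. Real callers derive offsets and lengths
 * from on-disk volume structures. */
static int
example_disk_fixups(Cache_t *cache, uint32_t devBlockSize)
{
	int error;

	/* Copy 64 KiB of raw blocks from one device offset to another. */
	error = CacheCopyDiskBlocks(cache, 10 * (uint64_t)devBlockSize,
	    500 * (uint64_t)devBlockSize, 64 * 1024);
	if (error)
		return error;

	/* Write a 512-byte header; the rest of the 64 KiB range is zero-filled. */
	u_char header[512] = { 0xAB };
	return CacheWriteBufferToDisk(cache, 1000 * (uint64_t)devBlockSize,
	    64 * 1024, header, sizeof(header));
}
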
+ * EINVAL - the length of data to read/write is not multiple of + * device block size, or + * the device offset is not multiple of device block size, or + * the length of data to be written on disk is less than + * the length of buffer. + * ENXIO - invalid disk offset + */ +int CacheWriteBufferToDisk (Cache_t *cache, uint64_t offset, uint32_t write_len, u_char *buffer, uint32_t buf_len) +{ + int error; + u_char *write_buffer = NULL; + uint32_t io_count; + uint32_t buf_offset; + uint32_t bytes_remain; + uint8_t zero_fill = false; + + /* Check if buffer is provided */ + if (buffer == NULL) { + buf_len = 0; + } + + /* Return error if length of data to be written on disk is, + * less than the length of the buffer to be written, or + * is not a multiple of device block size, or offset to write + * is not multiple of device block size + */ + if ((write_len % cache->DevBlockSize) || + (offset % cache->DevBlockSize) || + (write_len < buf_len)) { + error = EINVAL; + goto out; + } + + /* Flush cache contents of offset range to be written on the disk */ + error = CacheFlushRange(cache, offset, write_len, 1); + if (error != EOK) { + goto out; + } + + /* Calculate correct size of buffer to be written each time */ + io_count = (write_len < cache->BlockSize) ? write_len : cache->BlockSize; + + /* Allocate temporary buffer to write data to disk */ + write_buffer = malloc (io_count); + if (!write_buffer) { +#if CACHE_DEBUG + printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)cache->BlockSize); +#endif + error = ENOMEM; + goto out; + } + + /* Read offset in data buffer to be written to disk */ + buf_offset = 0; + + while (write_len) { + /* The last buffer might be less than io_count bytes */ + if (write_len < io_count) { + io_count = write_len; + } + + /* Check whether data buffer was written completely to disk */ + if (buf_offset < buf_len) { + /* Calculate the bytes from data buffer still to be written */ + bytes_remain = buf_len - buf_offset; + + if (bytes_remain >= io_count) { + /* Bytes remaining is greater than bytes written in one + * IO request. Limit bytes read from data buffer in this + * pass to the bytes written in one IO request + */ + bytes_remain = io_count; + + /* Copy data from data buffer to write buffer */ + memcpy (write_buffer, buffer, bytes_remain); + } else { + /* Bytes remaining is less than bytes written in one + * IO request. Zero fill the remaining write buffer. + */ + + /* Copy data from data buffer to write buffer */ + memcpy (write_buffer, buffer, bytes_remain); + + /* Zero fill remain buffer, if any */ + memset (write_buffer + bytes_remain, 0, io_count - bytes_remain); + } + + buf_offset += bytes_remain; + buffer += bytes_remain; + } else { + /* Do not zero fill the buffer if we have already done it */ + if (zero_fill == false) { + /* Zero fill entire write buffer */ + memset (write_buffer, 0, io_count); + zero_fill = true; + } + } + + /* Write data */ + error = CacheRawWrite (cache, offset, io_count, write_buffer); + if (error != EOK) goto out; + + offset += io_count; + write_len -= io_count; + } + +out: + /* If we allocated a temporary buffer, deallocate it */ + if (write_buffer != NULL) { + free (write_buffer); + } + return error; +} + +/* + * CacheLookup + * + * Obtain a cache block. If one already exists, it is returned. Otherwise a + * new one is created and inserted into the cache. 
+ */ +int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag) +{ + Tag_t * temp; + uint32_t hash = off % cache->HashSize; + int error; + + *tag = NULL; + + /* Search the hash table */ + error = 0; + temp = cache->Hash[hash]; + while (temp != NULL) { + if (temp->Offset == off) break; + temp = temp->Next; + } + + /* If it's a hit */ + if (temp != NULL) { + /* Perform MTF if necessary */ + if (cache->Hash[hash] != temp) { + /* Disconnect the tag */ + if (temp->Next != NULL) + temp->Next->Prev = temp->Prev; + temp->Prev->Next = temp->Next; + } + + /* Otherwise, it's a miss */ + } else { + /* Allocate a new tag */ + temp = (Tag_t *)calloc (sizeof (Tag_t), 1);/* We really only need to zero the + LRU portion though */ + temp->Offset = off; + + /* Kick the tag onto the LRU */ + //LRUHit (&cache->LRU, (LRUNode_t *)temp, 0); + } + + /* Insert at the head (if it's not already there) */ + if (cache->Hash[hash] != temp) { + temp->Prev = NULL; + temp->Next = cache->Hash[hash]; + if (temp->Next != NULL) + temp->Next->Prev = temp; + cache->Hash[hash] = temp; + } + + /* Make sure there's a buffer */ + if (temp->Buffer == NULL) { + /* Find a free buffer */ + temp->Buffer = CacheAllocBlock (cache); + if (temp->Buffer == NULL) { + /* Try to evict a buffer */ + error = LRUEvict (&cache->LRU, (LRUNode_t *)temp); + if (error != EOK) return (error); + + /* Try again */ + temp->Buffer = CacheAllocBlock (cache); + if (temp->Buffer == NULL) { +#if CACHE_DEBUG + printf("%s(%d): CacheAllocBlock failed (FreeHead = %p, FreeSize = %u)\n", __FUNCTION__, __LINE__, cache->FreeHead, cache->FreeSize); +#endif + return (ENOMEM); + } + } + + /* Load the block from disk */ + error = CacheRawRead (cache, off, cache->BlockSize, temp->Buffer); + if (error != EOK) return (error); + } + +#if 0 + if (temp && temp->Flags & kLockWrite) { + fprintf(stderr, "CacheLookup(%p, %llu, %p): Found cache-locked block\n", cache, off, tag); + } +#endif + + *tag = temp; + return (EOK); +} + +/* + * CacheRawRead + * + * Perform a direct read on the file. + */ +int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf) +{ + uint64_t result; + ssize_t nread; + + /* Both offset and length must be multiples of the device block size */ + if (off % cache->DevBlockSize) return (EINVAL); + if (len % cache->DevBlockSize) return (EINVAL); + + /* Seek to the position */ + errno = 0; + result = lseek (cache->FD_R, off, SEEK_SET); + if (result == (off_t)-1 && errno != 0) + return errno; + if (result != off) return (ENXIO); + /* Read into the buffer */ +#if CACHE_DEBUG + printf("%s: offset %llu, len %u\n", __FUNCTION__, off, len); +#endif + nread = read (cache->FD_R, buf, len); + if (nread == -1) return (errno); + if (nread == 0) return (ENXIO); + + /* Update counters */ + cache->DiskRead++; + + return (EOK); +} + +/* + * CacheRawWrite + * + * Perform a direct write on the file. 
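
[Editor's note] CacheLookup hashes block-aligned offsets with a simple modulo into a table of CacheHashSize (257, a prime) chains and moves hits to the front of their chain. Because 257 shares no factor with the 32 KiB block size, consecutive block offsets land in distinct buckets rather than clustering. A tiny sketch of that bucket arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Sketch: how block-aligned offsets spread across the tag hash table. */
int
main(void)
{
	const uint32_t hashSize = 257;		/* CacheHashSize */
	const uint64_t blockSize = 32 * 1024;	/* DefaultCacheBlockSize */

	for (uint64_t block = 0; block < 5; block++) {
		uint64_t off = block * blockSize;
		printf("block %llu -> bucket %llu\n",
		    (unsigned long long)block,
		    (unsigned long long)(off % hashSize));
	}
	return 0;
}
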
+ */ +int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf) +{ + uint64_t result; + ssize_t nwritten; + + /* Both offset and length must be multiples of the device block size */ + if (off % cache->DevBlockSize) return (EINVAL); + if (len % cache->DevBlockSize) return (EINVAL); + + /* Seek to the position */ + errno = 0; + result = lseek (cache->FD_W, off, SEEK_SET); + if (result == (off_t)-1 && errno != 0) return (errno); + if (result != off) return (ENXIO); + + /* Write into the buffer */ + nwritten = write (cache->FD_W, buf, len); + if (nwritten == -1) return (errno); + if (nwritten == 0) return (ENXIO); + + /* Update counters */ + cache->DiskWrite++; + + return (EOK); +} + + + +/* + * LRUInit + * + * Initializes the LRU data structures. + */ +static int LRUInit (LRU_t *lru) +{ + /* Make the dummy nodes point to themselves */ + lru->Head.Next = &lru->Head; + lru->Head.Prev = &lru->Head; + + lru->Busy.Next = &lru->Busy; + lru->Busy.Prev = &lru->Busy; + + return (EOK); +} + +/* + * LRUDestroy + * + * Shutdown the LRU. + * + * NOTE: This is typically a no-op, since the cache manager will be clearing + * all cache tags. + */ +static int LRUDestroy (LRU_t *lru) +{ + /* Nothing to do */ + return (EOK); +} + +/* + * LRUHit + * + * Registers data activity on the given node. If the node is already in the + * LRU, it is moved to the front. Otherwise, it is inserted at the front. + * + * NOTE: If the node is not in the LRU, we assume that its pointers are NULL. + */ +static int LRUHit (LRU_t *lru, LRUNode_t *node, int age) +{ + /* Handle existing nodes */ + if ((node->Next != NULL) && (node->Prev != NULL)) { + /* Detach the node */ + node->Next->Prev = node->Prev; + node->Prev->Next = node->Next; + } + + /* If it's busy (we can't evict it) */ + if (((Tag_t *)node)->Refs) { + /* Insert at the head of the Busy queue */ + node->Next = lru->Busy.Next; + node->Prev = &lru->Busy; + + } else if (age) { + /* Insert at the head of the LRU */ + node->Next = &lru->Head; + node->Prev = lru->Head.Prev; + + } else { + /* Insert at the head of the LRU */ + node->Next = lru->Head.Next; + node->Prev = &lru->Head; + } + + node->Next->Prev = node; + node->Prev->Next = node; + + return (EOK); +} + +/* + * LRUEvict + * + * Chooses a buffer to release. + * + * TODO: Under extreme conditions, it shoud be possible to release the buffer + * of an actively referenced cache buffer, leaving the tag behind as a + * placeholder. This would be required for implementing 2Q-LRU + * replacement. + * + * NOTE: Make sure we never evict the node we're trying to find a buffer for! + */ +static int LRUEvict (LRU_t *lru, LRUNode_t *node) +{ + LRUNode_t * temp; + + /* Find a victim */ + while (1) { + /* Grab the tail */ + temp = lru->Head.Prev; + + /* Stop if we're empty */ + if (temp == &lru->Head) { +#if CACHE_DEBUG + printf("%s(%d): empty?\n", __FUNCTION__, __LINE__); +#endif + return (ENOMEM); + } + + /* Detach the tail */ + temp->Next->Prev = temp->Prev; + temp->Prev->Next = temp->Next; + + /* If it's not busy, we have a victim */ + if (!((Tag_t *)temp)->Refs) break; + + /* Insert at the head of the Busy queue */ + temp->Next = lru->Busy.Next; + temp->Prev = &lru->Busy; + + temp->Next->Prev = temp; + temp->Prev->Next = temp; + + /* Try again */ + } + + /* Remove the tag */ + CacheRemove ((Cache_t *)lru, (Tag_t *)temp); + + return (EOK); +} + +/* + * Dump the cache contents. + * If nobody else calls it, it gets optimized out. Annoying and yet + * useful. 
+ */ +void +dumpCache(Cache_t *cache) +{ + int i; + int numEntries = 0; + + printf("Cache:\n"); + printf("\tDevBlockSize = %u\n", cache->DevBlockSize); + printf("\tCache Block Size = %u\n", cache->BlockSize); + printf("\tHash Size = %u\n", cache->HashSize); + printf("\tHash Table:\n"); + for (i = 0; i < cache->HashSize; i++) { + Tag_t *tag; + + for (tag = cache->Hash[i]; tag; tag = tag->Next) { + numEntries++; + printf("\t\tOffset %llu, refs %u, Flags %#x (%skLazyWrite, %skLockWrite)\n", + tag->Offset, tag->Refs, tag->Flags, + (tag->Flags & kLazyWrite) ? "" : "no ", + (tag->Flags & kLockWrite) ? "" : "no "); + } + } + printf("\tNumber of entries: %u\n", numEntries); + return; +} + diff --git a/fsck_hfs/cache.h b/fsck_hfs/cache.h new file mode 100644 index 0000000..f64e279 --- /dev/null +++ b/fsck_hfs/cache.h @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * User Land Cache Manager + * + * A user land cache manager. + */ +#ifndef _CACHE_H_ +#define _CACHE_H_ +#include <stdint.h> + +/* Different values for initializing cache */ +enum { + /* Default sizes */ + DefaultCacheBlockSize = 0x8000, /* 32K */ + DefaultCacheBlocks = 1024, + DefaultCacheSize = (DefaultCacheBlockSize * DefaultCacheBlocks), /* 32MBytes */ + + /* Minimum allowed sizes */ + MinCacheBlockSize = 0x8000, /* 32K */ + MinCacheBlocks = 1024, + MinCacheSize = (MinCacheBlockSize * MinCacheBlocks), /* 32MBytes */ + + /* Maximum allowed sizes */ + MaxCacheBlockSize = 0x8000, /* 32K */ +#ifdef __LP64__ + MaxCacheBlocks = 0x18000, +#else + MaxCacheBlocks = 0x8000, +#endif + /* MaxCacheSize will be 3G for 64-bit, and 1G for 32-bit */ + MaxCacheSize = ((unsigned)MaxCacheBlockSize * MaxCacheBlocks), + CacheHashSize = 257, /* prime number */ +}; + +/* + * Some nice lowercase shortcuts. + */ +#define EOK 0 + +#define BUF_SPAN 0x80000000 /* Buffer spans several cache blocks */ + +typedef struct LRUNode_t +{ + struct LRUNode_t * Next; /* Next node in the LRU */ + struct LRUNode_t * Prev; /* Previous node in the LRU */ +} LRUNode_t; + +typedef struct LRU_t +{ + LRUNode_t Head; /* Dummy node for the head of the LRU */ + LRUNode_t Busy; /* List of busy nodes */ +} LRU_t; + + +#define MAXBUFS 48 +/* + * Buf_t + * + * Buffer structure exchanged between the cache and client. It contains the + * data buffer with the requested data, as well as housekeeping information + * that the cache needs. 
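
The sizing enum near the top of cache.h above pins the cache block size at 32 KiB and only varies the block count, so the commented limits follow directly from the arithmetic. A quick check, with the constants restated locally for illustration only:

    // Verifies the "32MBytes" default and the "3G for 64-bit, 1G for 32-bit" maximums.
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t blockSize = 0x8000;       // 32 KiB cache block
        printf("default:    %llu bytes\n", // 0x8000 * 1024    = 33,554,432    (32 MiB)
               (unsigned long long)(blockSize * 1024));
        printf("max 64-bit: %llu bytes\n", // 0x8000 * 0x18000 = 3,221,225,472 (3 GiB)
               (unsigned long long)(blockSize * 0x18000));
        printf("max 32-bit: %llu bytes\n", // 0x8000 * 0x8000  = 1,073,741,824 (1 GiB)
               (unsigned long long)(blockSize * 0x8000));
        return 0;
    }
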
+ */ +typedef struct Buf_t +{ + struct Buf_t * Next; /* Next active buffer */ + struct Buf_t * Prev; /* Previous active buffer */ + + uint32_t Flags; /* Buffer flags */ + uint64_t Offset; /* Start offset of the buffer */ + uint32_t Length; /* Size of the buffer in bytes */ + + void * Buffer; /* Buffer */ +} Buf_t; + +/* + * Tag_t + * + * The cache tag structure is a header for a cache buffer. It contains a + * pointer to the cache block and housekeeping information. The type of LRU + * algorithm can be swapped out easily. + * + * NOTE: The LRU field must be the first field, so we can easily cast between + * the two. + */ +typedef struct Tag_t +{ + LRUNode_t LRU; /* LRU specific data, must be first! */ + + struct Tag_t * Next; /* Next tag in hash chain */ + struct Tag_t * Prev; /* Previous tag in hash chain */ + + uint32_t Flags; + uint32_t Refs; /* Reference count */ + uint64_t Offset; /* Offset of the buffer */ + + void * Buffer; /* Cache page */ +} Tag_t; + + +/* Tag_t.Flags bit settings */ +enum { + kLazyWrite = 0x00000001, /* only write this page when evicting or forced */ + kLockWrite = 0x00000002, /* Never evict this page -- will not work with writing yet! */ +}; + +/* + * Cache_t + * + * The main cache data structure. The cache manages access between an open + * file and the cache client program. + * + * NOTE: The LRU field must be the first field, so we can easily cast between + * the two. + */ +typedef struct Cache_t +{ + LRU_t LRU; /* LRU replacement data structure */ + + int FD_R; /* File descriptor (read-only) */ + int FD_W; /* File descriptor (write-only) */ + uint32_t DevBlockSize; /* Device block size */ + + Tag_t ** Hash; /* Lookup hash table (move to front) */ + uint32_t HashSize; /* Size of the hash table */ + uint32_t BlockSize; /* Size of the cache page */ + + void * FreeHead; /* Head of the free list */ + uint32_t FreeSize; /* Size of the free list */ + + Buf_t * ActiveBufs; /* List of active buffers */ + Buf_t * FreeBufs; /* List of free buffers */ + + uint32_t ReqRead; /* Number of read requests */ + uint32_t ReqWrite; /* Number of write requests */ + + uint32_t DiskRead; /* Number of actual disk reads */ + uint32_t DiskWrite; /* Number of actual disk writes */ + + uint32_t Span; /* Requests that spanned cache blocks */ +} Cache_t; + +extern Cache_t fscache; + +/* + * CalculateCacheSizes + * + * Determine the cache size values (block size and total blocks) that should + * be used to initialize the cache. + */ +void CalculateCacheSizes(uint64_t userCacheSize, uint32_t *calcBlockSize, uint32_t *calcTotalBlocks, + char debug); +/* + * CacheInit + * + * Initializes the cache for use. + */ +int CacheInit (Cache_t *cache, int fdRead, int fdWrite, uint32_t devBlockSize, + uint32_t cacheBlockSize, uint32_t cacheSize, uint32_t hashSize, + int preTouch); + +/* + * CacheDestroy + * + * Shutdown the cache. + */ +int CacheDestroy (Cache_t *cache); + +/* + * CacheRead + * + * Reads a range of bytes from the cache, returning a pointer to a buffer + * containing the requested bytes. + * + * NOTE: The returned buffer may directly refer to a cache block, or an + * anonymous buffer. Do not make any assumptions about the nature of + * the returned buffer, except that it is contiguous. + */ +int CacheRead (Cache_t *cache, uint64_t start, uint32_t len, Buf_t **buf); + +/* + * CacheWrite + * + * Writes a buffer through the cache. + */ +int CacheWrite ( Cache_t *cache, Buf_t *buf, int age, uint32_t writeOptions ); + +/* + * CacheRelease + * + * Releases a clean buffer. 
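
Tag_t and Cache_t above both carry the warning that the LRU field must come first; the LRU code receives LRUNode_t pointers and casts them straight back to the containing structure, which is only valid because C guarantees that a pointer to a structure, suitably converted, points to its first member. A self-contained illustration with hypothetical names:

    // Why "LRU must be first": the embedded link and the containing object share an address.
    #include <assert.h>
    #include <stddef.h>

    typedef struct link { struct link *next, *prev; } link_t;

    typedef struct tag {
        link_t   lru;        // must stay the first member for the cast below
        unsigned refs;
    } tag_t;

    static tag_t *tag_from_link(link_t *l)
    {
        return (tag_t *)l;   // same address as the enclosing tag_t
    }

    int main(void)
    {
        tag_t t = { .refs = 1 };
        assert(offsetof(tag_t, lru) == 0);      // the invariant the NOTEs insist on
        assert(tag_from_link(&t.lru) == &t);    // round-trips to the same object
        return 0;
    }

A more general pattern subtracts offsetof(tag_t, lru) from the link address, which is how the same idea is usually written when the link cannot be the first member.
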
+ * + * NOTE: We don't verify whether it's dirty or not. + */ +int CacheRelease (Cache_t *cache, Buf_t *buf, int age); + +/* CacheRemove + * + * Disposes of a particular tag and buffer. + */ +int CacheRemove (Cache_t *cache, Tag_t *tag); + +/* + * CacheEvict + * + * Only dispose of the buffer, leave the tag intact. + */ +int CacheEvict (Cache_t *cache, Tag_t *tag); + +/* + * CacheFlush + * + * Write out any blocks that are marked for lazy write. + */ +int +CacheFlush( Cache_t *cache ); + +/* CacheCopyDiskBlocks + * + * Perform direct disk block copy from from_offset to to_offset of given length. + */ +int CacheCopyDiskBlocks (Cache_t *cache, uint64_t from_offset, uint64_t to_offset, uint32_t len); + +/* CacheWriteBufferToDisk + * + * Write data on disk starting at given offset for upto write_len. + * The data from given buffer upto buf_len is written to the disk starting + * at given offset. If the amount of data written on disk is greater than + * the length of buffer, all the remaining data is written as zeros. + * + * If no buffer is provided or if length of buffer is zero, the function + * writes zeros on disk from offset upto write_len bytes. + */ +int CacheWriteBufferToDisk (Cache_t *cache, uint64_t offset, uint32_t write_len, u_char *buffer, uint32_t buf_len); +#endif + diff --git a/fsck_hfs/dfalib/BTree.c b/fsck_hfs/dfalib/BTree.c new file mode 100644 index 0000000..887e77a --- /dev/null +++ b/fsck_hfs/dfalib/BTree.c @@ -0,0 +1,1806 @@ +/* + * Copyright (c) 1999, 2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTree.c + + Contains: Implementation of public interface routines for B-tree manager. + + Version: HFS Plus 1.0 + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. +*/ + +extern char debug; + +#include "BTree.h" +#include "BTreePrivate.h" +//#include "HFSInstrumentation.h" + + +extern Boolean NodesAreContiguous(SFCB *fcb, UInt32 nodeSize); +extern void fplog(FILE *stream, const char *fmt, ...); + +/*------------------------------------------------------------------------------- +Routine: CopyKey + +Function: Copy a BTree key. Sanity check the key length; if it is too large, + then set the copy's length to the BTree's maximum key length. 
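
Back in cache.h above, CacheWriteBufferToDisk documents an unusual contract: everything between buf_len and write_len is written out as zeros, and a missing or empty buffer means "write zeros for the whole range". A minimal way to honor that contract, sketched here with pwrite instead of the cache's seek-and-write pair and with no claim to match the real implementation:

    // Zero-padded direct write (illustrative). offset and write_len are assumed to obey
    // the same device-block alignment that CacheRawWrite enforces.
    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static int write_zero_padded(int fd, uint64_t offset, uint32_t write_len,
                                 const unsigned char *buffer, uint32_t buf_len)
    {
        unsigned char *staging = calloc(1, write_len);   // starts out all zeros
        if (staging == NULL)
            return ENOMEM;

        if (buffer != NULL && buf_len != 0)              // no buffer means "write zeros"
            memcpy(staging, buffer, buf_len < write_len ? buf_len : write_len);

        ssize_t n = pwrite(fd, staging, write_len, (off_t)offset);
        int err = (n == (ssize_t)write_len) ? 0 : (n < 0 ? errno : EIO);
        free(staging);
        return err;
    }
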
+ +Inputs: btcb BTree whose key is being copied + srcKey Source key being copied + +Output: destKey Destination where copy will be stored + +Result: none (void) +-------------------------------------------------------------------------------*/ +static void CopyKey(BTreeControlBlockPtr btcb, const BTreeKey *srcKey, BTreeKey *destKey) +{ + unsigned keySize = CalcKeySize(btcb, srcKey); + unsigned maxKeySize = MaxKeySize(btcb); + int fixLength = 0; + + /* + * If the key length is too long (corrupted), then constrain the number + * of bytes to copy. Remember that we did this so we can also update + * the copy's length field later. + */ + if (keySize > maxKeySize) + { + keySize = maxKeySize; + fixLength = 1; + } + + CopyMemory(srcKey, destKey, keySize); + + /* + * If we had to constrain the key size above, then also update the + * key length in the copy. This will prevent the caller from dereferencing + * part of the key which we never actually copied. + */ + if (fixLength) + { + if (btcb->attributes & kBTBigKeysMask) + destKey->length16 = btcb->maxKeyLength; + else + destKey->length8 = btcb->maxKeyLength; + } +} + + +//////////////////////////////////// Globals //////////////////////////////////// + + +/////////////////////////// BTree Module Entry Points /////////////////////////// + + +/*------------------------------------------------------------------------------- +Routine: InitBTreeModule - Initialize BTree Module Global(s). + +Function: Initialize BTree code, if necessary + +Input: none + +Output: none + +Result: noErr - success + != noErr - can't happen +-------------------------------------------------------------------------------*/ + +OSStatus InitBTreeModule(void) +{ + return noErr; +} + + +/*------------------------------------------------------------------------------- +Routine: BTInitialize - Initialize a fork for access as a B*Tree. + +Function: Write Header node and create any map nodes necessary to map the fork + as a B*Tree. If the fork is not large enough for the header node, the + FS Agent is called to extend the LEOF. Additional map nodes will be + allocated if necessary to represent the size of the fork. This allows + the FS Agent to specify the initial size of B*Tree files. 
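
BTInitialize below starts by vetting its arguments: the node size has to be one of 512 through 32768 bytes (a power of two, 2^n for n from 9 to 15), and an even maximum key length is bumped up by one so that the length byte plus key plus pad byte keeps records even-aligned. The same checks, pulled out into a standalone sketch with illustrative names:

    // Argument checks equivalent to the node-size switch and M_IsEven test in BTInitialize.
    #include <stdint.h>

    static int valid_node_size(uint32_t nodeSize)
    {
        // exactly one bit set, within [512, 32768]
        return nodeSize >= 512 && nodeSize <= 32768 &&
               (nodeSize & (nodeSize - 1)) == 0;
    }

    static uint16_t padded_key_length(uint16_t maxKeyLength)
    {
        return (maxKeyLength % 2 == 0) ? maxKeyLength + 1   // len byte + even bytes + pad byte
                                       : maxKeyLength;
    }
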
+ + +Input: pathPtr - pointer to path control block + maxKeyLength - maximum length that will be used for any key in this B*Tree + nodeSize - node size for B*Tree (must be 2^n, 9 >= n >= 15) + btreeType - type of B*Tree + keyDescPtr - pointer to key descriptor (optional if key compare proc is used) + +Output: none + +Result: noErr - success + paramErr - mandatory parameter was missing + E_NoGetBlockProc - FS Agent CB has no GetNodeProcPtr + E_NoReleaseBlockProc - FS Agent CB has no ReleaseNodeProcPtr + E_NoSetEndOfForkProc - FS Agent CB has no SetEndOfForkProcPtr + E_NoSetBlockSizeProc - FS Agent CB has no SetBlockSizeProcPtr + fsBTrFileAlreadyOpenErr - fork is already open as a B*Tree + fsBTInvalidKeyLengthErr - maximum key length is out of range + E_BadNodeType - node size is an illegal value + fsBTUnknownVersionErr - the B*Tree type is unknown by this module + memFullErr - not enough memory to initialize B*Tree + != noErr - failure +-------------------------------------------------------------------------------*/ +#if 0 +OSStatus BTInitialize (FCB *filePtr, + UInt16 maxKeyLength, + UInt16 nodeSize, + UInt8 btreeType, + KeyDescriptorPtr keyDescPtr ) +{ + OSStatus err; + FSForkControlBlockPtr forkPtr; + BTreeControlBlockPtr btreePtr; + BlockDescriptor headerNode; + HeaderPtr header; + Ptr pos; + FSSize minEOF, maxEOF; + SetEndOfForkProcPtr setEndOfForkProc; + SetBlockSizeProcPtr setBlockSizeProc; + + ////////////////////// Preliminary Error Checking /////////////////////////// + + headerNode.buffer = nil; + + if (pathPtr == nil) return paramErr; + + setEndOfForkProc = pathPtr->agentPtr->agent.setEndOfForkProc; + setBlockSizeProc = pathPtr->agentPtr->agent.setBlockSizeProc; + + if (pathPtr->agentPtr->agent.getBlockProc == nil) return E_NoGetBlockProc; + if (pathPtr->agentPtr->agent.releaseBlockProc == nil) return E_NoReleaseBlockProc; + if (setEndOfForkProc == nil) return E_NoSetEndOfForkProc; + if (setBlockSizeProc == nil) return E_NoSetBlockSizeProc; + + forkPtr = pathPtr->path.forkPtr; + + if (forkPtr->fork.btreePtr != nil) return fsBTrFileAlreadyOpenErr; + + if ((maxKeyLength == 0) || + (maxKeyLength > kMaxKeyLength)) return fsBTInvalidKeyLengthErr; + + if ( M_IsEven (maxKeyLength)) ++maxKeyLength; // len byte + even bytes + pad byte + + switch (nodeSize) // node size == 512*2^n + { + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + case 32768: break; + default: return E_BadNodeType; + } + + switch (btreeType) + { + case kHFSBTreeType: + case kUserBTreeType: + case kReservedBTreeType: break; + + default: return fsBTUnknownVersionErr; //¥¥ right? + } + + + //////////////////////// Allocate Control Block ///////////////////////////// + + M_RESIDENT_ALLOCATE_FIXED_CLEAR( &btreePtr, sizeof( BTreeControlBlock ), kFSBTreeControlBlockType ); + if (btreePtr == nil) + { + err = memFullErr; + goto ErrorExit; + } + + btreePtr->version = kBTreeVersion; //¥¥ what is the version? 
+ btreePtr->reserved1 = 0; + btreePtr->flags = 0; + btreePtr->attributes = 0; + btreePtr->forkPtr = forkPtr; + btreePtr->keyCompareProc = nil; + btreePtr->keyDescPtr = keyDescPtr; + btreePtr->btreeType = btreeType; + btreePtr->treeDepth = 0; + btreePtr->rootNode = 0; + btreePtr->leafRecords = 0; + btreePtr->firstLeafNode = 0; + btreePtr->lastLeafNode = 0; + btreePtr->nodeSize = nodeSize; + btreePtr->maxKeyLength = maxKeyLength; + btreePtr->totalNodes = 1; // so ExtendBTree adds maps nodes properly + btreePtr->freeNodes = 0; + btreePtr->writeCount = 1; // <CS10>, for BTree scanner + + // set block size = nodeSize + err = setBlockSizeProc (forkPtr, nodeSize); + M_ExitOnError (err); + + ////////////////////////////// Check LEOF /////////////////////////////////// + + minEOF = nodeSize; + if ( forkPtr->fork.logicalEOF < minEOF ) + { + // allocate more space if necessary + maxEOF 0xFFFFFFFFL; + + err = setEndOfForkProc (forkPtr, minEOF, maxEOF); + M_ExitOnError (err); + }; + + + //////////////////////// Initialize Header Node ///////////////////////////// + + err = GetNewNode (btreePtr, kHeaderNodeNum, &headerNode); + M_ExitOnError (err); + + header = headerNode.buffer; + + header->node.type = kHeaderNode; + header->node.numRecords = 3; // header rec, key desc, map rec + + header->nodeSize = nodeSize; + header->maxKeyLength = maxKeyLength; + header->btreeType = btreeType; + header->totalNodes = btreePtr->totalNodes; + header->freeNodes = btreePtr->totalNodes - 1; + // ignore header->clumpSize; //¥¥ rename this field? + + // mark header node allocated in map record + pos = ((Ptr)headerNode.buffer) + kHeaderMapRecOffset; + *pos = 0x80; + + // set node offsets ( nodeSize-8, F8, 78, 0E) + pos = ((Ptr)headerNode.buffer) + nodeSize; + pos -= 2; *((UInt16 *)pos) = kHeaderRecOffset; + pos -= 2; *((UInt16 *)pos) = kKeyDescRecOffset; + pos -= 2; *((UInt16 *)pos) = kHeaderMapRecOffset; + pos -= 2; *((UInt16 *)pos) = nodeSize - 8; + + + ///////////////////// Copy Key Descriptor To Header ///////////////////////// +#if SupportsKeyDescriptors + if (keyDescPtr != nil) + { + err = CheckKeyDescriptor (keyDescPtr, maxKeyLength); + M_ExitOnError (err); + + // copy to header node + pos = ((Ptr)headerNode.buffer) + kKeyDescRecOffset; + CopyMemory (keyDescPtr, pos, keyDescPtr->length + 1); + } +#endif + + // write header node + err = UpdateNode (btreePtr, &headerNode); + M_ExitOnError (err); + + + ////////////////////////// Allocate Map Nodes /////////////////////////////// + + err = ExtendBTree (btreePtr, forkPtr->fork.logicalEOF.lo / nodeSize); // sets totalNodes + M_ExitOnError (err); + + + ////////////////////////////// Close BTree ////////////////////////////////// + + err = UpdateHeader (btreePtr); + M_ExitOnError (err); + + pathPtr->path.forkPtr->fork.btreePtr = nil; + M_RESIDENT_DEALLOCATE_FIXED( btreePtr, sizeof( BTreeControlBlock ), kFSBTreeControlBlockType ); + + return noErr; + + + /////////////////////// Error - Clean up and Exit /////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &headerNode); + if (btreePtr != nil) + M_RESIDENT_DEALLOCATE_FIXED( btreePtr, sizeof( BTreeControlBlock ), kFSBTreeControlBlockType ); + + return err; +} +#endif + + +/*------------------------------------------------------------------------------- +Routine: BTOpenPath - Open a file for access as a B*Tree. + +Function: Create BTree control block for a file, if necessary. Validates the + file to be sure it looks like a BTree file. 
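
The header-node setup above also shows where a node keeps its record map: 16-bit record offsets are written backwards from the end of the node, so the offsets 0x0E, 0x78 and 0xF8 land in the last eight bytes along with nodeSize - 8, the start of free space. Reading them back is just indexing from the tail, as in this sketch (native byte order assumed; on disk the values are big-endian):

    // Record i of a B-tree node starts at the offset stored at bytes nodeSize - 2*(i+1).
    #include <stdint.h>
    #include <string.h>

    static uint16_t record_offset(const uint8_t *node, uint16_t nodeSize, int index)
    {
        uint16_t off;
        memcpy(&off, node + nodeSize - 2 * (index + 1), sizeof off);   // memcpy avoids
        return off;                                                    // alignment traps
    }

With numRecords records there are numRecords + 1 offsets; the extra entry marks where free space begins, which is what the nodeSize - 8 value is doing for the freshly initialized header node.
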
+ + +Input: filePtr - pointer to file to open as a B-tree + keyCompareProc - pointer to client's KeyCompare function + getBlockProc - pointer to client's GetBlock function + releaseBlockProc - pointer to client's ReleaseBlock function + setEndOfForkProc - pointer to client's SetEOF function + +Result: noErr - success + paramErr - required ptr was nil + fsBTInvalidFileErr - + memFullErr - + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTOpenPath (SFCB *filePtr, + KeyCompareProcPtr keyCompareProc, + GetBlockProcPtr getBlockProc, + ReleaseBlockProcPtr releaseBlockProc, + SetEndOfForkProcPtr setEndOfForkProc, + SetBlockSizeProcPtr setBlockSizeProc ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + BTHeaderRec *header; + NodeRec nodeRec; + +// LogStartTime(kTraceOpenBTree); + + ////////////////////// Preliminary Error Checking /////////////////////////// + + if ( filePtr == nil || + getBlockProc == nil || + releaseBlockProc == nil || + setEndOfForkProc == nil || + setBlockSizeProc == nil ) + { + return paramErr; + } + + if ( filePtr->fcbBtree != nil ) // already has a BTreeCB + return noErr; + + // is file large enough to contain header node? + if ( filePtr->fcbLogicalSize < kMinNodeSize ) + return fsBTInvalidFileErr; //¥¥ or E_BadHeader? + + + //////////////////////// Allocate Control Block ///////////////////////////// + + btreePtr = (BTreeControlBlock*) AllocateClearMemory( sizeof( BTreeControlBlock ) ); + if (btreePtr == nil) + { + Panic ("\pBTOpen: no memory for btreePtr."); + return memFullErr; + } + + btreePtr->getBlockProc = getBlockProc; + btreePtr->releaseBlockProc = releaseBlockProc; + btreePtr->setEndOfForkProc = setEndOfForkProc; + btreePtr->keyCompareProc = keyCompareProc; + + /////////////////////////// Read Header Node //////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + btreePtr->fcbPtr = filePtr; + filePtr->fcbBtree = (void *) btreePtr; // attach btree cb to file + + // it is now safe to call M_ExitOnError (err) + + err = setBlockSizeProc (btreePtr->fcbPtr, kMinNodeSize); + M_ExitOnError (err); + + + err = getBlockProc (btreePtr->fcbPtr, + kHeaderNodeNum, + kGetBlock, + &nodeRec ); + + PanicIf (err != noErr, "\pBTOpen: getNodeProc returned error getting header node."); + M_ExitOnError (err); + + header = (BTHeaderRec*) (nodeRec.buffer + sizeof(BTNodeDescriptor)); + + + ///////////////////////////// verify header ///////////////////////////////// + + err = VerifyHeader (filePtr, header); + M_ExitOnError (err); + + + ///////////////////// Initalize fields from header ////////////////////////// + + PanicIf ( (filePtr->fcbVolume->vcbSignature != 0x4244) && (btreePtr->nodeSize == 512), "\p BTOpenPath: wrong node size for HFS+ volume!"); + + btreePtr->treeDepth = header->treeDepth; + btreePtr->rootNode = header->rootNode; + btreePtr->leafRecords = header->leafRecords; + btreePtr->firstLeafNode = header->firstLeafNode; + btreePtr->lastLeafNode = header->lastLeafNode; + btreePtr->nodeSize = header->nodeSize; + btreePtr->maxKeyLength = header->maxKeyLength; + btreePtr->totalNodes = header->totalNodes; + btreePtr->freeNodes = header->freeNodes; + // ignore header->clumpSize; //¥¥ rename this field? 
+ btreePtr->btreeType = header->btreeType; + + btreePtr->attributes = header->attributes; + + if ( btreePtr->maxKeyLength > 40 ) + btreePtr->attributes |= (kBTBigKeysMask + kBTVariableIndexKeysMask); //¥¥ we need a way to save these attributes + + /////////////////////// Initialize dynamic fields /////////////////////////// + + btreePtr->version = kBTreeVersion; + btreePtr->flags = 0; + btreePtr->writeCount = 1; // <CS10>, for BTree scanner + + btreePtr->numGetNodes = 1; // for earlier call to getNodeProc + + /////////////////////////// Check Header Node /////////////////////////////// + + //¥¥ set kBadClose attribute bit, and UpdateNode + + /* + * If the actual node size is different than the amount we read, + * then release and trash this block, and re-read with the correct + * node size. + */ + if ( btreePtr->nodeSize != kMinNodeSize ) + { + err = setBlockSizeProc (btreePtr->fcbPtr, btreePtr->nodeSize); + M_ExitOnError (err); + +#if BSD + /* + * Need to use kTrashBlock option to force the + * buffer cache to re-read the entire node + */ + err = releaseBlockProc(btreePtr->fcbPtr, &nodeRec, kTrashBlock); +#else + err = ReleaseNode (btreePtr, &nodeRec); +#endif + + err = GetNode (btreePtr, kHeaderNodeNum, &nodeRec ); // calls CheckNode... + M_ExitOnError (err); + } + + //¥¥ total nodes * node size <= LEOF? + + + ////////////////////////// Get Key Descriptor /////////////////////////////// +#if SupportsKeyDescriptors + if ( keyCompareProc == nil ) // if no key compare proc then get key descriptor + { + err = GetKeyDescriptor (btreePtr, nodeRec.buffer); //¥¥ it should check amount of memory allocated... + M_ExitOnError (err); + + err = CheckKeyDescriptor (btreePtr->keyDescPtr, btreePtr->maxKeyLength); + M_ExitOnError (err); + + } + else +#endif + { + btreePtr->keyDescPtr = nil; // clear it so we don't dispose garbage later + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + +#if BSD + /* + * Under Mac OS, b-tree nodes can be non-contiguous on disk when the + * allocation block size is smaller than the b-tree node size. + */ + if ( !NodesAreContiguous(filePtr, btreePtr->nodeSize) ) { + if (debug) fplog(stderr, "Nodes are not contiguous -- this is fatal\n"); + return fsBTInvalidNodeErr; + } +#endif + + //////////////////////////////// Success //////////////////////////////////// + + //¥¥ align LEOF to multiple of node size? - just on close + +// LogEndTime(kTraceOpenBTree, noErr); + + return noErr; + + + /////////////////////// Error - Clean up and Exit /////////////////////////// + +ErrorExit: + + filePtr->fcbBtree = nil; + (void) ReleaseNode (btreePtr, &nodeRec); + DisposeMemory( btreePtr ); + +// LogEndTime(kTraceOpenBTree, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTClosePath - Flush BTree Header and Deallocate Memory for BTree. + +Function: Flush the BTreeControlBlock fields to header node, and delete BTree control + block and key descriptor associated with the file if filePtr is last + path of type kBTreeType ('btre'). + + +Input: filePtr - pointer to file to delete BTree control block for. 
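
One detail of BTOpenPath above worth calling out: the node size is not known until the header has been read, so the header node is first fetched with the 512-byte minimum, and if BTHeaderRec.nodeSize disagrees the cached block is trashed (kTrashBlock) and re-read at the real size. The control flow, reduced to a sketch in which read_node and header_node_size are hypothetical stand-ins for the client I/O procs and the header accessor:

    // Bootstrap read of the header node (node 0) when the node size is not yet known.
    #include <stdint.h>

    typedef int (*read_node_fn)(void *ctx, uint32_t nodeNum, uint32_t bytes, void *buf);

    extern uint16_t header_node_size(const void *headerNode);  // pulls nodeSize out of the header record

    static int read_btree_header(void *ctx, read_node_fn read_node, void *buf)
    {
        const uint32_t kMinNodeSize = 512;                 // smallest legal node
        int err = read_node(ctx, 0, kMinNodeSize, buf);    // node 0 is the header node
        if (err != 0)
            return err;

        uint16_t nodeSize = header_node_size(buf);
        if (nodeSize != kMinNodeSize)                      // bigger nodes in use:
            err = read_node(ctx, 0, nodeSize, buf);        // discard and re-read in full
        return err;
    }

Here buf is assumed large enough for the biggest node size the caller will accept; the real code instead trashes the cached block so the buffer cache re-reads the entire node.
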
+ +Result: noErr - success + fsBTInvalidFileErr - + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTClosePath (SFCB *filePtr) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; +#if 0 + FSPathControlBlockPtr tempPath; + Boolean otherBTreePathsOpen; +#endif + +// LogStartTime(kTraceCloseBTree); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + if (btreePtr == nil) + return fsBTInvalidFileErr; + + ////////////////////// Check for other BTree Paths ////////////////////////// + +#if 0 +//¥¥ Need replacement field for pathType + otherBTreePathsOpen = false; + tempPath = forkPtr->fork.pathList.head; + while ( (tempPath != (FSPathControlBlockPtr) &forkPtr->fork.pathList) && + (otherBTreePathsOpen == false) ) + { + if ((tempPath != pathPtr) && (tempPath->path.pathType == kBTreeType)) + { + otherBTreePathsOpen = true; + break; // done with loop check + } + + tempPath = tempPath->next; + } + + ////////////////////////// Update Header Node /////////////////////////////// + + + if (otherBTreePathsOpen == true) + { + err = UpdateHeader (btreePtr); // update header even if we aren't closing + return err; // we only clean up after the last user... + } +#endif + + btreePtr->attributes &= ~kBTBadCloseMask; // clear "bad close" attribute bit + err = UpdateHeader (btreePtr); + M_ExitOnError (err); + +#if SupportsKeyDescriptors + if (btreePtr->keyDescPtr != nil) // deallocate keyDescriptor, if any + { + DisposeMemory( btreePtr->keyDescPtr ); + } +#endif + + DisposeMemory( btreePtr ); + filePtr->fcbBtree = nil; + +// LogEndTime(kTraceCloseBTree, noErr); + + return noErr; + + /////////////////////// Error - Clean Up and Exit /////////////////////////// + +ErrorExit: + +// LogEndTime(kTraceCloseBTree, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTSearchRecord - Search BTree for a record with a matching key. + +Function: Search for position in B*Tree indicated by searchKey. If a valid node hint + is provided, it will be searched first, then SearchTree will be called. + If a BTreeIterator is provided, it will be set to the position found as + a result of the search. If a record exists at that position, and a BufferDescriptor + is supplied, the record will be copied to the buffer (as much as will fit), + and recordLen will be set to the length of the record. + + If an error other than fsBTRecordNotFoundErr occurs, the BTreeIterator, if any, + is invalidated, and recordLen is set to 0. + + +Input: pathPtr - pointer to path for BTree file. + searchKey - pointer to search key to match. 
+ hintPtr - pointer to hint (may be nil) + +Output: record - pointer to BufferDescriptor containing record + recordLen - length of data at recordPtr + iterator - pointer to BTreeIterator indicating position result of search + +Result: noErr - success, record contains copy of record found + fsBTRecordNotFoundErr - record was not found, no data copied + fsBTInvalidFileErr - no BTreeControlBlock is allocated for the fork + fsBTInvalidKeyLengthErr - + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTSearchRecord (SFCB *filePtr, + BTreeIterator *searchIterator, + UInt32 heuristicHint, + FSBufferDescriptor *record, + UInt16 *recordLen, + BTreeIterator *resultIterator ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + UInt32 nodeNum = 0; + BlockDescriptor node; + UInt16 index; + BTreeKeyPtr keyPtr; + RecordPtr recordPtr; + UInt16 len; + Boolean foundRecord; + Boolean validHint; + + +// LogStartTime(kTraceSearchBTree); + + if (filePtr == nil) return paramErr; + if (searchIterator == nil) return paramErr; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + if (btreePtr == nil) return fsBTInvalidFileErr; + +#if SupportsKeyDescriptors + if (btreePtr->keyCompareProc == nil) // CheckKey if we using Key Descriptor + { + err = CheckKey (&searchIterator->key, btreePtr->keyDescPtr, btreePtr->maxKeyLength); + M_ExitOnError (err); + } +#endif + + foundRecord = false; + + ////////////////////////////// Take A Hint ////////////////////////////////// + + err = IsItAHint (btreePtr, searchIterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + nodeNum = searchIterator->hint.nodeNum; + + err = GetNode (btreePtr, nodeNum, &node); + if( err == noErr ) + { + if ( ((BTNodeDescriptor*) node.buffer)->kind == kBTLeafNode && + ((BTNodeDescriptor*) node.buffer)->numRecords > 0 ) + { + foundRecord = SearchNode (btreePtr, node.buffer, &searchIterator->key, &index); + + //¥¥ if !foundRecord, we could still skip tree search if ( 0 < index < numRecords ) + } + + if (foundRecord == false) + { + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + } + else + { + ++btreePtr->numValidHints; + } + } + + if( foundRecord == false ) + (void) BTInvalidateHint( searchIterator ); + } + + ////////////////////////////// Try the heuristicHint ////////////////////////////////// + + if ( (foundRecord == false) && (heuristicHint != kInvalidMRUCacheKey) && (nodeNum != heuristicHint) ) + { + // LogStartTime(kHeuristicHint); + nodeNum = heuristicHint; + + err = GetNode (btreePtr, nodeNum, &node); + if( err == noErr ) + { + if ( ((BTNodeDescriptor*) node.buffer)->kind == kBTLeafNode && + ((BTNodeDescriptor*) node.buffer)->numRecords > 0 ) + { + foundRecord = SearchNode (btreePtr, node.buffer, &searchIterator->key, &index); + } + + if (foundRecord == false) + { + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + } + } + // LogEndTime(kHeuristicHint, (foundRecord == false)); + } + + //////////////////////////// Search The Tree //////////////////////////////// + + if (foundRecord == false) + { + err = SearchTree ( btreePtr, &searchIterator->key, treePathTable, &nodeNum, &node, &index); + switch (err) + { + case noErr: foundRecord = true; break; + case fsBTRecordNotFoundErr: break; + default: goto ErrorExit; + } + } + + + //////////////////////////// Get the Record ///////////////////////////////// + + if (foundRecord == true) + { + //¥¥ Should check for errors! Or BlockMove could choke on recordPtr!!! 
+ GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len); + + if (recordLen != nil) *recordLen = len; + + if (record != nil) + { + ByteCount recordSize; + + recordSize = record->itemCount * record->itemSize; + + PanicIf(len > recordSize, "\pBTSearchRecord: truncating record!"); + + if (len > recordSize) len = recordSize; + + CopyMemory (recordPtr, record->bufferAddress, len); + } + } + + + /////////////////////// Success - Update Iterator /////////////////////////// + + if (resultIterator != nil) + { + resultIterator->hint.writeCount = btreePtr->writeCount; + resultIterator->hint.nodeNum = nodeNum; + resultIterator->hint.index = index; + resultIterator->hint.reserved1 = 0; + resultIterator->hint.reserved2 = 0; + + resultIterator->version = 0; + resultIterator->reserved = 0; + + // copy the key in the BTree when found rather than searchIterator->key to get proper case/diacriticals + if (foundRecord == true) + CopyKey(btreePtr, keyPtr, &resultIterator->key); + else + CopyKey(btreePtr, &searchIterator->key, &resultIterator->key); + } + + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + +// LogEndTime(kTraceSearchBTree, (foundRecord == false)); + + if (foundRecord == false) return fsBTRecordNotFoundErr; + else return noErr; + + + /////////////////////// Error - Clean Up and Exit /////////////////////////// + +ErrorExit: + + if (recordLen != nil) + *recordLen = 0; + + if (resultIterator != nil) + { + resultIterator->hint.writeCount = 0; + resultIterator->hint.nodeNum = 0; + resultIterator->hint.index = 0; + resultIterator->hint.reserved1 = 0; + resultIterator->hint.reserved2 = 0; + + resultIterator->version = 0; + resultIterator->reserved = 0; + resultIterator->key.length16 = 0; // zero out two bytes to cover both types of keys + } + + if ( err == fsBTEmptyErr ) + err = fsBTRecordNotFoundErr; + +// LogEndTime(kTraceSearchBTree, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTIterateRecord - Find the first, next, previous, or last record. + +Function: Find the first, next, previous, or last record in the BTree + +Input: pathPtr - pointer to path iterate records for. 
+ operation - iteration operation (first,next,prev,last) + iterator - pointer to iterator indicating start position + +Output: iterator - iterator is updated to indicate new position + newKeyPtr - pointer to buffer to copy key found by iteration + record - pointer to buffer to copy record found by iteration + recordLen - length of record + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTIterateRecord (SFCB *filePtr, + BTreeIterationOperation operation, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 *recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + BTreeKeyPtr keyPtr; + RecordPtr recordPtr; + UInt16 len; + + Boolean foundRecord; + UInt32 nodeNum; + + BlockDescriptor left, node, right; + UInt16 index; + + +// LogStartTime(kTraceGetBTreeRecord); + + ////////////////////////// Priliminary Checks /////////////////////////////// + + left.buffer = nil; + right.buffer = nil; + node.buffer = nil; + + + if (filePtr == nil) + { + return paramErr; + } + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + if (btreePtr == nil) + { + return fsBTInvalidFileErr; //¥¥ handle properly + } + + if ((operation != kBTreeFirstRecord) && + (operation != kBTreeNextRecord) && + (operation != kBTreeCurrentRecord) && + (operation != kBTreePrevRecord) && + (operation != kBTreeLastRecord)) + { + err = fsInvalidIterationMovmentErr; + goto ErrorExit; + } + + /////////////////////// Find First or Last Record /////////////////////////// + + if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord)) + { + if (operation == kBTreeFirstRecord) nodeNum = btreePtr->firstLeafNode; + else nodeNum = btreePtr->lastLeafNode; + + if (nodeNum == 0) + { + err = fsBTEmptyErr; + goto ErrorExit; + } + + err = GetNode (btreePtr, nodeNum, &node); + M_ExitOnError (err); + + if ( ((NodeDescPtr) node.buffer)->kind != kBTLeafNode || + ((NodeDescPtr) node.buffer)->numRecords <= 0 ) + { + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + + if (debug) fprintf(stderr, "%s(%d): returning fsBTInvalidNodeErr\n", __FUNCTION__, __LINE__); + err = fsBTInvalidNodeErr; + goto ErrorExit; + } + + if (operation == kBTreeFirstRecord) index = 0; + else index = ((BTNodeDescriptor*) node.buffer)->numRecords - 1; + + goto CopyData; //¥¥ is there a cleaner way? 
+ } + + + //////////////////////// Find Iterator Position ///////////////////////////// + + err = FindIteratorPosition (btreePtr, iterator, + &left, &node, &right, &nodeNum, &index, &foundRecord); + M_ExitOnError (err); + + + ///////////////////// Find Next Or Previous Record ////////////////////////// + + if (operation == kBTreePrevRecord) + { + if (index > 0) + { + --index; + } + else + { + if (left.buffer == nil) + { + nodeNum = ((NodeDescPtr) node.buffer)->bLink; + if ( nodeNum > 0) + { + err = GetNode (btreePtr, nodeNum, &left); + M_ExitOnError (err); + } else { + err = fsBTStartOfIterationErr; + goto ErrorExit; + } + } + // Before we stomp on "right", we'd better release it if needed + if (right.buffer != nil) { + err = ReleaseNode(btreePtr, &right); + M_ExitOnError(err); + } + right = node; + node = left; + left.buffer = nil; + index = ((NodeDescPtr) node.buffer)->numRecords -1; + } + } + else if (operation == kBTreeNextRecord) + { + if ((foundRecord != true) && + (((NodeDescPtr) node.buffer)->fLink == 0) && + (index == ((NodeDescPtr) node.buffer)->numRecords)) + { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + + // we did not find the record but the index is already positioned correctly + if ((foundRecord == false) && (index != ((NodeDescPtr) node.buffer)->numRecords)) + goto CopyData; + + // we found the record OR we have to look in the next node + if (index < ((NodeDescPtr) node.buffer)->numRecords -1) + { + ++index; + } + else + { + if (right.buffer == nil) + { + nodeNum = ((NodeDescPtr) node.buffer)->fLink; + if ( nodeNum > 0) + { + err = GetNode (btreePtr, nodeNum, &right); + M_ExitOnError (err); + } else { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + } + // Before we stomp on "left", we'd better release it if needed + if (left.buffer != nil) { + err = ReleaseNode(btreePtr, &left); + M_ExitOnError(err); + } + left = node; + node = right; + right.buffer = nil; + index = 0; + } + } + else // operation == kBTreeCurrentRecord + { + // make sure we have something... 
<CS9> + if ((foundRecord != true) && + (index >= ((NodeDescPtr) node.buffer)->numRecords)) + { + err = fsBTEndOfIterationErr; + goto ErrorExit; + } + } + + //////////////////// Copy Record And Update Iterator //////////////////////// + +CopyData: + + // added check for errors <CS9> + err = GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len); + M_ExitOnError (err); + + if (recordLen != nil) *recordLen = len; + + if (record != nil) + { + ByteCount recordSize; + + recordSize = record->itemCount * record->itemSize; + + PanicIf(len > recordSize, "\pBTIterateRecord: truncating record!"); + + if (len > recordSize) len = recordSize; + + CopyMemory (recordPtr, record->bufferAddress, len); + } + + if (iterator != nil) // first & last do not require iterator + { + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = nodeNum; + iterator->hint.index = index; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + iterator->version = 0; + iterator->reserved = 0; + + CopyKey(btreePtr, keyPtr, &iterator->key); + } + + + ///////////////////////////// Release Nodes ///////////////////////////////// + + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + + if (left.buffer != nil) + { + err = ReleaseNode (btreePtr, &left); + M_ExitOnError (err); + } + + if (right.buffer != nil) + { + err = ReleaseNode (btreePtr, &right); + M_ExitOnError (err); + } + +// LogEndTime(kTraceGetBTreeRecord, noErr); + + return noErr; + + /////////////////////// Error - Clean Up and Exit /////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &left); + (void) ReleaseNode (btreePtr, &node); + (void) ReleaseNode (btreePtr, &right); + + if (recordLen != nil) + *recordLen = 0; + + if (iterator != nil) + { + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + iterator->version = 0; + iterator->reserved = 0; + iterator->key.length16 = 0; + } + + if ( err == fsBTEmptyErr || err == fsBTEndOfIterationErr ) + err = fsBTRecordNotFoundErr; + +// LogEndTime(kTraceGetBTreeRecord, err); + + return err; +} + + +//////////////////////////////// BTInsertRecord ///////////////////////////////// + +OSStatus BTInsertRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + SInt32 nodesNeeded; + BlockDescriptor nodeRec; + UInt32 insertNodeNum; + UInt16 index; + Boolean recordFit; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + err = CheckInsertParams (filePtr, iterator, record, recordLen); + if (err != noErr) + return err; + +// LogStartTime(kTraceInsertBTreeRecord); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + ///////////////////////// Find Insert Position ////////////////////////////// + + // always call SearchTree for Insert + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + + switch (err) // set/replace/insert decision point + { + case noErr: err = fsBTDuplicateRecordErr; + goto ErrorExit; + + case fsBTRecordNotFoundErr: break; + + case fsBTEmptyErr: // if tree empty add 1st leaf node + + if (btreePtr->freeNodes == 0) + { + err = ExtendBTree (btreePtr, btreePtr->totalNodes + 1); + M_ExitOnError (err); + } + + err = AllocateNode (btreePtr, &insertNodeNum); + M_ExitOnError (err); 
+ + err = GetNewNode (btreePtr, insertNodeNum, &nodeRec); + M_ExitOnError (err); + + ((NodeDescPtr)nodeRec.buffer)->kind = kBTLeafNode; + ((NodeDescPtr)nodeRec.buffer)->height = 1; + + recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, 0, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen ); + if (recordFit != true) + { + err = fsBTRecordTooLargeErr; + goto ErrorExit; + } + + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + // update BTreeControlBlock + btreePtr->treeDepth = 1; + btreePtr->rootNode = insertNodeNum; + btreePtr->firstLeafNode = insertNodeNum; + btreePtr->lastLeafNode = insertNodeNum; + M_BTreeHeaderDirty (btreePtr); + + goto Success; + + default: + goto ErrorExit; + } + + if (index > 0) + { + recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, index, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen); + if (recordFit == true) + { + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + goto Success; + } + } + + /////////////////////// Extend File If Necessary //////////////////////////// + + nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //¥¥ math limit + if (nodesNeeded > 0) + { + nodesNeeded += btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! + ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + // no need to delete existing record + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, kInsertRecord, &insertNodeNum); + M_ExitOnError (err); + + + //////////////////////////////// Success //////////////////////////////////// + +Success: + ++btreePtr->writeCount; // <CS10> + ++btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + + // create hint + iterator->hint.writeCount = btreePtr->writeCount; // unused until <CS10> + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + +// LogEndTime(kTraceInsertBTreeRecord, noErr); + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + if (err == fsBTEmptyErr) + err = fsBTRecordNotFoundErr; + +// LogEndTime(kTraceInsertBTreeRecord, err); + + return err; +} + + + +////////////////////////////////// BTSetRecord ////////////////////////////////// +#if 0 +OSStatus BTSetRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + SInt32 nodesNeeded; + BlockDescriptor nodeRec; + UInt32 insertNodeNum; + UInt16 index; + Boolean recordFound = false; + Boolean recordFit; + Boolean operation; + Boolean validHint; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + err = CheckInsertParams (filePtr, iterator, record, recordLen); + if (err != noErr) + return err; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + + ///////////////////////// Find Insert Position ////////////////////////////// + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + 
insertNodeNum = iterator->hint.nodeNum; + + err = GetNode (btreePtr, insertNodeNum, &nodeRec); + if( err == noErr ) + { + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + recordFound = true; + ++btreePtr->numValidHints; + goto Success; + } // else + else + { + (void) BTInvalidateHint( iterator ); + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + } + } + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + + switch (err) // set/replace/insert decision point + { + case noErr: recordFound = true; + break; + + case fsBTRecordNotFoundErr: break; + + case fsBTEmptyErr: // if tree empty add 1st leaf node + + if (btreePtr->freeNodes == 0) + { + err = ExtendBTree (btreePtr, btreePtr->totalNodes + 1); + M_ExitOnError (err); + } + + err = AllocateNode (btreePtr, &insertNodeNum); + M_ExitOnError (err); + + err = GetNewNode (btreePtr, insertNodeNum, &nodeRec); + M_ExitOnError (err); + + ((NodeDescPtr)nodeRec.buffer)->kind = kBTLeafNode; + ((NodeDescPtr)nodeRec.buffer)->height = 1; + + recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, 0, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen ); + if (recordFit != true) + { + err = fsBTRecordTooLargeErr; + goto ErrorExit; + } + + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + // update BTreeControlBlock + btreePtr->rootNode = insertNodeNum; + btreePtr->treeDepth = 1; + btreePtr->flags |= kBTHeaderDirty; + + goto Success; + + default: goto ErrorExit; + } + + + if (recordFound == true) // Simple Replace - optimization avoids unecessary ExtendBTree + { + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + goto Success; + } + } + + + /////////////////////// Extend File If Necessary //////////////////////////// + + nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //¥¥ math limit + if (nodesNeeded > 0) + { + nodesNeeded += btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! 
+ ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + + if (recordFound == true) // Delete existing record + { + DeleteRecord (btreePtr, nodeRec.buffer, index); + operation = kReplaceRecord; + } else { + operation = kInsertRecord; + } + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, operation, &insertNodeNum); + M_ExitOnError (err); + + ++btreePtr->writeCount; // <CS10> writeCount changes only if the tree structure changed + +Success: + if (recordFound == false) + { + ++btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + } + + // create hint + iterator->hint.writeCount = btreePtr->writeCount; // unused until <CS10> + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + return err; +} +#endif + + +//////////////////////////////// BTReplaceRecord //////////////////////////////// + +OSStatus BTReplaceRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + SInt32 nodesNeeded; + BlockDescriptor nodeRec; + UInt32 insertNodeNum; + UInt16 index; + Boolean recordFit; + Boolean validHint; + + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + err = CheckInsertParams (filePtr, iterator, record, recordLen); + if (err != noErr) + return err; + +// LogStartTime(kTraceReplaceBTreeRecord); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + ////////////////////////////// Take A Hint ////////////////////////////////// + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + insertNodeNum = iterator->hint.nodeNum; + + err = GetNode (btreePtr, insertNodeNum, &nodeRec); + if( err == noErr ) + { + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + ++btreePtr->numValidHints; + + goto Success; + } + else + { + (void) BTInvalidateHint( iterator ); + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + } + else + { + (void) BTInvalidateHint( iterator ); + } + } + + + ////////////////////////////// Get A Clue /////////////////////////////////// + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + M_ExitOnError (err); // record must exit for Replace + + // optimization - if simple replace will work then don't extend btree + // ¥¥ if we tried this before, and failed because it wouldn't fit then we shouldn't try this again... 
+ + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec); + M_ExitOnError (err); + + goto Success; + } + + + //////////////////////////// Make Some Room ///////////////////////////////// + + nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //¥¥ math limit + if (nodesNeeded > 0) + { + nodesNeeded += btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! + ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + + DeleteRecord (btreePtr, nodeRec.buffer, index); // delete existing key/record + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, kReplaceRecord, &insertNodeNum); + M_ExitOnError (err); + + ++btreePtr->writeCount; // <CS10> writeCount changes only if the tree structure changed + +Success: + // create hint + iterator->hint.writeCount = btreePtr->writeCount; // unused until <CS10> + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + +// LogEndTime(kTraceReplaceBTreeRecord, noErr); + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + +// LogEndTime(kTraceReplaceBTreeRecord, err); + + return err; +} + + + +//////////////////////////////// BTDeleteRecord ///////////////////////////////// + +OSStatus BTDeleteRecord (SFCB *filePtr, + BTreeIterator *iterator ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + BlockDescriptor nodeRec; + UInt32 nodeNum; + UInt16 index; + +// LogStartTime(kTraceDeleteBTreeRecord); + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + M_ReturnErrorIf (filePtr == nil, paramErr); + M_ReturnErrorIf (iterator == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + if (btreePtr == nil) + { + err = fsBTInvalidFileErr; + goto ErrorExit; + } + +#if SupportsKeyDescriptors + if (btreePtr->keyDescPtr != nil) + { + err = CheckKey (&iterator->key, btreePtr->keyDescPtr, btreePtr->maxKeyLength); + M_ExitOnError (err); + } +#endif + + /////////////////////////////// Find Key //////////////////////////////////// + + //¥¥ check hint for simple delete case (index > 0, numRecords > 2) + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &nodeNum, &nodeRec, &index); + M_ExitOnError (err); // record must exit for Delete + + + ///////////////////////////// Delete Record ///////////////////////////////// + + err = DeleteTree (btreePtr, treePathTable, &nodeRec, index, 1); + M_ExitOnError (err); + +//Success: + ++btreePtr->writeCount; // <CS10> + --btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + + iterator->hint.nodeNum = 0; + +// LogEndTime(kTraceDeleteBTreeRecord, noErr); + + return noErr; + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, &nodeRec); + +// LogEndTime(kTraceDeleteBTreeRecord, err); + + return err; +} + + + +OSStatus BTGetInformation (SFCB *filePtr, + UInt16 version, + BTreeInfoRec *info ) +{ 
+#pragma unused (version) + + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (info == nil, paramErr); + + //¥¥ check version? + + info->nodeSize = btreePtr->nodeSize; + info->maxKeyLength = btreePtr->maxKeyLength; + info->treeDepth = btreePtr->treeDepth; + info->numRecords = btreePtr->leafRecords; + info->numNodes = btreePtr->totalNodes; + info->numFreeNodes = btreePtr->freeNodes; + info->keyDescriptor = btreePtr->keyDescPtr; //¥¥ this won't do at all... + info->reserved = 0; + + if (btreePtr->keyDescPtr == nil) + info->keyDescLength = 0; + else + info->keyDescLength = (UInt32) btreePtr->keyDescPtr->length; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTFlushPath - Flush BTreeControlBlock to Header Node. + +Function: Brief_description_of_the_function_and_any_side_effects + + +Input: pathPtr - pointer to path control block for B*Tree file to flush + +Output: none + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTFlushPath (SFCB *filePtr) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + + +// LogStartTime(kTraceFlushBTree); + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + + err = UpdateHeader (btreePtr); + +// LogEndTime(kTraceFlushBTree, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTInvalidateHint - Invalidates the hint within a BTreeInterator. + +Function: Invalidates the hint within a BTreeInterator. + + +Input: iterator - pointer to BTreeIterator + +Output: iterator - iterator with the hint.nodeNum cleared + +Result: noErr - success + paramErr - iterator == nil +-------------------------------------------------------------------------------*/ + + +OSStatus BTInvalidateHint (BTreeIterator *iterator ) +{ + if (iterator == nil) + return paramErr; + + iterator->hint.nodeNum = 0; + + return noErr; +} + diff --git a/fsck_hfs/dfalib/BTree.h b/fsck_hfs/dfalib/BTree.h new file mode 100644 index 0000000..361316f --- /dev/null +++ b/fsck_hfs/dfalib/BTree.h @@ -0,0 +1,412 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreesInternal.h + + Contains: IPI to File Manager B-tree + + Version: HFS Plus 1.0 + + Copyright: © 1996-1998 by Apple Computer, Inc., all rights reserved. +*/ + +#ifndef __BTREESINTERNAL__ +#define __BTREESINTERNAL__ + +#include "SRuntime.h" + +// +// internal error codes +// +enum { + // FXM errors + + fxRangeErr = 16, // file position beyond mapped range + fxOvFlErr = 17, // extents file overflow + + // Unicode errors + + uniTooLongErr = 24, // Unicode string too long to convert to Str31 + uniBufferTooSmallErr = 25, // Unicode output buffer too small + uniNotMappableErr = 26, // Unicode string can't be mapped to given script + + // BTree Manager errors + + btNotFound = 32, // record not found + btExists = 33, // record already exists + btNoSpaceAvail = 34, // no available space + btNoFit = 35, // record doesn't fit in node + btBadNode = 36, // bad node detected + btBadHdr = 37, // bad BTree header record detected + dsBadRotate = 64, // bad BTree rotate + + // Catalog Manager errors + + cmNotFound = 48, // CNode not found + cmExists = 49, // CNode already exists + cmNotEmpty = 50, // directory CNode not empty (valence = 0) + cmRootCN = 51, // invalid reference to root CNode + cmBadNews = 52, // detected bad catalog structure + cmFThdDirErr = 53, // thread belongs to a directory not a file + cmFThdGone = 54, // file thread doesn't exist + cmParentNotFound = 55, // CNode for parent ID does not exist + cmNotAFolder = 56, // Destination of move is a file, not a folder + cmUnknownNodeType = 57, // Node type isn't recognized + + // Volume Check errors + + vcInvalidExtentErr = 60 // Extent record is out of bounds +}; + +enum { + fsBTInvalidHeaderErr = btBadHdr, + fsBTBadRotateErr = dsBadRotate, + fsBTInvalidNodeErr = btBadNode, + fsBTRecordTooLargeErr = btNoFit, + fsBTRecordNotFoundErr = btNotFound, + fsBTDuplicateRecordErr = btExists, + fsBTFullErr = btNoSpaceAvail, + + fsBTInvalidFileErr = 0x0302, /* no BTreeCB has been allocated for fork*/ + fsBTrFileAlreadyOpenErr = 0x0303, + fsBTInvalidIteratorErr = 0x0308, + fsBTEmptyErr = 0x030A, + fsBTNoMoreMapNodesErr = 0x030B, + fsBTBadNodeSize = 0x030C, + fsBTBadNodeType = 0x030D, + fsBTInvalidKeyLengthErr = 0x030E, + fsBTInvalidKeyDescriptor = 0x030F, + fsBTStartOfIterationErr = 0x0353, + fsBTEndOfIterationErr = 0x0354, + fsBTUnknownVersionErr = 0x0355, + fsBTTreeTooDeepErr = 0x0357, + fsBTInvalidKeyDescriptorErr = 0x0358, + fsBTInvalidKeyFieldErr = 0x035E, + fsBTInvalidKeyAttributeErr = 0x035F, + fsIteratorExitedScopeErr = 0x0A02, /* iterator exited the scope*/ + fsIteratorScopeExceptionErr = 0x0A03, /* iterator is undefined due to error or movement of scope locality*/ + fsUnknownIteratorMovementErr = 0x0A04, /* iterator movement is not defined*/ + fsInvalidIterationMovmentErr = 0x0A05, /* iterator movement is invalid in current context*/ + fsClientIDMismatchErr = 0x0A06, /* wrong client process ID*/ + fsEndOfIterationErr = 0x0A07, /* there were no objects left to return on iteration*/ + fsBTTimeOutErr = 0x0A08 /* BTree scan interrupted -- no time left for physical I/O */ +}; + + +struct FSBufferDescriptor { + LogicalAddress bufferAddress; + ByteCount itemSize; + ItemCount itemCount; +}; +typedef struct FSBufferDescriptor FSBufferDescriptor; + +typedef FSBufferDescriptor *FSBufferDescriptorPtr; + + + +typedef UInt64 FSSize; +typedef UInt32 ForkBlockNumber; + + +/* + BTreeObjID is used to indicate an access path using the + BTree access method to a specific fork of a file. 
This value + is session relative and not persistent between invocations of + an application. It is in fact an object ID to which requests + for the given path should be sent. + */ +typedef UInt32 BTreeObjID; + +/* + B*Tree Information Version +*/ + +enum BTreeInformationVersion{ + kBTreeInfoVersion = 0 +}; + +/* + B*Tree Iteration Operation Constants +*/ + +enum BTreeIterationOperations{ + kBTreeFirstRecord, + kBTreeNextRecord, + kBTreePrevRecord, + kBTreeLastRecord, + kBTreeCurrentRecord +}; +typedef UInt16 BTreeIterationOperation; + +/* + B*Tree Key Descriptor Limits +*/ + +enum { + kMaxKeyDescriptorLength = 23, +}; + +/* + B*Tree Key Descriptor Field Types +*/ + +enum { + kBTreeSkip = 0, + kBTreeByte = 1, + kBTreeSignedByte = 2, + kBTreeWord = 4, + kBTreeSignedWord = 5, + kBTreeLong = 6, + kBTreeSignedLong = 7, + kBTreeString = 3, // Pascal string + kBTreeFixLenString = 8, // Pascal string w/ fixed length buffer + kBTreeReserved = 9, // reserved for Desktop Manager (?) + kBTreeUseKeyCmpProc = 10, + //¥¥ not implemented yet... + kBTreeCString = 11, + kBTreeFixLenCString = 12, + kBTreeUniCodeString = 13, + kBTreeFixUniCodeString = 14 +}; +typedef UInt8 BTreeKeyType; + + +/* + B*Tree Key Descriptor String Field Attributes +*/ + +enum { + kBTreeCaseSens = 0x10, // case sensitive + kBTreeNotDiacSens = 0x20 // not diacritical sensitive +}; +typedef UInt8 BTreeStringAttributes; + +/* + Btree types: 0 is HFS CAT/EXT file, 1~127 are AppleShare B*Tree files, 128~254 unused + hfsBtreeType EQU 0 ; control file + validBTType EQU $80 ; user btree type starts from 128 + userBT1Type EQU $FF ; 255 is our Btree type. Used by BTInit and BTPatch +*/ + +enum BTreeTypes{ + kHFSBTreeType = 0, // control file + kUserBTreeType = 128, // user btree type starts from 128 + kReservedBTreeType = 255 // +}; + +enum { + kInvalidMRUCacheKey = -1L /* flag to denote current MRU cache key is invalid*/ + +}; + +/*============================================================================ + B*Tree Key Structures +============================================================================*/ + +/* + BTreeKeyDescriptor is used to indicate how keys for a particular B*Tree + are to be compared. 
+ */ +typedef char BTreeKeyDescriptor[26]; +typedef char *BTreeKeyDescriptorPtr; + +/* + BTreeInformation is used to describe the public information about a BTree + */ +struct BTreeInformation{ + UInt16 NodeSize; + UInt16 MaxKeyLength; + UInt16 Depth; + UInt16 Reserved; + ItemCount NumRecords; + ItemCount NumNodes; + ItemCount NumFreeNodes; + ByteCount ClumpSize; + BTreeKeyDescriptor KeyDescriptor; + }; +typedef struct BTreeInformation BTreeInformation; +typedef BTreeInformation *BTreeInformationPtr; + +typedef BTreeKey *BTreeKeyPtr; + + +struct KeyDescriptor{ + UInt8 length; + UInt8 fieldDesc [kMaxKeyDescriptorLength]; +}; +typedef struct KeyDescriptor KeyDescriptor; +typedef KeyDescriptor *KeyDescriptorPtr; + +struct NumberFieldDescriptor{ + UInt8 fieldType; + UInt8 occurrence; // number of consecutive fields of this type +}; +typedef struct NumberFieldDescriptor NumberFieldDescriptor; + +struct StringFieldDescriptor{ + UInt8 fieldType; // kBTString + UInt8 occurrence; // number of consecutive fields of this type + UInt8 stringAttribute; + UInt8 filler; +}; +typedef struct StringFieldDescriptor StringFieldDescriptor; + +struct FixedLengthStringFieldDescriptor{ + UInt8 fieldType; // kBTFixLenString + UInt8 stringLength; + UInt8 occurrence; + UInt8 stringAttribute; +}; +typedef struct FixedLengthStringFieldDescriptor FixedLengthStringFieldDescriptor; + +/* + BTreeInfoRec Structure - for BTGetInformation +*/ +struct BTreeInfoRec{ + UInt16 version; + UInt16 nodeSize; + UInt16 maxKeyLength; + UInt16 treeDepth; + ItemCount numRecords; + ItemCount numNodes; + ItemCount numFreeNodes; + KeyDescriptorPtr keyDescriptor; + ByteCount keyDescLength; + UInt32 reserved; +}; +typedef struct BTreeInfoRec BTreeInfoRec; +typedef BTreeInfoRec *BTreeInfoPtr; + +/* + BTreeHint can never be exported to the outside. Use UInt32 BTreeHint[4], + UInt8 BTreeHint[16], etc. 
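+
+	A hint simply records the node a key was last seen in (plus the tree's
+	writeCount at that moment); it is only an optimization, and routines such
+	as FindIteratorPosition fall back to a full SearchTree whenever the hinted
+	node no longer checks out.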
+ */ +struct BTreeHint{ + ItemCount writeCount; + UInt32 nodeNum; // node the key was last seen in + UInt16 index; // index then key was last seen at + UInt16 reserved1; + UInt32 reserved2; +}; +typedef struct BTreeHint BTreeHint; +typedef BTreeHint *BTreeHintPtr; + +/* + BTree Iterator +*/ +struct BTreeIterator{ + BTreeHint hint; + UInt16 version; + UInt16 reserved; + BTreeKey key; +}; +typedef struct BTreeIterator BTreeIterator; +typedef BTreeIterator *BTreeIteratorPtr; + + +/*============================================================================ + B*Tree SPI +============================================================================*/ + +typedef SInt32 (* KeyCompareProcPtr) (BTreeKeyPtr a, BTreeKeyPtr b); + +typedef OSStatus (* GetBlockProcPtr) (SFCB *filePtr, + UInt32 blockNum, + GetBlockOptions options, + BlockDescriptor *block ); + + +typedef OSStatus (* ReleaseBlockProcPtr) (SFCB *filePtr, + BlockDescPtr blockPtr, + ReleaseBlockOptions options ); + +typedef OSStatus (* SetEndOfForkProcPtr) (SFCB *filePtr, + FSSize minEOF, + FSSize maxEOF ); + +typedef OSStatus (* SetBlockSizeProcPtr) (SFCB *filePtr, + ByteCount blockSize ); + +OSStatus SetEndOfForkProc ( SFCB *filePtr, FSSize minEOF, FSSize maxEOF ); + + + +extern OSStatus InitBTreeModule (void); + + +extern OSStatus BTInitialize (SFCB *filePtrPtr, + UInt16 maxKeyLength, + UInt16 nodeSize, + UInt8 btreeType, + KeyDescriptorPtr keyDescPtr ); + +extern OSStatus BTOpenPath (SFCB *filePtr, + KeyCompareProcPtr keyCompareProc, + GetBlockProcPtr getBlockProc, + ReleaseBlockProcPtr releaseBlockProc, + SetEndOfForkProcPtr setEndOfForkProc, + SetBlockSizeProcPtr setBlockSizeProc ); + +extern OSStatus BTClosePath (SFCB *filePtr ); + + +extern OSStatus BTSearchRecord (SFCB *filePtr, + BTreeIterator *searchIterator, + UInt32 heuristicHint, + FSBufferDescriptor *btRecord, + UInt16 *recordLen, + BTreeIterator *resultIterator ); + +extern OSStatus BTIterateRecord (SFCB *filePtr, + BTreeIterationOperation operation, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + UInt16 *recordLen ); + +extern OSStatus BTInsertRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btrecord, + UInt16 recordLen ); + +extern OSStatus BTReplaceRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + UInt16 recordLen ); + +extern OSStatus BTSetRecord (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + UInt16 recordLen ); + +extern OSStatus BTDeleteRecord (SFCB *filePtr, + BTreeIterator *iterator ); + +extern OSStatus BTGetInformation (SFCB *filePtr, + UInt16 version, + BTreeInfoRec *info ); + +extern OSStatus BTFlushPath (SFCB *filePtr ); + +extern OSStatus BTInvalidateHint (BTreeIterator *iterator ); + +#endif // __BTREESINTERNAL__ diff --git a/fsck_hfs/dfalib/BTreeAllocate.c b/fsck_hfs/dfalib/BTreeAllocate.c new file mode 100644 index 0000000..f85a014 --- /dev/null +++ b/fsck_hfs/dfalib/BTreeAllocate.c @@ -0,0 +1,543 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeAllocate.c + + Contains: BTree Node Allocation routines for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. +*/ + +#include "BTreePrivate.h" +#include "hfs_endian.h" + +///////////////////// Routines Internal To BTreeAllocate.c ////////////////////// + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ); + +///////////////////////////////////////////////////////////////////////////////// + +/*------------------------------------------------------------------------------- + +Routine: AllocateNode - Find Free Node, Mark It Used, and Return Node Number. + +Function: Searches the map records for the first free node, marks it "in use" and + returns the node number found. This routine should really only be called + when we know there are free blocks, otherwise it's just a waste of time. + +Note: We have to examine map nodes a word at a time rather than a long word + because the External BTree Mgr used map records that were not an integral + number of long words. Too bad. In our spare time could develop a more + sophisticated algorithm that read map records by long words (and long + word aligned) and handled the spare bytes at the beginning and end + appropriately. + +Input: btreePtr - pointer to control block for BTree file + +Output: nodeNum - number of node allocated + + +Result: noErr - success + fsBTNoMoreMapNodesErr - no free blocks were found + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, UInt32 *nodeNum) +{ + OSStatus err; + BlockDescriptor node; + UInt16 *mapPtr, *pos; + UInt16 mapSize, size; + UInt16 freeWord; + UInt16 mask; + UInt16 bitOffset; + UInt32 nodeNumber; + + + nodeNumber = 0; // first node number of header map record + node.buffer = nil; // clear node.buffer to get header node + // - and for ErrorExit + + while (true) + { + err = GetMapNode (btreePtr, &node, &mapPtr, &mapSize); + M_ExitOnError (err); + + //////////////////////// Find Word with Free Bit //////////////////////////// + + pos = mapPtr; + size = mapSize; + size >>= 1; // convert to number of words + //¥¥ assumes mapRecords contain an integral number of words + + while ( size-- ) + { + if ( *pos++ != 0xFFFF ) // assume test fails, and increment pos + break; + } + + --pos; // whoa! backup + + if (*pos != 0xFFFF) // hey, we got one! 
+ break; + + nodeNumber += mapSize << 3; // covert to number of bits (nodes) + } + + ///////////////////////// Find Free Bit in Word ///////////////////////////// + + freeWord = SWAP_BE16 (*pos); + bitOffset = 15; + mask = 0x8000; + + do { + if ( (freeWord & mask) == 0) + break; + mask >>= 1; + } while (--bitOffset); + + ////////////////////// Calculate Free Node Number /////////////////////////// + + nodeNumber += ((pos - mapPtr) << 4) + (15 - bitOffset); // (pos-mapPtr) = # of words! + + + ///////////////////////// Check for End of Map ////////////////////////////// + + if (nodeNumber >= btreePtr->totalNodes) + { + err = fsBTFullErr; + goto ErrorExit; + } + + /////////////////////////// Allocate the Node /////////////////////////////// + + *pos |= SWAP_BE16 (mask); // set the map bit for the node + + err = UpdateNode (btreePtr, &node); + M_ExitOnError (err); + + --btreePtr->freeNodes; + btreePtr->flags |= kBTHeaderDirty; + *nodeNum = nodeNumber; + + return noErr; + +////////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &node); + *nodeNum = 0; + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: FreeNode - Clear allocation bit for node. + +Function: Finds the bit representing the node specified by nodeNum in the node + map and clears the bit. + + +Input: btreePtr - pointer to control block for BTree file + nodeNum - number of node to mark free + +Output: none + +Result: noErr - success + fsBTNoMoreMapNodesErr - node number is beyond end of node map + != noErr - GetNode or ReleaseNode encountered some difficulty +-------------------------------------------------------------------------------*/ + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, UInt32 nodeNum) +{ + OSStatus err; + BlockDescriptor node; + UInt32 nodeIndex; + UInt16 mapSize; + UInt16 *mapPos; + UInt16 bitOffset; + + + //////////////////////////// Find Map Record //////////////////////////////// + nodeIndex = 0; // first node number of header map record + node.buffer = nil; // invalidate node.buffer to get header node + + while (nodeNum >= nodeIndex) + { + err = GetMapNode (btreePtr, &node, &mapPos, &mapSize); + M_ExitOnError (err); + + nodeIndex += mapSize << 3; // covert to number of bits (nodes) + } + + //////////////////////////// Mark Node Free ///////////////////////////////// + + nodeNum -= (nodeIndex - (mapSize << 3)); // relative to this map record + bitOffset = 15 - (nodeNum & 0x0000000F); // last 4 bits are bit offset + mapPos += nodeNum >> 4; // point to word containing map bit + M_SWAP_BE16_ClearBitNum (*mapPos, bitOffset); // clear it + + err = UpdateNode (btreePtr, &node); + M_ExitOnError (err); + + + ++btreePtr->freeNodes; + btreePtr->flags |= kBTHeaderDirty; //¥¥ how about a macro for this + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, &node); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: ExtendBTree - Call FSAgent to extend file, and allocate necessary map nodes. + +Function: This routine calls the the FSAgent to extend the end of fork, if necessary, + to accomodate the number of nodes requested. It then allocates as many + map nodes as are necessary to account for all the nodes in the B*Tree. + If newTotalNodes is less than the current number of nodes, no action is + taken. 
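+
+		Map records are packed bitmaps, one bit per node: the header node holds
+		the first map record, and each additional map node carries
+		nodeSize - sizeof(BTNodeDescriptor) - 6 bytes of bitmap and is chained
+		to the next through its fLink field, which is how the loops below walk
+		the map.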
+ +Note: Internal HFS File Manager BTree Module counts on an integral number of + long words in map records, although they are not long word aligned. + +Input: btreePtr - pointer to control block for BTree file + newTotalNodes - total number of nodes the B*Tree is to extended to + +Output: none + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + UInt32 newTotalNodes ) +{ + OSStatus err; + SFCB *filePtr; + FSSize minEOF, maxEOF; + UInt16 nodeSize; + UInt32 oldTotalNodes; + UInt32 newMapNodes; + UInt32 mapBits, totalMapBits; + UInt32 recStartBit; + UInt32 nodeNum, nextNodeNum; + UInt32 firstNewMapNodeNum, lastNewMapNodeNum; + BlockDescriptor mapNode, newNode; + UInt16 *mapPos; + UInt16 *mapStart; + UInt16 mapSize; + UInt16 mapNodeRecSize; + UInt32 bitInWord, bitInRecord; + UInt16 mapIndex; + + + oldTotalNodes = btreePtr->totalNodes; + if (newTotalNodes <= oldTotalNodes) // we're done! + return noErr; + + nodeSize = btreePtr->nodeSize; + filePtr = btreePtr->fcbPtr; + + mapNode.buffer = nil; + newNode.buffer = nil; + + mapNodeRecSize = nodeSize - sizeof(BTNodeDescriptor) - 6; // 2 bytes of free space (see note) + + //¥¥ update for proper 64 bit arithmetic!! + + + //////////////////////// Count Bits In Node Map ///////////////////////////// + + totalMapBits = 0; + do { + err = GetMapNode (btreePtr, &mapNode, &mapStart, &mapSize); + M_ExitOnError (err); + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + } while ( ((BTNodeDescriptor*)mapNode.buffer)->fLink != 0 ); + + if (DEBUG_BUILD && totalMapBits != CalcMapBits (btreePtr)) + Panic ("\pExtendBTree: totalMapBits != CalcMapBits"); + + /////////////////////// Extend LEOF If Necessary //////////////////////////// + + minEOF = (UInt64)newTotalNodes * (UInt64)nodeSize; + if ( filePtr->fcbLogicalSize < minEOF ) + { + maxEOF = 0xFFFFFFFFFFFFFFFFULL; + + err = btreePtr->setEndOfForkProc (btreePtr->fcbPtr, minEOF, maxEOF); + M_ExitOnError (err); + } + + + //////////////////// Calc New Total Number Of Nodes ///////////////////////// + + newTotalNodes = filePtr->fcbLogicalSize / nodeSize; //¥¥ hack! + //¥¥ do we wish to perform any verification of newTotalNodes at this point? + + btreePtr->totalNodes = newTotalNodes; //¥¥ do we need to update freeNodes here too? 
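+	// GetNode range-checks node numbers against totalNodes, so totalNodes has
+	// to be updated before the new map nodes (numbered from oldTotalNodes up)
+	// are created and walked below; freeNodes itself is adjusted at Success:.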
+ + + ////////////// Calculate Number Of New Map Nodes Required /////////////////// + + newMapNodes = 0; + if (newTotalNodes > totalMapBits) + { + newMapNodes = (((newTotalNodes - totalMapBits) >> 3) / mapNodeRecSize) + 1; + firstNewMapNodeNum = oldTotalNodes; + lastNewMapNodeNum = firstNewMapNodeNum + newMapNodes - 1; + } + else + { + err = ReleaseNode (btreePtr, &mapNode); + M_ExitOnError (err); + + goto Success; + } + + + /////////////////////// Initialize New Map Nodes //////////////////////////// + + ((BTNodeDescriptor*)mapNode.buffer)->fLink = firstNewMapNodeNum; + + nodeNum = firstNewMapNodeNum; + while (true) + { + err = GetNewNode (btreePtr, nodeNum, &newNode); + M_ExitOnError (err); + + ((NodeDescPtr)newNode.buffer)->numRecords = 1; + ((NodeDescPtr)newNode.buffer)->kind = kBTMapNode; + + // set free space offset + *(UInt16 *)((Ptr)newNode.buffer + nodeSize - 4) = nodeSize - 6; + + if (nodeNum++ == lastNewMapNodeNum) + break; + + ((BTNodeDescriptor*)newNode.buffer)->fLink = nodeNum; // point to next map node + + err = UpdateNode (btreePtr, &newNode); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, &newNode); + M_ExitOnError (err); + + + ///////////////////// Mark New Map Nodes Allocated ////////////////////////// + + nodeNum = firstNewMapNodeNum; + do { + bitInRecord = nodeNum - recStartBit; + + while (bitInRecord >= mapBits) + { + nextNodeNum = ((NodeDescPtr)mapNode.buffer)->fLink; + if ( nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + err = UpdateNode (btreePtr, &mapNode); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, &mapNode); + M_ExitOnError (err); + + mapIndex = 0; + + mapStart = (UInt16 *) GetRecordAddress (btreePtr, mapNode.buffer, mapIndex); + mapSize = GetRecordSize (btreePtr, mapNode.buffer, mapIndex); + + if (DEBUG_BUILD && mapSize != M_MapRecordSize (btreePtr->nodeSize) ) + { + Panic ("\pExtendBTree: mapSize != M_MapRecordSize"); + } + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + bitInRecord = nodeNum - recStartBit; + } + + mapPos = mapStart + ((nodeNum - recStartBit) >> 4); + bitInWord = 15 - ((nodeNum - recStartBit) & 0x0000000F); + M_SWAP_BE16_SetBitNum (*mapPos, bitInWord); + + ++nodeNum; + + } while (nodeNum <= lastNewMapNodeNum); + + err = UpdateNode (btreePtr, &mapNode); + M_ExitOnError (err); + + + //////////////////////////////// Success //////////////////////////////////// + +Success: + + btreePtr->totalNodes = newTotalNodes; + btreePtr->freeNodes += (newTotalNodes - oldTotalNodes) - newMapNodes; + + btreePtr->flags |= kBTHeaderDirty; //¥¥ how about a macro for this + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &mapNode); + (void) ReleaseNode (btreePtr, &newNode); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetMapNode - Get the next map node and pointer to the map record. + +Function: Given a BlockDescriptor to a map node in nodePtr, GetMapNode releases + it and gets the next node. If nodePtr->buffer is nil, then the header + node is retrieved. 
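+		The map record is record 2 of the header node but record 0 of every
+		subsequent map node, which is why mapIndex is set differently in the
+		two branches below.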
+ + +Input: btreePtr - pointer to control block for BTree file + nodePtr - pointer to a BlockDescriptor of a map node + +Output: nodePtr - pointer to the BlockDescriptor for the next map node + mapPtr - pointer to the map record within the map node + mapSize - number of bytes in the map record + +Result: noErr - success + fsBTNoMoreMapNodesErr - we've run out of map nodes + fsBTInvalidNodeErr - bad node, or not node type kMapNode + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ) +{ + OSStatus err; + UInt16 mapIndex; + UInt32 nextNodeNum; + + if (nodePtr->buffer != nil) // if iterator is valid... + { + nextNodeNum = ((NodeDescPtr)nodePtr->buffer)->fLink; + if (nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + err = ReleaseNode (btreePtr, nodePtr); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTMapNode) + { + err = fsBTBadNodeType; + goto ErrorExit; + } + + ++btreePtr->numMapNodesRead; + mapIndex = 0; + } else { + err = GetNode (btreePtr, kHeaderNodeNum, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTHeaderNode) + { + err = fsBTInvalidHeaderErr; //¥¥ or fsBTBadNodeType + goto ErrorExit; + } + + mapIndex = 2; + } + + + *mapPtr = (UInt16 *) GetRecordAddress (btreePtr, nodePtr->buffer, mapIndex); + *mapSize = GetRecordSize (btreePtr, nodePtr->buffer, mapIndex); + + return noErr; + + +ErrorExit: + + (void) ReleaseNode (btreePtr, nodePtr); + + *mapPtr = nil; + *mapSize = 0; + + return err; +} + + + +////////////////////////////////// CalcMapBits ////////////////////////////////// + +UInt32 CalcMapBits (BTreeControlBlockPtr btreePtr) +{ + UInt32 mapBits; + + mapBits = M_HeaderMapRecordSize (btreePtr->nodeSize) << 3; + + while (mapBits < btreePtr->totalNodes) + mapBits += M_MapRecordSize (btreePtr->nodeSize) << 3; + + return mapBits; +} diff --git a/fsck_hfs/dfalib/BTreeMiscOps.c b/fsck_hfs/dfalib/BTreeMiscOps.c new file mode 100644 index 0000000..5faa741 --- /dev/null +++ b/fsck_hfs/dfalib/BTreeMiscOps.c @@ -0,0 +1,581 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeMiscOps.c + + Contains: Miscellaneous operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. 
+*/ + +#include "BTreePrivate.h" + + +////////////////////////////// Routine Definitions ////////////////////////////// + +/*------------------------------------------------------------------------------- +Routine: CalcKeyRecordSize - Return size of combined key/record structure. + +Function: Rounds keySize and recSize so they will end on word boundaries. + Does NOT add size of offset. + +Input: keySize - length of key (including length field) + recSize - length of record data + +Output: none + +Result: UInt16 - size of combined key/record that will be inserted in btree +-------------------------------------------------------------------------------*/ + +UInt16 CalcKeyRecordSize (UInt16 keySize, + UInt16 recSize ) +{ + if ( M_IsOdd (keySize) ) keySize += 1; // pad byte + + if (M_IsOdd (recSize) ) recSize += 1; // pad byte + + return (keySize + recSize); +} + + + +/*------------------------------------------------------------------------------- +Routine: VerifyHeader - Validate fields of the BTree header record. + +Function: Examines the fields of the BTree header record to determine if the + fork appears to contain a valid BTree. + +Input: forkPtr - pointer to fork control block + header - pointer to BTree header + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus VerifyHeader (SFCB *filePtr, + BTHeaderRec *header ) +{ + UInt32 forkSize; + UInt32 totalNodes; + + + switch (header->nodeSize) // node size == 512*2^n + { + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + case 32768: break; + default: return fsBTInvalidHeaderErr; //¥¥ E_BadNodeType + } + + totalNodes = header->totalNodes; + + forkSize = totalNodes * header->nodeSize; + + if ( forkSize != filePtr->fcbLogicalSize ) + return fsBTInvalidHeaderErr; + + if ( header->freeNodes >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->rootNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->firstLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->lastLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->treeDepth > kMaxTreeDepth ) + return fsBTInvalidHeaderErr; + + + /////////////////////////// Check BTree Type //////////////////////////////// + + switch (header->btreeType) + { + case 0: // HFS Type - no Key Descriptor + case kUserBTreeType: // with Key Descriptors etc. + case kReservedBTreeType: // Desktop Mgr BTree ? + break; + + default: return fsBTUnknownVersionErr; + } + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: UpdateHeader - Write BTreeInfoRec fields to Header node. + +Function: Checks the kBTHeaderDirty flag in the BTreeInfoRec and updates the + header node if necessary. 
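+		(The dirty flag and the fields written out live in the in-memory
+		BTreeControlBlock; the on-disk BTHeaderRec, which sits immediately
+		after the node descriptor in node kHeaderNodeNum, is refreshed from
+		them and the flag is then cleared.)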
+ +Input: btreePtr - pointer to BTreeInfoRec + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus UpdateHeader (BTreeControlBlockPtr btreePtr) +{ + OSStatus err; + BlockDescriptor node; + BTHeaderRec *header; + + + if ((btreePtr->flags & kBTHeaderDirty) == 0) // btree info already flushed + return noErr; + + + err = GetNode (btreePtr, kHeaderNodeNum, &node ); + if (err != noErr) + return err; + + header = (BTHeaderRec*) (node.buffer + sizeof(BTNodeDescriptor)); + + header->treeDepth = btreePtr->treeDepth; + header->rootNode = btreePtr->rootNode; + header->leafRecords = btreePtr->leafRecords; + header->firstLeafNode = btreePtr->firstLeafNode; + header->lastLeafNode = btreePtr->lastLeafNode; + header->nodeSize = btreePtr->nodeSize; //¥¥ this shouldn't change + header->maxKeyLength = btreePtr->maxKeyLength; //¥¥ neither should this + header->totalNodes = btreePtr->totalNodes; + header->freeNodes = btreePtr->freeNodes; + header->btreeType = btreePtr->btreeType; + + // ignore header->clumpSize; //¥¥ rename this field? + + err = UpdateNode (btreePtr, &node); + + btreePtr->flags &= (~kBTHeaderDirty); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: FindIteratorPosition - One_line_description. + +Function: Brief_description_of_the_function_and_any_side_effects + +Algorithm: see FSC.BT.BTIterateRecord.PICT + +Note: //¥¥ document side-effects of bad node hints + +Input: btreePtr - description + iterator - description + + +Output: iterator - description + left - description + middle - description + right - description + nodeNum - description + returnIndex - description + foundRecord - description + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + UInt32 *returnNodeNum, + UInt16 *returnIndex, + Boolean *foundRecord ) +{ + OSStatus err; + Boolean foundIt; + UInt32 nodeNum; + UInt16 leftIndex, index, rightIndex; + Boolean validHint; + + // assume btreePtr valid + // assume left, middle, right point to BlockDescriptors + // assume nodeNum points to UInt32 + // assume index points to UInt16 + // assume foundRecord points to Boolean + + left->buffer = nil; + middle->buffer = nil; + right->buffer = nil; + + foundIt = false; + + if (iterator == nil) // do we have an iterator? + { + err = fsBTInvalidIteratorErr; + goto ErrorExit; + } + +#if SupportsKeyDescriptors + //¥¥ verify iterator key (change CheckKey to take btreePtr instead of keyDescPtr?) + if (btreePtr->keyDescPtr != nil) + { + err = CheckKey (&iterator->key, btreePtr->keyDescPtr, btreePtr->maxKeyLength ); + M_ExitOnError (err); + } +#endif + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + nodeNum = iterator->hint.nodeNum; + if (! validHint) // does the hint appear to be valid? 
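+		// (IsItAHint only rejects a zero nodeNum; a stale but nonzero hint is
+		//  caught below when the hinted node turns out not to be a usable leaf)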
+ { + goto SearchTheTree; + } + + err = GetNode (btreePtr, nodeNum, middle); + if( err == fsBTInvalidNodeErr ) // returned if nodeNum is out of range + goto SearchTheTree; + + M_ExitOnError (err); + + if ( ((NodeDescPtr) middle->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) middle->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + ++btreePtr->numValidHints; + + foundIt = SearchNode (btreePtr, middle->buffer, &iterator->key, &index); + if (foundIt == true) + { + goto SuccessfulExit; + } + + if (index == 0) + { + if (((NodeDescPtr) middle->buffer)->bLink == 0) // before 1st btree record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->bLink; + + err = GetLeftSiblingNode (btreePtr, middle->buffer, left); + M_ExitOnError (err); + + if ( ((NodeDescPtr) left->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) left->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, left->buffer, &iterator->key, &leftIndex); + if (foundIt == true) + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + + if (leftIndex == 0) // we're lost! + { + goto SearchTheTree; + } + else if (leftIndex >= ((NodeDescPtr) left->buffer)->numRecords) + { + nodeNum = ((NodeDescPtr) left->buffer)->fLink; + + PanicIf (index != 0, "\pFindIteratorPosition: index != 0"); //¥¥ just checking... + goto SuccessfulExit; + } + else + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + } + else if (index >= ((NodeDescPtr) middle->buffer)->numRecords) + { + if (((NodeDescPtr) middle->buffer)->fLink == 0) // beyond last record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->fLink; + + err = GetRightSiblingNode (btreePtr, middle->buffer, right); + M_ExitOnError (err); + + if ( ((NodeDescPtr) right->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) right->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, right->buffer, &iterator->key, &rightIndex); + if (rightIndex >= ((NodeDescPtr) right->buffer)->numRecords) // we're lost + { + goto SearchTheTree; + } + else // we found it, or rightIndex==0, or rightIndex<numRecs + { + *left = *middle; + *middle = *right; + right->buffer = nil; + index = rightIndex; + + goto SuccessfulExit; + } + } + + + //////////////////////////// Search The Tree //////////////////////////////// + +SearchTheTree: + { + TreePathTable treePathTable; // so we only use stack space if we need to + + err = ReleaseNode (btreePtr, left); M_ExitOnError (err); + err = ReleaseNode (btreePtr, middle); M_ExitOnError (err); + err = ReleaseNode (btreePtr, right); M_ExitOnError (err); + + err = SearchTree ( btreePtr, &iterator->key, treePathTable, &nodeNum, middle, &index); + switch (err) //¥¥ separate find condition from exceptions + { + case noErr: foundIt = true; break; + case fsBTRecordNotFoundErr: break; + default: goto ErrorExit; + } + } + + /////////////////////////////// Success! 
//////////////////////////////////// + +SuccessfulExit: + + *returnNodeNum = nodeNum; + *returnIndex = index; + *foundRecord = foundIt; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, left); + (void) ReleaseNode (btreePtr, middle); + (void) ReleaseNode (btreePtr, right); + + *returnNodeNum = 0; + *returnIndex = 0; + *foundRecord = false; + + return err; +} + + + +/////////////////////////////// CheckInsertParams /////////////////////////////// + +OSStatus CheckInsertParams (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ) +{ + BTreeControlBlockPtr btreePtr; + + if (filePtr == nil) return paramErr; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBtree; + if (btreePtr == nil) return fsBTInvalidFileErr; + if (iterator == nil) return paramErr; + if (record == nil) return paramErr; + +#if SupportsKeyDescriptors + if (btreePtr->keyDescPtr != nil) + { + OSStatus err; + + err = CheckKey (&iterator->key, btreePtr->keyDescPtr, btreePtr->maxKeyLength); + if (err != noErr) + return err; + } +#endif + + // check total key/record size limit + if ( CalcKeyRecordSize (CalcKeySize(btreePtr, &iterator->key), recordLen) > (btreePtr->nodeSize >> 1)) + return fsBTRecordTooLargeErr; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: TrySimpleReplace - Attempts a simple insert, set, or replace. + +Function: If a hint exitst for the iterator, attempt to find the key in the hint + node. If the key is found, an insert operation fails. If the is not + found, a replace operation fails. If the key was not found, and the + insert position is greater than 0 and less than numRecords, the record + is inserted, provided there is enough freeSpace. If the key was found, + and there is more freeSpace than the difference between the new record + and the old record, the old record is deleted and the new record is + inserted. + +Assumptions: iterator key has already been checked by CheckKey + + +Input: btreePtr - description + iterator - description + record - description + recordLen - description + operation - description + + +Output: recordInserted - description + + +Result: noErr - success + E_RecordExits - insert operation failure + != noErr - GetNode, ReleaseNode, UpdateNode returned an error +-------------------------------------------------------------------------------*/ + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen, + Boolean *recordInserted ) +{ + UInt32 oldSpace; + UInt32 spaceNeeded; + UInt16 index; + UInt16 keySize; + Boolean foundIt; + Boolean didItFit; + + + *recordInserted = false; // we'll assume this won't work... + + if ( nodePtr->kind != kBTLeafNode ) + return noErr; // we're in the weeds! + + foundIt = SearchNode (btreePtr, nodePtr, &iterator->key, &index); + + if ( foundIt == false ) + return noErr; // we might be lost... 
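+
+	// Two ways the replace can still be "simple": if the new key/record is
+	// exactly the size of the old one it is overwritten in place; otherwise,
+	// if the node's free space plus the old record can absorb the new size,
+	// the old record is deleted and the new one inserted right here.  Anything
+	// larger is left to the caller (see BTReplaceRecord's InsertTree path).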
+ + keySize = CalcKeySize(btreePtr, &iterator->key); // includes length field + + spaceNeeded = CalcKeyRecordSize (keySize, recordLen); + + oldSpace = GetRecordSize (btreePtr, nodePtr, index); + + if ( spaceNeeded == oldSpace ) + { + UInt8 * dst; + + dst = GetRecordAddress (btreePtr, nodePtr, index); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + dst += keySize; // skip over key to point at record + + CopyMemory(record->bufferAddress, dst, recordLen); // blast away... + + *recordInserted = true; + } + else if ( (GetNodeFreeSize(btreePtr, nodePtr) + oldSpace) >= spaceNeeded) + { + DeleteRecord (btreePtr, nodePtr, index); + + didItFit = InsertKeyRecord (btreePtr, nodePtr, index, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen); + PanicIf (didItFit == false, "\pTrySimpleInsert: InsertKeyRecord returned false!"); + + *recordInserted = true; + } + // else not enough space... + + return noErr; +} + + +/*------------------------------------------------------------------------------- +Routine: IsItAHint - checks the hint within a BTreeInterator. + +Function: checks the hint within a BTreeInterator. If it is non-zero, it may + possibly be valid. + +Input: btreePtr - pointer to control block for BTree file + iterator - pointer to BTreeIterator + +Output: answer - true if the hint looks reasonable + - false if the hint is 0 + +Result: noErr - success +-------------------------------------------------------------------------------*/ + + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, BTreeIterator *iterator, Boolean *answer) +{ + ++btreePtr->numHintChecks; + + //¥¥ shouldn't we do a range check? + if (iterator->hint.nodeNum == 0) + { + *answer = false; + } + else + { + *answer = true; + ++btreePtr->numPossibleHints; + } + + return noErr; +} diff --git a/fsck_hfs/dfalib/BTreeNodeOps.c b/fsck_hfs/dfalib/BTreeNodeOps.c new file mode 100644 index 0000000..9f1e00a --- /dev/null +++ b/fsck_hfs/dfalib/BTreeNodeOps.c @@ -0,0 +1,1034 @@ +/* + * Copyright (c) 1999, 2005-2006 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeNodeOps.c + + Contains: Single-node operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. 
+*/ + +#include "BTreePrivate.h" +#include "hfs_endian.h" +#include "../fsck_hfs.h" + + +///////////////////////// BTree Module Node Operations ////////////////////////// +// +// GetNode - Call FS Agent to get node +// GetNewNode - Call FS Agent to get a new node +// ReleaseNode - Call FS Agent to release node obtained by GetNode. +// UpdateNode - Mark a node as dirty and call FS Agent to release it. +// +// ClearNode - Clear a node to all zeroes. +// +// InsertRecord - Inserts a record into a BTree node. +// InsertKeyRecord - Inserts a key and record pair into a BTree node. +// DeleteRecord - Deletes a record from a BTree node. +// +// SearchNode - Return index for record that matches key. +// LocateRecord - Return pointer to key and data, and size of data. +// +// GetNodeDataSize - Return the amount of space used for data in the node. +// GetNodeFreeSize - Return the amount of free space in the node. +// +// GetRecordOffset - Return the offset for record "index". +// GetRecordAddress - Return address of record "index". +// GetOffsetAddress - Return address of offset for record "index". +// +// InsertOffset - Inserts a new offset into a node. +// DeleteOffset - Deletes an offset from a node. +// +// MoveRecordsLeft - Move records left within a node. +// MoveRecordsRight - Move records right within a node. +// +///////////////////////////////////////////////////////////////////////////////// + + + +////////////////////// Routines Internal To BTreeNodeOps.c ////////////////////// + +UInt16 GetRecordOffset (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +UInt16 *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ); + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + UInt16 delta ); + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ); + + +///////////////////////////////////////////////////////////////////////////////// + +#define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize)) + + +/*------------------------------------------------------------------------------- + +Routine: GetNode - Call FS Agent to get node + +Function: Gets an existing BTree node from FS Agent and verifies it. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + +Output: nodePtr - pointer to beginning of node (nil if error) + +Result: + noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *nodePtr ) +{ + OSStatus err; + GetBlockProcPtr getNodeProc; + + +// LogStartTime(kTraceGetNode); + + //¥¥ is nodeNum within proper range? 
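+	// (totalNodes mirrors the header record -- see VerifyHeader/UpdateHeader --
+	//  so a node number at or beyond it cannot refer to a real node)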
+ if( nodeNum >= btreePtr->totalNodes ) + { + Panic("\pGetNode:nodeNum >= totalNodes"); + if (debug) fprintf(stderr, "%s(%d): nodeNum %u > totalNodes %u\n", __FUNCTION__, __LINE__, nodeNum, btreePtr->totalNodes); + err = fsBTInvalidNodeErr; + goto ErrorExit; + } + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fcbPtr, + nodeNum, + kGetBlock, + nodePtr ); + + if (err != noErr) + { + Panic ("\pGetNode: getNodeProc returned error."); + nodePtr->buffer = nil; + goto ErrorExit; + } + ++btreePtr->numGetNodes; + + err = hfs_swap_BTNode(nodePtr, btreePtr->fcbPtr, kSwapBTNodeBigToHost); + if (err != noErr) + { + (void) TrashNode (btreePtr, nodePtr); // ignore error + goto ErrorExit; + } + +// LogEndTime(kTraceGetNode, noErr); + + return noErr; + +ErrorExit: + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + +// LogEndTime(kTraceGetNode, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetNewNode - Call FS Agent to get a new node + +Function: Gets a new BTree node from FS Agent and initializes it to an empty + state. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + +Output: returnNodePtr - pointer to beginning of node (nil if error) + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ) +{ + OSStatus err; + NodeDescPtr node; + void *pos; + GetBlockProcPtr getNodeProc; + + + //////////////////////// get buffer for new node //////////////////////////// + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fcbPtr, + nodeNum, + kGetBlock+kGetEmptyBlock, + returnNodePtr ); + + if (err != noErr) + { + Panic ("\pGetNewNode: getNodeProc returned error."); + returnNodePtr->buffer = nil; + return err; + } + ++btreePtr->numGetNewNodes; + + + ////////////////////////// initialize the node ////////////////////////////// + + node = returnNodePtr->buffer; + + ClearNode (btreePtr, node); // clear the node + + pos = (char *)node + btreePtr->nodeSize - 2; // find address of last offset + *(UInt16 *)pos = sizeof (BTNodeDescriptor); // set offset to beginning of free space + + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + +Routine: ReleaseNode - Call FS Agent to release node obtained by GetNode. + +Function: Informs the FS Agent that a BTree node may be released. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + ReleaseBlockOptions options = kReleaseBlock; + +// LogStartTime(kTraceReleaseNode); + + err = noErr; + + if (nodePtr->buffer != nil) + { + /* + * The nodes must remain in the cache as big endian! 
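+		 * Swap the node back to big endian before handing it to the block
+		 * cache; if the swap fails, kTrashBlock is added so the inconsistent
+		 * buffer is discarded rather than written out.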
+ */ + err = hfs_swap_BTNode(nodePtr, btreePtr->fcbPtr, kSwapBTNodeHostToBig); + if (err) + { + options |= kTrashBlock; + } + + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fcbPtr, + nodePtr, + options ); + PanicIf (err, "\pReleaseNode: releaseNodeProc returned error."); + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + +// LogEndTime(kTraceReleaseNode, err); + + return err; +} + + +/*------------------------------------------------------------------------------- + +Routine: TrashNode - Call FS Agent to release node obtained by GetNode, and + not store it...mark it as bad. + +Function: Informs the FS Agent that a BTree node may be released and thrown away. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + err = noErr; + + if (nodePtr->buffer != nil) + { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fcbPtr, + nodePtr, + kReleaseBlock | kTrashBlock ); + PanicIf (err, "\pTrashNode: releaseNodeProc returned error."); + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return err; +} + + +/*------------------------------------------------------------------------------- + +Routine: UpdateNode - Mark a node as dirty and call FS Agent to release it. + +Function: Marks a BTree node dirty and informs the FS Agent that it may be released. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + ReleaseBlockOptions options = kMarkBlockDirty; + + err = noErr; + + if (nodePtr->buffer != nil) //¥¥ why call UpdateNode if nil ?!? + { + // LogStartTime(kTraceReleaseNode); + + err = hfs_swap_BTNode(nodePtr, btreePtr->fcbPtr, kSwapBTNodeHostToBig); + if (err != noErr) + { + options = kReleaseBlock | kTrashBlock; + } + + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fcbPtr, + nodePtr, + options ); + + // LogEndTime(kTraceReleaseNode, err); + + M_ExitOnError (err); + ++btreePtr->numUpdateNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return noErr; + +ErrorExit: + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: ClearNode - Clear a node to all zeroes. + +Function: Writes zeroes from beginning of node for nodeSize bytes. + +Input: btreePtr - pointer to BTree control block + node - pointer to node to clear + +Result: none +-------------------------------------------------------------------------------*/ + +void ClearNode (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + ClearMemory( node, btreePtr->nodeSize ); +} + +/*------------------------------------------------------------------------------- + +Routine: InsertRecord - Inserts a record into a BTree node. + +Function: + +Note: Record size must be even! 
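+
+	(Sizes are kept even so that every record, and the key at the front of it,
+	starts on a 2-byte boundary; CalcKeyRecordSize and InsertKeyRecord add a
+	pad byte whenever a key or record size is odd.)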
+ +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + recPtr - pointer to record to insert + +Result: noErr - success + fsBTFullErr - record larger than remaining free space. +-------------------------------------------------------------------------------*/ + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + RecordPtr recPtr, + UInt16 recSize ) +{ + UInt16 freeSpace; + UInt16 indexOffset; + UInt16 freeOffset; + UInt16 bytesToMove; + void *src; + void *dst; + + //// will new record fit in node? + + freeSpace = GetNodeFreeSize (btreePtr, node); + //¥¥ we could get freeOffset & calc freeSpace + if ( freeSpace < recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + indexOffset; + dst = ((Ptr) src) + recSize; + bytesToMove = freeOffset - indexOffset; + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, recSize); + + + //// move in the new record + + dst = ((Ptr) node) + indexOffset; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + +Routine: InsertKeyRecord - Inserts a record into a BTree node. + +Function: + +Note: Record size must be even! + +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + keyPtr - pointer to key for record to insert + keyLength - length of key (or maxKeyLength) + recPtr - pointer to record to insert + recSize - number of bytes to copy for record + +Result: noErr - success + fsBTFullErr - record larger than remaining free space. +-------------------------------------------------------------------------------*/ + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + KeyPtr keyPtr, + UInt16 keyLength, + RecordPtr recPtr, + UInt16 recSize ) +{ + UInt16 freeSpace; + UInt16 indexOffset; + UInt16 freeOffset; + UInt16 bytesToMove; + UInt8 * src; + UInt8 * dst; + UInt16 keySize; + UInt16 rawKeyLength; + UInt16 sizeOfLength; + + //// calculate actual key size + + if ( btreePtr->attributes & kBTBigKeysMask ) + keySize = keyLength + sizeof(UInt16); + else + keySize = keyLength + sizeof(UInt8); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + + //// will new record fit in node? 
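+	// (room is needed for the key, the record data, and one more UInt16 entry
+	//  in the offset stack at the end of the node -- hence the "+ 2" below)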
+ + freeSpace = GetNodeFreeSize (btreePtr, node); + //¥¥ we could get freeOffset & calc freeSpace + if ( freeSpace < keySize + recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((UInt8 *) node) + indexOffset; + dst = ((UInt8 *) src) + keySize + recSize; + bytesToMove = freeOffset - indexOffset; + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, keySize + recSize); + + + //// copy record key + + dst = ((UInt8 *) node) + indexOffset; + + if ( btreePtr->attributes & kBTBigKeysMask ) + { + *((UInt16 *)dst) = keyLength; // use keyLength rather than key.length + rawKeyLength = keyPtr->length16; + sizeOfLength = 2; + } + else + { + *dst = keyLength; // use keyLength rather than key.length + rawKeyLength = keyPtr->length8; + sizeOfLength = 1; + } + dst += sizeOfLength; + + MoveRecordsLeft ( ((UInt8 *) keyPtr) + sizeOfLength, dst, rawKeyLength); // copy key + + // any pad bytes? + bytesToMove = keySize - rawKeyLength; + if (bytesToMove) + ClearMemory (dst + rawKeyLength, bytesToMove); // clear pad bytes in index key + + + //// copy record data + + dst = ((UInt8 *) node) + indexOffset + keySize; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + +Routine: DeleteRecord - Deletes a record from a BTree node. + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + +Result: none +-------------------------------------------------------------------------------*/ + +void DeleteRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + SInt16 indexOffset; + SInt16 nextOffset; + SInt16 freeOffset; + SInt16 bytesToMove; + void *src; + void *dst; + + //// compress records + indexOffset = GetRecordOffset (btreePtr, node, index); + nextOffset = GetRecordOffset (btreePtr, node, index + 1); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + nextOffset; + dst = ((Ptr) node) + indexOffset; + bytesToMove = freeOffset - nextOffset; + MoveRecordsLeft (src, dst, bytesToMove); + + //// Adjust the offsets + DeleteOffset (btreePtr, node, index); +} + + + +/*------------------------------------------------------------------------------- + +Routine: SearchNode - Return index for record that matches key. + +Function: Returns the record index for the record that matches the search key. + If no record was found that matches the search key, the "insert index" + of where the record should go is returned instead. + +Algorithm: A binary search algorithm is used to find the specified key. 
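+
+		For example, in a leaf whose keys sort as { A, C, E, G }, searching for C
+		returns true with index 1, while searching for D returns false with
+		index 2 -- the position at which D would have to be inserted.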
+
+Input:		btreePtr	- pointer to BTree control block
+			node		- pointer to node that contains the record
+			searchKey	- pointer to the key to match
+
+Output:		returnIndex	- index of the matching record, or the insert index if
+						  no record matched
+
+Result:		true		- success (index = record index)
+			false		- key did not match anything in node (index = insert index)
+-------------------------------------------------------------------------------*/
+
+Boolean		SearchNode	(BTreeControlBlockPtr	btreePtr,
+						 NodeDescPtr			node,
+						 KeyPtr					searchKey,
+						 UInt16					*returnIndex )
+{
+	SInt32		lowerBound;
+	SInt32		upperBound;
+	SInt32		index;
+	SInt32		result;
+	KeyPtr		trialKey;
+#if !SupportsKeyDescriptors
+	KeyCompareProcPtr	compareProc = btreePtr->keyCompareProc;
+#endif
+
+	lowerBound = 0;
+	upperBound = node->numRecords - 1;
+
+	while (lowerBound <= upperBound)
+	{
+		index = (lowerBound + upperBound) >> 1;		// divide by 2
+
+		trialKey = (KeyPtr) GetRecordAddress (btreePtr, node, index );
+
+		#if SupportsKeyDescriptors
+		result = CompareKeys (btreePtr, searchKey, trialKey);
+		#else
+		result = compareProc(searchKey, trialKey);
+		#endif
+
+		if		(result < 0)	upperBound = index - 1;		// search < trial
+		else if	(result > 0)	lowerBound = index + 1;		// search > trial
+		else					// search = trial
+		{
+			*returnIndex = index;
+			return true;
+		}
+	}
+
+	*returnIndex = lowerBound;		// lowerBound is insert index
+	return false;
+}
+
+
+/*-------------------------------------------------------------------------------
+
+Routine:	GetRecordByIndex	-	Return pointer to key and data, and size of data.
+
+Function:	Returns a pointer to beginning of key for record, a pointer to the
+			beginning of the data for the record, and the size of the record data
+			(does not include the size of the key).
+
+Input:		btreePtr	- pointer to BTree control block
+			node		- pointer to node that contains the record
+			index		- index of record to get
+
+Output:		keyPtr		- pointer to beginning of key for record
+			dataPtr		- pointer to beginning of data for record
+			dataSize	- size of the data portion of the record
+
+Result:		noErr					- success
+			fsBTRecordNotFoundErr	- index is out of range (>= numRecords)
+-------------------------------------------------------------------------------*/
+
+OSStatus	GetRecordByIndex	(BTreeControlBlockPtr	btreePtr,
+								 NodeDescPtr			node,
+								 UInt16					index,
+								 KeyPtr					*keyPtr,
+								 UInt8 *				*dataPtr,
+								 UInt16					*dataSize )
+{
+	UInt16		offset;
+	UInt16		nextOffset;
+	UInt16		keySize;
+
+	//
+	//	Make sure index is valid (in range 0..numRecords-1)
+	//
+	if (index >= node->numRecords)
+		return fsBTRecordNotFoundErr;
+
+	//// find keyPtr
+	offset	= GetRecordOffset (btreePtr, node, index);
+	*keyPtr	= (KeyPtr) ((Ptr)node + offset);
+
+	//// find dataPtr
+	keySize	= CalcKeySize(btreePtr, *keyPtr);
+	if ( M_IsOdd (keySize) )
+		++keySize;		// add pad byte
+
+	offset	+= keySize;		// add the key length to find data offset
+	*dataPtr = (UInt8 *) node + offset;
+
+	//// find dataSize
+	nextOffset	= GetRecordOffset (btreePtr, node, index + 1);
+	*dataSize	= nextOffset - offset;
+
+	return noErr;
+}
+
+
+
+/*-------------------------------------------------------------------------------
+
+Routine:	GetNodeDataSize	-	Return the amount of space used for data in the node.
+
+Function:	Gets the size of the data currently contained in a node, excluding
+			the node header. (record data + offset overhead)
+
+Input:		btreePtr	- pointer to BTree control block
+			node		- pointer to node that contains the record
+
+Result:		- number of bytes used for data and offsets in the node.
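+
+Note:		(Editor's addition) The free-space offset used below already includes
+			the 14-byte node descriptor, so sizeof(BTNodeDescriptor) is subtracted
+			back out; each record also costs kOffsetSize (2) bytes in the offset
+			table, which is the (numRecords << 1) term.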
+-------------------------------------------------------------------------------*/ + +UInt16 GetNodeDataSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + UInt16 freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + return freeOffset + (node->numRecords << 1) - sizeof (BTNodeDescriptor); +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetNodeFreeSize - Return the amount of free space in the node. + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + +Result: - number of bytes of free space in the node. +-------------------------------------------------------------------------------*/ + +UInt16 GetNodeFreeSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + UInt16 freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); //¥¥ inline? + + return btreePtr->nodeSize - freeOffset - (node->numRecords << 1) - kOffsetSize; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordOffset - Return the offset for record "index". + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset for + +Result: - offset (in bytes) from beginning of node of record specified by index +-------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +UInt16 GetRecordOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + void *pos; + + + pos = (UInt8 *)node + btreePtr->nodeSize - (index << 1) - kOffsetSize; + + return *(short *)pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordAddress - Return address of record "index". + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + +Result: - pointer to record "index". +-------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +UInt8 * GetRecordAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt8 * pos; + + pos = (UInt8 *)node + GetRecordOffset (btreePtr, node, index); + + return pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordSize - Return size of record "index". + +Function: + +Note: This does not work on the FreeSpace index! + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain record size for + +Result: - size of record "index". +-------------------------------------------------------------------------------*/ + +UInt16 GetRecordSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt16 *pos; + + pos = (UInt16 *) ((Ptr)node + btreePtr->nodeSize - (index << 1) - kOffsetSize); + + return *(pos-1) - *pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: GetOffsetAddress - Return address of offset for record "index". + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + +Result: - pointer to offset for record "index". 
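+
+Note:		(Editor's addition) Record offsets are 16-bit values stored in a table
+			that grows backward from the last two bytes of the node, so the entry
+			for record "index" lives nodeSize - (index * 2) - kOffsetSize bytes
+			from the start of the node.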
+-------------------------------------------------------------------------------*/ + +UInt16 *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + void *pos; + + pos = (Ptr)node + btreePtr->nodeSize - (index << 1) -2; + + return (UInt16 *)pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: GetChildNodeNum - Return child node number from index record "index". + +Function: Returns the first UInt32 stored after the key for record "index". + +Assumes: The node is an Index Node. + The key.length stored at record "index" is ODD. //¥¥ change for variable length index keys + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain child node number from + +Result: - child node number from record "index". +-------------------------------------------------------------------------------*/ + +UInt32 GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + UInt16 index ) +{ + UInt8 * pos; + + pos = GetRecordAddress (btreePtr, nodePtr, index); + pos += CalcKeySize(btreePtr, (BTreeKey *) pos); // key.length + size of length field + + return *(UInt32 *)pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: InsertOffset - Add an offset and adjust existing offsets by delta. + +Function: Add an offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by 'delta', the size of the record to be inserted. + The number of records contained in the node is also incremented. + +Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to insert record + delta - size of record to be inserted + +Result: none +-------------------------------------------------------------------------------*/ + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + UInt16 delta ) +{ + UInt16 *src, *dst; + UInt16 numOffsets; + + src = GetOffsetAddress (btreePtr, node, node->numRecords); // point to free offset + dst = src - 1; // point to new offset + numOffsets = node->numRecords++ - index; // subtract index & postincrement + + do { + *dst++ = *src++ + delta; // to tricky? + } while (numOffsets--); +} + + + +/*------------------------------------------------------------------------------- + +Routine: DeleteOffset - Delete an offset. + +Function: Delete the offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by the size of the record 'index'. + The number of records contained in the node is also decremented. + +Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to delete record + +Result: none +-------------------------------------------------------------------------------*/ + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt16 *src, *dst; + UInt16 numOffsets; + UInt16 delta; + + dst = GetOffsetAddress (btreePtr, node, index); + src = dst - 1; + delta = *src - *dst; + numOffsets = --node->numRecords - index; // predecrement numRecords & subtract index + + while (numOffsets--) + { + *--dst = *--src - delta; // work our way left + } +} + + + +/*------------------------------------------------------------------------------- + +Routine: MoveRecordsLeft - Move records left within a node. + +Function: Moves a number of bytes from src to dst. 
Safely handles overlapping + ranges if the bytes are being moved to the "left". No bytes are moved + if bytesToMove is zero. + +Input: src - pointer to source + dst - pointer to destination + bytesToMove - number of bytes to move records + +Result: none +-------------------------------------------------------------------------------*/ +#if 0 +void MoveRecordsLeft (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ) +{ + while (bytesToMove--) + *dst++ = *src++; +} +#endif + + +/*------------------------------------------------------------------------------- + +Routine: MoveRecordsRight - Move records right within a node. + +Function: Moves a number of bytes from src to dst. Safely handles overlapping + ranges if the bytes are being moved to the "right". No bytes are moved + if bytesToMove is zero. + +Input: src - pointer to source + dst - pointer to destination + bytesToMove - number of bytes to move records + +Result: none +-------------------------------------------------------------------------------*/ +#if 0 +void MoveRecordsRight (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ) +{ + src += bytesToMove; + dst += bytesToMove; + + while (bytesToMove--) + *--dst = *--src; +} +#endif diff --git a/fsck_hfs/dfalib/BTreePrivate.h b/fsck_hfs/dfalib/BTreePrivate.h new file mode 100644 index 0000000..f0791ff --- /dev/null +++ b/fsck_hfs/dfalib/BTreePrivate.h @@ -0,0 +1,430 @@ +/* + * Copyright (c) 1999-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreePrivate.h + + Contains: Private interface file for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1998 by Apple Computer, Inc., all rights reserved. +*/ + +#ifndef __BTREEPRIVATE__ +#define __BTREEPRIVATE__ + +#include "BTree.h" + +/////////////////////////////////// Constants /////////////////////////////////// + +#define SupportsKeyDescriptors 0 + +#define kBTreeVersion 1 +#define kMaxTreeDepth 16 + + +#define kHeaderNodeNum 0 +#define kKeyDescRecord 1 + + +// Header Node Record Offsets +enum { + kHeaderRecOffset = 0x000E, + kKeyDescRecOffset = 0x0078, + kHeaderMapRecOffset = 0x00F8 +}; + +#define kMinNodeSize 512 + +#define kMinRecordSize 6 + //¥¥ where is minimum record size enforced? 
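+
+// (Editor's note: the header node record offsets above follow from the on-disk
+// layout: kHeaderRecOffset 0x000E is sizeof(BTNodeDescriptor) (14 bytes),
+// kKeyDescRecOffset 0x0078 adds the 106-byte BTHeaderRec, and kHeaderMapRecOffset
+// 0x00F8 adds the 128-byte user data record that precedes the map record.)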
+ +// miscellaneous BTree constants +enum { + kOffsetSize = 2 +}; + +// Insert Operations +typedef enum { + kInsertRecord = 0, + kReplaceRecord = 1 +} InsertType; + +// illegal string attribute bits set in mask +#define kBadStrAttribMask 0xCF + + + +//////////////////////////////////// Macros ///////////////////////////////////// + +#define M_NodesInMap(mapSize) ((mapSize) << 3) + +#define M_ClearBitNum(integer,bitNumber) ((integer) &= (~(1<<(bitNumber)))) +#define M_SetBitNum(integer,bitNumber) ((integer) |= (1<<(bitNumber))) +#define M_IsOdd(integer) (((integer) & 1) != 0) +#define M_IsEven(integer) (((integer) & 1) == 0) +#define M_BTreeHeaderDirty(btreePtr) btreePtr->flags |= kBTHeaderDirty + +#define M_MapRecordSize(nodeSize) (nodeSize - sizeof (BTNodeDescriptor) - 6) +#define M_HeaderMapRecordSize(nodeSize) (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) - 128 - 8) + +#define M_SWAP_BE16_ClearBitNum(integer,bitNumber) ((integer) &= SWAP_BE16(~(1<<(bitNumber)))) +#define M_SWAP_BE16_SetBitNum(integer,bitNumber) ((integer) |= SWAP_BE16(1<<(bitNumber))) + +#if DEBUG_BUILD + #define Panic( message ) DebugStr( (ConstStr255Param) message ) + #define PanicIf( condition, message ) if ( (condition) != 0 ) DebugStr( message ) +#else + #define Panic( message ) do { ; } while (0) + #define PanicIf( condition, message ) do { ; } while (0) +#endif + +///////////////////////////////////// Types ///////////////////////////////////// +struct BTreeExtensionRec; + +typedef struct BTreeControlBlock { // fields specific to BTree CBs + + UInt8 keyCompareType; /* Key string Comparison Type */ + UInt8 btreeType; + SInt16 obsolete_fileRefNum; // refNum of btree file + KeyCompareProcPtr keyCompareProc; + UInt8 reserved2[16]; // keep for alignment with old style fields + UInt16 treeDepth; + UInt32 rootNode; + UInt32 leafRecords; + UInt32 firstLeafNode; + UInt32 lastLeafNode; + UInt16 nodeSize; + UInt16 maxKeyLength; + UInt32 totalNodes; + UInt32 freeNodes; + UInt32 reserved3[16]; /* reserved*/ + + // new fields + SInt16 version; + UInt32 flags; // dynamic flags + UInt32 attributes; // persistent flags + KeyDescriptorPtr keyDescPtr; + UInt32 writeCount; + + GetBlockProcPtr getBlockProc; + ReleaseBlockProcPtr releaseBlockProc; + SetEndOfForkProcPtr setEndOfForkProc; + BTreeIterator lastIterator; // needed for System 7 iteration context + + // statistical information + UInt32 numGetNodes; + UInt32 numGetNewNodes; + UInt32 numReleaseNodes; + UInt32 numUpdateNodes; + UInt32 numMapNodesRead; // map nodes beyond header node + UInt32 numHintChecks; + UInt32 numPossibleHints; // Looks like a formated hint + UInt32 numValidHints; // Hint used to find correct record. + + struct BTreeExtensionsRec *refCon; // Used by DFA to point to private data. + SFCB *fcbPtr; // fcb of btree file + +} BTreeControlBlock, *BTreeControlBlockPtr; + + +UInt32 CalcKeySize(const BTreeControlBlock *btcb, const BTreeKey *key); +#define CalcKeySize(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? ((key)->length16 + 2) : ((key)->length8 + 1) ) + +UInt32 MaxKeySize(const BTreeControlBlock *btcb); +#define MaxKeySize(btcb) ( (btcb)->maxKeyLength + ((btcb)->attributes & kBTBigKeysMask ? 2 : 1)) + +UInt32 KeyLength(const BTreeControlBlock *btcb, const BTreeKey *key); +#define KeyLength(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? 
(key)->length16 : (key)->length8 ) + + +typedef enum { + kBTHeaderDirty = 0x00000001 +} BTreeFlags; + + +typedef SInt8 *NodeBuffer; +typedef BlockDescriptor NodeRec, *NodePtr; //¥¥ remove this someday... + + + + +//// Tree Path Table - constructed by SearchTree, used by InsertTree and DeleteTree + +typedef struct { + UInt32 node; // node number + UInt16 index; + UInt16 reserved; // align size to a power of 2 +} TreePathRecord, *TreePathRecordPtr; + +typedef TreePathRecord TreePathTable [kMaxTreeDepth]; + + +//// InsertKey - used by InsertTree, InsertLevel and InsertNode + +struct InsertKey { + BTreeKeyPtr keyPtr; + UInt8 * recPtr; + UInt16 keyLength; + UInt16 recSize; + Boolean replacingKey; + Boolean skipRotate; +}; + +typedef struct InsertKey InsertKey; + + +//// For Notational Convenience + +typedef BTNodeDescriptor* NodeDescPtr; +typedef UInt8 *RecordPtr; +typedef BTreeKeyPtr KeyPtr; + + +//////////////////////////////////// Globals //////////////////////////////////// + + +//////////////////////////////////// Macros ///////////////////////////////////// + +// Exit function on error +#define M_ExitOnError( result ) if ( ( result ) != noErr ) goto ErrorExit; else ; + +// Test for passed condition and return if true +#define M_ReturnErrorIf( condition, error ) if ( condition ) return( error ) + +#if DEBUG_BUILD + #define Panic( message ) DebugStr( (ConstStr255Param) message ) + #define PanicIf( condition, message ) if ( (condition) != 0 ) DebugStr( message ) +#else + #define Panic( message ) do { ; } while (0) + #define PanicIf( condition, message ) do { ; } while (0) +#endif + +//////////////////////////////// Key Operations ///////////////////////////////// + +SInt32 CompareKeys (BTreeControlBlockPtr btreePtr, + KeyPtr searchKey, + KeyPtr trialKey ); + +OSStatus GetKeyDescriptor (BTreeControlBlockPtr btreePtr, + NodeDescPtr headerNode ); + +OSStatus CheckKeyDescriptor (KeyDescriptorPtr keyDescPtr, + UInt16 maxKeyLength ); + +OSStatus CheckKey (KeyPtr keyPtr, + KeyDescriptorPtr keyDescPtr, + UInt16 maxKeyLength ); + + + +//////////////////////////////// Map Operations ///////////////////////////////// + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, + UInt32 *nodeNum); + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum); + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + UInt32 nodes ); + +UInt32 CalcMapBits (BTreeControlBlockPtr btreePtr); + + +//////////////////////////////// Misc Operations //////////////////////////////// + +UInt16 CalcKeyRecordSize (UInt16 keySize, + UInt16 recSize ); + +OSStatus VerifyHeader (SFCB *filePtr, + BTHeaderRec *header ); + +OSStatus UpdateHeader (BTreeControlBlockPtr btreePtr ); + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + UInt32 *nodeNum, + UInt16 *index, + Boolean *foundRecord ); + +OSStatus CheckInsertParams (SFCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ); + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen, + Boolean *recordInserted ); + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, + BTreeIterator *iterator, + Boolean *answer ); + +//////////////////////////////// Node Operations //////////////////////////////// + +//// Node Operations + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ); 
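+
+// (Editor's note: GetLeftSiblingNode and GetRightSiblingNode below are macro
+// wrappers that simply call GetNode() on the node's bLink or fLink field.)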
+ +OSStatus GetLeftSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *left ); + +#define GetLeftSiblingNode(btree,node,left) GetNode ((btree), ((NodeDescPtr)(node))->bLink, (left)) + +OSStatus GetRightSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *right ); + +#define GetRightSiblingNode(btree,node,right) GetNode ((btree), ((NodeDescPtr)(node))->fLink, (right)) + + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ); + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ); + +//// Node Buffer Operations + +void ClearNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +UInt16 GetNodeDataSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +UInt16 GetNodeFreeSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + + +//// Record Operations + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + RecordPtr recPtr, + UInt16 recSize ); + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + KeyPtr keyPtr, + UInt16 keyLength, + RecordPtr recPtr, + UInt16 recSize ); + +void DeleteRecord (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + + +Boolean SearchNode (BTreeControlBlockPtr btree, + NodeDescPtr node, + KeyPtr searchKey, + UInt16 *index ); + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index, + KeyPtr *keyPtr, + UInt8 * *dataPtr, + UInt16 *dataSize ); + +UInt8 * GetRecordAddress (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +#define GetRecordAddress(btreePtr,node,index) ((UInt8 *)(node) + (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))) + + +UInt16 GetRecordSize (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +UInt32 GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + UInt16 index ); + +void MoveRecordsLeft (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ); + +#define MoveRecordsLeft(src,dst,bytes) CopyMemory((src),(dst),(bytes)) + +void MoveRecordsRight (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ); + +#define MoveRecordsRight(src,dst,bytes) CopyMemory((src),(dst),(bytes)) + + + +//////////////////////////////// Tree Operations //////////////////////////////// + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr keyPtr, + TreePathTable treePathTable, + UInt32 *nodeNum, + BlockDescriptor *nodePtr, + UInt16 *index ); + +OSStatus InsertTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + Boolean replacingKey, + UInt32 *insertNode ); + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level ); + +#endif //__BTREEPRIVATE__ diff --git a/fsck_hfs/dfalib/BTreeScanner.c b/fsck_hfs/dfalib/BTreeScanner.c new file mode 100755 index 0000000..1c2ba1f --- /dev/null +++ b/fsck_hfs/dfalib/BTreeScanner.c @@ -0,0 +1,385 @@ +/* + * Copyright (c) 1996-2002, 2005, 2012 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + * + * @(#)BTreeScanner.c + */ + + +#include "BTreeScanner.h" +#include "Scavenger.h" +#include "../cache.h" +#include "../fsck_hfs.h" + +static int FindNextLeafNode( BTScanState *scanState ); +static int ReadMultipleNodes( BTScanState *scanState ); + + +//_________________________________________________________________________________ +// +// Routine: BTScanNextRecord +// +// Purpose: Return the next leaf record in a scan. +// +// Inputs: +// scanState Scanner's current state +// +// Outputs: +// key Key of found record (points into buffer) +// data Data of found record (points into buffer) +// dataSize Size of data in found record +// +// Result: +// noErr Found a valid record +// btNotFound No more records +// +// Notes: +// This routine returns pointers to the found record's key and data. It +// does not copy the key or data to a caller-supplied buffer (like +// GetBTreeRecord would). The caller must not modify the key or data. +//_________________________________________________________________________________ + +int BTScanNextRecord( BTScanState * scanState, + void * * key, + void * * data, + u_int32_t * dataSize ) +{ + int err; + u_int16_t dataSizeShort; + int64_t maxLeafRecs; + + err = noErr; + + // + // If this is the first call, there won't be any nodes in the buffer, so go + // find the first first leaf node (if any). + // + if ( scanState->nodesLeftInBuffer <= 0 ) + err = FindNextLeafNode( scanState ); + + // btcb->leafRecords may be fragile (the B-Tree header could be damaged) + // so in order to do a sanity check on the max number of leaf records we + // could have we use the catalog file's physical size divided by the smallest + // leaf node record size to get our ceiling. + maxLeafRecs = scanState->btcb->fcbPtr->fcbPhysicalSize / sizeof( HFSCatalogThread ); + + while ( err == noErr ) + { + // See if we have a record in the current node + err = GetRecordByIndex( scanState->btcb, scanState->currentNodePtr, + scanState->recordNum, (KeyPtr *) key, + (UInt8 **) data, &dataSizeShort ); + if ( err == noErr ) + { + ++scanState->recordsFound; + ++scanState->recordNum; + if (dataSize != NULL) + *dataSize = dataSizeShort; + return noErr; + } + + // We're done with the current node. See if we've returned all the records + if ( scanState->recordsFound >= maxLeafRecs ) + return btNotFound; + + // Move to the first record of the next leaf node + scanState->recordNum = 0; + err = FindNextLeafNode( scanState ); + } + + // + // If we got an EOF error from FindNextLeafNode, then there are no more leaf + // records to be found. 
+ // + if ( err == fsEndOfIterationErr ) + err = btNotFound; + + return err; + +} /* BTScanNextRecord */ + + +//_________________________________________________________________________________ +// +// Routine: FindNextLeafNode +// +// Purpose: Point to the next leaf node in the buffer. Read more nodes +// into the buffer if needed (and allowed). +// +// Inputs: +// scanState Scanner's current state +// +// Result: +// noErr Found a valid record +// fsEndOfIterationErr No more nodes in file +//_________________________________________________________________________________ + +static int FindNextLeafNode( BTScanState *scanState ) +{ + int err; + BlockDescriptor myBlockDescriptor; + + err = noErr; // Assume everything will be OK + + while ( 1 ) + { + ++scanState->nodeNum; + --scanState->nodesLeftInBuffer; + if ( scanState->nodesLeftInBuffer <= 0 ) + { + // read some more nodes into buffer + err = ReadMultipleNodes( scanState ); + if ( err != noErr ) + break; + } + else + { + // Adjust to point to the next node in the buffer + + // If we've looked at all nodes in the tree, then we're done + if ( scanState->nodeNum >= scanState->btcb->totalNodes ) + return fsEndOfIterationErr; + + scanState->currentNodePtr = (BTNodeDescriptor *)((UInt8 *)scanState->currentNodePtr + scanState->btcb->nodeSize); + } + + // need to manufacture a BlockDescriptor since hfs_swap_BTNode expects one as input + myBlockDescriptor.buffer = (void *) scanState->currentNodePtr; + myBlockDescriptor.blockHeader = NULL; + myBlockDescriptor.blockNum = scanState->nodeNum; + myBlockDescriptor.blockSize = scanState->btcb->nodeSize; + myBlockDescriptor.blockReadFromDisk = false; + myBlockDescriptor.fragmented = false; + err = hfs_swap_BTNode(&myBlockDescriptor, scanState->btcb->fcbPtr, kSwapBTNodeBigToHost); + if ( err != noErr ) + { + err = noErr; + continue; + } + + // NOTE - todo - add less lame sanity check to allow leaf nodes that + // only have damaged kind. + if ( scanState->currentNodePtr->kind == kBTLeafNode ) + break; + } + + return err; + +} /* FindNextLeafNode */ + + +//_________________________________________________________________________________ +// +// Routine: ReadMultipleNodes +// +// Purpose: Read one or more nodes into the buffer. 
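+//
+//				(Editor's note: the read goes through the volume block cache via
+//				CacheRead so that any journaled metadata still held in the cache
+//				is seen by the scan.)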
+//
+//	Inputs:
+//		theScanStatePtr		Scanner's current state
+//
+//	Result:
+//		noErr				One or more nodes were read
+//		fsEndOfIterationErr	No nodes left in file, none in buffer
+//_________________________________________________________________________________
+
+int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf);
+
+static int ReadMultipleNodes( BTScanState *theScanStatePtr )
+{
+	int						myErr = noErr;
+	BTreeControlBlockPtr	myBTreeCBPtr;
+	UInt64					myPhyBlockNum;
+	SInt64					myPhyOffset;
+	UInt64					mySectorOffset;		// offset within file (in 512-byte sectors)
+	UInt32					myContiguousBytes;
+
+	myBTreeCBPtr = theScanStatePtr->btcb;
+
+	// map logical block in catalog btree file to physical block on volume
+	mySectorOffset =
+		(((UInt64)theScanStatePtr->nodeNum * (UInt64)myBTreeCBPtr->fcbPtr->fcbBlockSize) >> kSectorShift);
+	myErr = MapFileBlockC( myBTreeCBPtr->fcbPtr->fcbVolume, myBTreeCBPtr->fcbPtr,
+						   theScanStatePtr->bufferSize, mySectorOffset,
+						   &myPhyBlockNum, &myContiguousBytes );
+	if ( myErr != noErr )
+	{
+		myErr = fsEndOfIterationErr;
+		goto ExitThisRoutine;
+	}
+
+	// now read blocks from the device
+	myPhyOffset = (SInt64) ( ( (UInt64) myPhyBlockNum ) << kSectorShift );
+
+	// Go through the cache, so we can get any locked-in journal changes
+	Buf_t *tmpbuf = NULL;
+
+	myErr = CacheRead( myBTreeCBPtr->fcbPtr->fcbVolume->vcbBlockCache,
+			   myPhyOffset, myContiguousBytes, &tmpbuf );
+
+	if ( myErr == noErr )
+	{
+		if (tmpbuf)
+		{
+			if (tmpbuf->Length < myContiguousBytes)
+				abort();
+			memcpy(theScanStatePtr->bufferPtr, tmpbuf->Buffer, myContiguousBytes);
+			CacheRelease(myBTreeCBPtr->fcbPtr->fcbVolume->vcbBlockCache, tmpbuf, 0);
+		} else
+			abort();
+#if DEBUG_BTREE
+		/*
+		 * This code was written to help debug a cache problem, where CacheRead()
+		 * was returning different results than CacheRawRead().  I am leaving it
+		 * around because I fear that will happen again, so it can be used for
+		 * reference rather than having to be rewritten then.
+		 */
+		size_t tempBufferSize = myContiguousBytes;
+		int tempError;
+		unsigned char *tempBuffer = malloc(myContiguousBytes);
+		if (tempBuffer == NULL)
+			abort();
+		tempError = CacheRawRead( myBTreeCBPtr->fcbPtr->fcbVolume->vcbBlockCache,
+					  myPhyOffset, myContiguousBytes, tempBuffer);
+		if (memcmp(tempBuffer, theScanStatePtr->bufferPtr, myContiguousBytes) != 0)
+		{
+			uint8_t raw, cached;	// byte values being compared, not pointers
+			fprintf(stderr, "CacheRead and CacheRawRead returned different values\n");
+			fprintf(stderr, "\tmyPhyOffset = %lld, myContiguousBytes = %u\n", myPhyOffset, myContiguousBytes);
+			size_t i = 0;
+			for (i = 0; i < myContiguousBytes; i++)
+			{
+				cached = ((uint8_t*)theScanStatePtr->bufferPtr)[i];
+				raw = tempBuffer[i];
+				if (cached != raw)
+				{
+					fprintf(stderr, "\tOffset %zu:  cached value = 0x%02x, raw value = 0x%02x\n", i, cached, raw);
+					break;
+				}
+			}
+			extern void dumpCache(void*);
+			dumpCache(myBTreeCBPtr->fcbPtr->fcbVolume->vcbBlockCache);
+			abort();
+		}
+		free(tempBuffer);
+#endif
+	}
+
+
+
+	if ( myErr != noErr )
+	{
+		myErr = fsEndOfIterationErr;
+		goto ExitThisRoutine;
+	}
+
+	theScanStatePtr->nodesLeftInBuffer = myContiguousBytes /
+						theScanStatePtr->btcb->nodeSize;
+	theScanStatePtr->currentNodePtr = (BTNodeDescriptor *) theScanStatePtr->bufferPtr;
+
+ExitThisRoutine:
+	return myErr;
+
+} /* ReadMultipleNodes */
+
+
+//_________________________________________________________________________________
+//
+//	Routine:	BTScanInitialize
+//
+//	Purpose:	Prepare to start a new BTree scan. 
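+//
+//	Usage:		(Editor's illustrative sketch, not part of the original source;
+//				"catalogFCB" is a placeholder for whatever SFCB the caller wants
+//				to scan, and error handling is omitted.)
+//
+//					BTScanState	scanState;
+//					void		*keyPtr, *dataPtr;
+//					u_int32_t	dataSize;
+//
+//					if ( BTScanInitialize( catalogFCB, &scanState ) == noErr )
+//					{
+//						while ( BTScanNextRecord( &scanState, &keyPtr, &dataPtr, &dataSize ) == noErr )
+//						{
+//							/* examine the leaf record at keyPtr/dataPtr */
+//						}
+//						(void) BTScanTerminate( &scanState );
+//					}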
+// +// Inputs: +// btreeFile The B-Tree's file control block +// +// Outputs: +// scanState Scanner's current state; pass to other scanner calls +// +//_________________________________________________________________________________ + +int BTScanInitialize( const SFCB * btreeFile, + BTScanState * scanState ) +{ + BTreeControlBlock *btcb; + u_int32_t bufferSize; + + // + // Make sure this is a valid B-Tree file + // + btcb = (BTreeControlBlock *) btreeFile->fcbBtree; + if (btcb == NULL) + return R_RdErr; + + // + // Make sure buffer size is big enough, and a multiple of the + // B-Tree node size + // + bufferSize = (kCatScanBufferSize / btcb->nodeSize) * btcb->nodeSize; + + // + // Set up the scanner's state + // + scanState->bufferSize = bufferSize; + scanState->bufferPtr = (void *) AllocateMemory( bufferSize ); + if ( scanState->bufferPtr == NULL ) + return( R_NoMem ); + + scanState->btcb = btcb; + scanState->nodeNum = 0; + scanState->recordNum = 0; + scanState->currentNodePtr = NULL; + scanState->nodesLeftInBuffer = 0; // no nodes currently in buffer + scanState->recordsFound = 0; + + return noErr; + +} /* BTScanInitialize */ + + +//_________________________________________________________________________________ +// +// Routine: BTScanTerminate +// +// Purpose: Return state information about a scan so that it can be resumed +// later via BTScanInitialize. +// +// Inputs: +// scanState Scanner's current state +// +// Outputs: +// nextNode Node number to resume a scan (pass to BTScanInitialize) +// nextRecord Record number to resume a scan (pass to BTScanInitialize) +// recordsFound Valid records seen so far (pass to BTScanInitialize) +//_________________________________________________________________________________ + +int BTScanTerminate( BTScanState * scanState ) +{ + if ( scanState->bufferPtr != NULL ) + { + DisposeMemory( scanState->bufferPtr ); + scanState->bufferPtr = NULL; + scanState->currentNodePtr = NULL; + } + + return noErr; + +} /* BTScanTerminate */ + + diff --git a/fsck_hfs/dfalib/BTreeScanner.h b/fsck_hfs/dfalib/BTreeScanner.h new file mode 100755 index 0000000..ce47315 --- /dev/null +++ b/fsck_hfs/dfalib/BTreeScanner.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 1996-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + * + * @(#)BTreeScanner.h + */ + +#ifndef _BTREESCANNER_H_ +#define _BTREESCANNER_H_ + +#include "BTreePrivate.h" + +// btree node scanner buffer size. Joe Sokol suggests 128K as a max (2002 WWDC) +enum { kCatScanBufferSize = (128 * 1024) }; + + +/* + BTScanState - This structure is used to keep track of the current state + of a BTree scan. 
It contains both the dynamic state information (like + the current node number and record number) and information that is static + for the duration of a scan (such as buffer pointers). + + NOTE: recordNum may equal or exceed the number of records in the node + number nodeNum. If so, then the next attempt to get a record will move + to a new node number. +*/ +struct BTScanState +{ + // The following fields are set up once at initialization time. + // They are not changed during a scan. + u_int32_t bufferSize; + void * bufferPtr; + BTreeControlBlock * btcb; + + // The following fields are the dynamic state of the current scan. + u_int32_t nodeNum; // zero is first node + u_int32_t recordNum; // zero is first record + BTNodeDescriptor * currentNodePtr; // points to current node within buffer + int32_t nodesLeftInBuffer; // number of valid nodes still in the buffer + int64_t recordsFound; // number of leaf records seen so far +}; +typedef struct BTScanState BTScanState; + + +/* *********************** PROTOTYPES *********************** */ + +int BTScanInitialize( const SFCB * btreeFile, + BTScanState * scanState ); + +int BTScanNextRecord( BTScanState * scanState, + void * * key, + void * * data, + u_int32_t * dataSize ); + +int BTScanTerminate( BTScanState * scanState ); + +#endif /* !_BTREESCANNER_H_ */ diff --git a/fsck_hfs/dfalib/BTreeTreeOps.c b/fsck_hfs/dfalib/BTreeTreeOps.c new file mode 100644 index 0000000..3453de8 --- /dev/null +++ b/fsck_hfs/dfalib/BTreeTreeOps.c @@ -0,0 +1,1844 @@ +/* + * Copyright (c) 1999-2000, 2002, 2007-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeTreeOps.c + + Contains: Multi-node tree operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1997, 1999 by Apple Computer, Inc., all rights reserved. 
+*/ + +#include "BTreePrivate.h" +#include "../fsck_debug.h" +extern char debug; +extern void plog(const char *fmt, ...); + +#define DEBUG_TREEOPS 0 + +/////////////////////// Routines Internal To BTree Module /////////////////////// +// +// SearchTree +// InsertTree +// +////////////////////// Routines Internal To BTreeTreeOps.c ////////////////////// + +static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ); + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + UInt16 rightInsertIndex, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + Boolean *recordFit, + UInt16 *recsRotated ); + +static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +#if 0 +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + UInt32 rightNodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + UInt16 *recsRotated ); +#endif + + +static OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + UInt32 *insertNode ); + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + BlockDescriptor *targetNode, + UInt32 node, + UInt16 index, + UInt32 *newNode, + UInt16 *newIndex, + BlockDescriptor *leftNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ); + +static UInt16 GetKeyLength (const BTreeControlBlock *btreePtr, + const BTreeKey *key, + Boolean forLeafNode ); + +static Boolean RotateRecordRight( BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus RotateRight( BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + UInt16 leftInsertIndex, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + Boolean *recordFit, + UInt16 *recsRotated ); + +static OSStatus SplitRight( BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + BlockDescriptor *rightNodePtr, + UInt32 nodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 *recPtr, + UInt16 recSize, + UInt16 *insertIndexPtr, + UInt32 *newNodeNumPtr, + UInt16 *recsRotatedPtr ); + +#if DEBUG_TREEOPS +static int DoKeyCheck( NodeDescPtr nodeP, BTreeControlBlock *btcb ); +static int DoKeyCheckAcrossNodes( NodeDescPtr theLeftNodePtr, + NodeDescPtr theRightNodePtr, + BTreeControlBlock *theTreePtr, + Boolean printKeys ); +static void PrintNodeDescriptor( NodeDescPtr thePtr ); +static void PrintKey( UInt8 * myPtr, int mySize ); +#endif // DEBUG_TREEOPS + + +/* used by InsertLevel (and called routines) to communicate the state of an insert operation */ +enum +{ + kInsertParent = 0x0001, + kUpdateParent = 0x0002, + kNewRoot = 0x0004, + kInsertedInRight = 0x0008, + kRecordFits = 0x0010 +}; + + +//////////////////////// BTree Multi-node Tree Operations /////////////////////// + + +/*------------------------------------------------------------------------------- + +Routine: SearchTree - Search BTree for key and set up Tree Path Table. 
+
+Input:		btreePtr		- pointer to control block of BTree to search
+			keyPtr			- pointer to the key to search for
+			treePathTable	- pointer to the tree path table to construct
+
+Output:		nodeNum			- number of the node containing the key position
+			nodePtr			- block descriptor for that node
+			returnIndex		- record index if found, otherwise the insert index
+
+Result:		noErr					- key found, index is record index
+			fsBTRecordNotFoundErr	- key not found, index is insert index
+			fsBTEmptyErr			- key not found, return params are nil
+			otherwise				- catastrophic failure (GetNode/ReleaseNode failed)
+-------------------------------------------------------------------------------*/
+
+OSStatus	SearchTree	(BTreeControlBlockPtr	 btreePtr,
+						 BTreeKeyPtr			 searchKey,
+						 TreePathTable			 treePathTable,
+						 UInt32					*nodeNum,
+						 BlockDescriptor		*nodePtr,
+						 UInt16					*returnIndex )
+{
+	OSStatus	err;
+	SInt16		level;
+	UInt32		curNodeNum;
+	NodeRec		nodeRec;
+	UInt16		index;
+	Boolean		keyFound;
+	SInt8		nodeKind;	//	Kind of current node (index/leaf)
+	KeyPtr		keyPtr;
+	UInt8 *		dataPtr;
+	UInt16		dataSize;
+
+
+	curNodeNum	= btreePtr->rootNode;
+	level		= btreePtr->treeDepth;
+
+	if (level == 0)						// is the tree empty?
+	{
+		err = fsBTEmptyErr;
+		goto ErrorExit;
+	}
+
+	//•• for debugging...
+	treePathTable [0].node	= 0;
+	treePathTable [0].index	= 0;
+
+	while (true)
+	{
+		//
+		//	[2550929] Node number 0 is the header node.  It is never a valid
+		//	index or leaf node.  If we're ever asked to search through node 0,
+		//	something has gone wrong (typically a bad child node number, or
+		//	we found a node full of zeroes that we thought was an index node).
+		//
+		if (curNodeNum == 0)
+		{
+//			Panic("\pSearchTree: curNodeNum is zero!");
+			if (debug) fprintf(stderr, "%s(%d):  curNodeNum is 0\n", __FUNCTION__, __LINE__);
+			err = fsBTInvalidNodeErr;
+			goto ErrorExit;
+		}
+
+		err = GetNode (btreePtr, curNodeNum, &nodeRec);
+		if (err != noErr)
+		{
+			goto ErrorExit;
+		}
+
+		//
+		//	[2550929] Sanity check the node height and node type.  We expect
+		//	particular values at each iteration in the search.  This checking
+		//	quickly finds bad pointers, loops, and other damage to the
+		//	hierarchy of the B-tree.
+		//
+		if (((BTNodeDescriptor*)nodeRec.buffer)->height != level)
+		{
+			if (debug)
+			{
+				fprintf(stderr, "%s(line %d): height %d != level %d\n", __FUNCTION__, __LINE__, ((BTNodeDescriptor*)nodeRec.buffer)->height, level);
+				fprintf(stderr, "   curNodeNum = %u\n", curNodeNum);
+				if (cur_debug_level & d_dump_node)
+				{
+					HexDump(nodeRec.buffer, nodeRec.blockSize, true);
+				}
+			}
+			err = fsBTInvalidNodeErr;
+			goto ReleaseAndExit;
+		}
+		nodeKind = ((BTNodeDescriptor*)nodeRec.buffer)->kind;
+		if (level == 1)
+		{
+			//	Nodes at level 1 must be leaves, by definition
+			if (nodeKind != kBTLeafNode)
+			{
+				if (debug) fprintf(stderr, "%s(%d): wrong kind of node\n", __FUNCTION__, __LINE__);
+				err = fsBTInvalidNodeErr;
+				goto ReleaseAndExit;
+			}
+		}
+		else
+		{
+			//	A node at any other depth must be an index node
+			if (nodeKind != kBTIndexNode)
+			{
+				if (debug) fprintf(stderr, "%s(%d): other wrong kind of node\n", __FUNCTION__, __LINE__);
+				err = fsBTInvalidNodeErr;
+				goto ReleaseAndExit;
+			}
+		}
+
+		keyFound = SearchNode (btreePtr, nodeRec.buffer, searchKey, &index);
+
+		treePathTable [level].node	= curNodeNum;
+
+		if (nodeKind == kBTLeafNode)
+		{
+			treePathTable [level].index = index;
+			break;			// we're done...
+ } + + if ( (keyFound != true) && (index != 0)) + --index; + + treePathTable [level].index = index; + + err = GetRecordByIndex (btreePtr, nodeRec.buffer, index, &keyPtr, &dataPtr, &dataSize); + if (err != noErr) + { + // [2550929] If we got an error, it is probably because the index was bad + // (typically a corrupt node that confused SearchNode). Invalidate the node + // so we won't accidentally use the corrupted contents. NOTE: the Mac OS 9 + // sources call this InvalidateNode. + + (void) TrashNode(btreePtr, &nodeRec); + goto ErrorExit; + } + + // Get the child pointer out of this index node. We're now done with the current + // node and can continue the search with the child node. + curNodeNum = *(UInt32 *)dataPtr; + err = ReleaseNode (btreePtr, &nodeRec); + if (err != noErr) + { + goto ErrorExit; + } + + // The child node should be at a level one less than the parent. + --level; + } + + *nodeNum = curNodeNum; + *nodePtr = nodeRec; + *returnIndex = index; + + if (keyFound) + return noErr; // searchKey found, index identifies record in node + else + return fsBTRecordNotFoundErr; // searchKey not found, index identifies insert point + +ReleaseAndExit: + (void) ReleaseNode(btreePtr, &nodeRec); + // fall into ErrorExit + +ErrorExit: + + *nodeNum = 0; + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + *returnIndex = 0; + + return err; +} + + + + +////////////////////////////////// InsertTree /////////////////////////////////// + +OSStatus InsertTree ( BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + Boolean replacingKey, + UInt32 *insertNode ) +{ + InsertKey primaryKey; + OSStatus err; + + primaryKey.keyPtr = keyPtr; + primaryKey.keyLength = GetKeyLength(btreePtr, primaryKey.keyPtr, (level == 1)); + primaryKey.recPtr = recPtr; + primaryKey.recSize = recSize; + primaryKey.replacingKey = replacingKey; + primaryKey.skipRotate = false; + + err = InsertLevel (btreePtr, treePathTable, &primaryKey, nil, + targetNode, index, level, insertNode ); + + return err; + +} // End of InsertTree + + +////////////////////////////////// InsertLevel ////////////////////////////////// + +OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + UInt32 *insertNode ) +{ + OSStatus err; + BlockDescriptor siblingNode; + UInt32 targetNodeNum; + UInt32 newNodeNum; + UInt16 newIndex; + Boolean insertParent; + Boolean updateParent; + Boolean newRoot; + +#if defined(applec) && !defined(__SC__) + PanicIf ((level == 1) && (((NodeDescPtr)targetNode->buffer)->kind != kBTLeafNode), "\P InsertLevel: non-leaf at level 1! "); +#endif + siblingNode.buffer = nil; + targetNodeNum = treePathTable [level].node; + + insertParent = false; + updateParent = false; + + ////// process first insert ////// + + err = InsertNode (btreePtr, primaryKey, targetNode, targetNodeNum, index, + &newNodeNum, &newIndex, &siblingNode, &updateParent, &insertParent, &newRoot ); + M_ExitOnError (err); + + if ( newRoot ) + { + // Extend the treePathTable by adding an entry for the new + // root node that references the current targetNode. + // + // When we split right the index in the new root is 0 if the new + // node is the same as the target node or 1 otherwise. 
When the + // new node number and the target node number are the same then + // we inserted the new record into the left node (the orignial target) + // after the split. + + treePathTable [level + 1].node = btreePtr->rootNode; + if ( targetNodeNum == newNodeNum ) + treePathTable [level + 1].index = 0; // + else + treePathTable [level + 1].index = 1; + } + + if ( level == 1 ) + *insertNode = newNodeNum; + + ////// process second insert (if any) ////// + + if ( secondaryKey != nil ) + { + Boolean temp; + + // NOTE - we only get here if we have split a child node to the right and + // we are currently updating the child's parent. newIndex + 1 refers to + // the location in the parent node where we insert the new index record that + // represents the new child node (the new right node). + err = InsertNode (btreePtr, secondaryKey, targetNode, newNodeNum, newIndex + 1, + &newNodeNum, &newIndex, &siblingNode, &updateParent, &insertParent, &temp); + M_ExitOnError (err); + + if ( DEBUG_BUILD && updateParent && newRoot ) + DebugStr("\p InsertLevel: New root from primary key, update from secondary key..."); + } + + //////////////////////// Update Parent(s) /////////////////////////////// + + if ( insertParent || updateParent ) + { + BlockDescriptor parentNode; + UInt32 parentNodeNum; + KeyPtr keyPtr; + UInt8 * recPtr; + UInt16 recSize; + + secondaryKey = nil; + + PanicIf ( (level == btreePtr->treeDepth), "\p InsertLevel: unfinished insert!?"); + + ++level; + + // Get Parent Node data... + index = treePathTable [level].index; + parentNodeNum = treePathTable [level].node; + + PanicIf ( parentNodeNum == 0, "\p InsertLevel: parent node is zero!?"); + + err = GetNode (btreePtr, parentNodeNum, &parentNode); // released as target node in next level up + M_ExitOnError (err); +#if defined(applec) && !defined(__SC__) + if (DEBUG_BUILD && level > 1) + PanicIf ( ((NodeDescPtr)parentNode.buffer)->kind != kBTIndexNode, "\P InsertLevel: parent node not an index node! "); +#endif + ////////////////////////// Update Parent Index ////////////////////////////// + + if ( updateParent ) + { + //¥¥Êdebug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + PanicIf( (*(UInt32 *) recPtr) != targetNodeNum, "\p InsertLevel: parent ptr doesn't match target node!"); + + // need to delete and re-insert this parent key/ptr + // we delete it here and it gets re-inserted in the + // InsertLevel call below. + DeleteRecord (btreePtr, parentNode.buffer, index); + + primaryKey->keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + primaryKey->keyLength = GetKeyLength(btreePtr, primaryKey->keyPtr, false); + primaryKey->recPtr = (UInt8 *) &targetNodeNum; + primaryKey->recSize = sizeof(targetNodeNum); + primaryKey->replacingKey = kReplaceRecord; + primaryKey->skipRotate = insertParent; // don't rotate left if we have two inserts occuring + } + + ////////////////////////// Add New Parent Index ///////////////////////////// + + if ( insertParent ) + { + InsertKey *insertKeyPtr; + InsertKey insertKey; + + if ( updateParent ) + { + insertKeyPtr = &insertKey; + secondaryKey = &insertKey; + } + else + { + insertKeyPtr = primaryKey; + // split right but not updating parent for our left node then + // we want to insert the key for the new right node just after + // the key for our left node. 
+ index++; + } + + insertKeyPtr->keyPtr = (KeyPtr) GetRecordAddress (btreePtr, siblingNode.buffer, 0); + insertKeyPtr->keyLength = GetKeyLength(btreePtr, insertKeyPtr->keyPtr, false); + insertKeyPtr->recPtr = (UInt8 *) &((NodeDescPtr)targetNode->buffer)->fLink; + insertKeyPtr->recSize = sizeof(UInt32); + insertKeyPtr->replacingKey = kInsertRecord; + insertKeyPtr->skipRotate = false; // a rotate is OK during second insert + } + + err = InsertLevel (btreePtr, treePathTable, primaryKey, secondaryKey, + &parentNode, index, level, insertNode ); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, targetNode); // all done with target + M_ExitOnError (err); + + err = UpdateNode (btreePtr, &siblingNode); // all done with left sibling + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &siblingNode); + + Panic ("\p InsertLevel: an error occured!"); + + return err; + +} // End of InsertLevel + + + +////////////////////////////////// InsertNode /////////////////////////////////// + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + + BlockDescriptor *targetNode, + UInt32 nodeNum, + UInt16 index, + + UInt32 *newNodeNumPtr, + UInt16 *newIndex, + + BlockDescriptor *siblingNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ) +{ + BlockDescriptor *tempNode; + UInt32 leftNodeNum; + UInt32 rightNodeNum; + UInt16 recsRotated; + OSErr err; + Boolean recordFit; + + *rootSplit = false; + + PanicIf ( targetNode->buffer == siblingNode->buffer, "\p InsertNode: targetNode == siblingNode, huh?"); + + leftNodeNum = ((NodeDescPtr) targetNode->buffer)->bLink; + rightNodeNum = ((NodeDescPtr) targetNode->buffer)->fLink; + + + /////////////////////// Try Simple Insert /////////////////////////////// + + if ( nodeNum == leftNodeNum ) + tempNode = siblingNode; + else + tempNode = targetNode; + + recordFit = InsertKeyRecord (btreePtr, tempNode->buffer, index, key->keyPtr, key->keyLength, key->recPtr, key->recSize); + + if ( recordFit ) + { + *newNodeNumPtr = nodeNum; + *newIndex = index; + +#if DEBUG_TREEOPS + if ( DoKeyCheck( tempNode->buffer, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in node num %d: \n", __FUNCTION__ , nodeNum ); + PrintNodeDescriptor( tempNode->buffer ); + err = fsBTBadRotateErr; + goto ErrorExit; + } +#endif // DEBUG_TREEOPS + + if ( (index == 0) && (((NodeDescPtr) tempNode->buffer)->height != btreePtr->treeDepth) ) + *updateParent = true; // the first record changed so we need to update the parent + goto ExitThisRoutine; + } + + + //////////////////////// Try Rotate Left //////////////////////////////// + + if ( leftNodeNum > 0 ) + { + PanicIf ( siblingNode->buffer != nil, "\p InsertNode: siblingNode already aquired!"); + + if ( siblingNode->buffer == nil ) + { + err = GetNode (btreePtr, leftNodeNum, siblingNode); // will be released by caller or a split below + M_ExitOnError (err); + } + + PanicIf ( ((NodeDescPtr) siblingNode->buffer)->fLink != nodeNum, "\p InsertNode, RotateLeft: invalid sibling link!" ); + + if ( !key->skipRotate ) // are rotates allowed? 
+		{
+			err = RotateLeft (btreePtr, siblingNode->buffer, targetNode->buffer, index, key->keyPtr, key->recPtr,
+							  key->recSize, newIndex, newNodeNumPtr, &recordFit, &recsRotated );
+			M_ExitOnError (err);
+
+			if ( recordFit )
+			{
+				if ( key->replacingKey || (recsRotated > 1) || (index > 0) )
+					*updateParent = true;
+				goto ExitThisRoutine;
+			}
+		}
+	}
+
+
+	//////////////////////// Try Split Right /////////////////////////////////
+
+	(void) ReleaseNode( btreePtr, siblingNode );
+	err = SplitRight( btreePtr, targetNode, siblingNode, nodeNum, index, key->keyPtr,
+					  key->recPtr, key->recSize, newIndex, newNodeNumPtr, &recsRotated );
+	M_ExitOnError (err);
+
+	// if we split root node - add new root
+	if ( ((NodeDescPtr) targetNode->buffer)->height == btreePtr->treeDepth )
+	{
+		err = AddNewRootNode( btreePtr, targetNode->buffer, siblingNode->buffer );	// Note: does not update TPT
+		M_ExitOnError (err);
+		*rootSplit = true;
+	}
+	else
+	{
+		*insertParent = true;
+
+		// update parent index node when replacingKey is true or when
+		// we inserted a new record at the beginning of our target node.
+		if ( key->replacingKey || ( index == 0 && *newIndex == 0 ) )
+			*updateParent = true;
+	}
+
+ExitThisRoutine:
+
+	return noErr;
+
+ErrorExit:
+
+	(void) ReleaseNode (btreePtr, siblingNode);
+	return err;
+
+} // End of InsertNode
+
+
+/*-------------------------------------------------------------------------------
+Routine:	DeleteTree	-	Delete a record from a BTree.
+
+Function:	Deletes the record at 'index' from the target node. If the node becomes
+			empty, it is unlinked from its siblings and freed, and the corresponding
+			index record is deleted from the parent (recursively). If the first
+			record of the node changes, the parent's index key is updated. If the
+			root is left with a single index record, the tree is collapsed by a
+			level.
+
+Input:		btreePtr		- pointer to BTree control block
+			treePathTable	- tree path table set up by SearchTree
+			targetNode		- node containing the record to delete
+			index			- index of the record to delete
+			level			- level of targetNode within the tree (1 = leaf)
+
+Result:		noErr		- success
+			!= noErr	- failure
+-------------------------------------------------------------------------------*/
+
+OSStatus	DeleteTree			(BTreeControlBlockPtr		 btreePtr,
+								 TreePathTable				 treePathTable,
+								 BlockDescriptor			*targetNode,
+								 UInt16						 index,
+								 UInt16						 level )
+{
+	OSStatus			err;
+	BlockDescriptor		parentNode;
+	BTNodeDescriptor	*targetNodePtr;
+	UInt32				targetNodeNum;
+	Boolean				deleteRequired;
+	Boolean				updateRequired;
+
+
+	deleteRequired = false;
+	updateRequired = false;
+
+	targetNodeNum = treePathTable[level].node;
+	targetNodePtr = targetNode->buffer;
+	PanicIf (targetNodePtr == nil, "\pDeleteTree: targetNode has nil buffer!");
+
+	DeleteRecord (btreePtr, targetNodePtr, index);
+
+	//•• coalesce remaining records?
+
+	if ( targetNodePtr->numRecords == 0 )	// did we delete the last record?
+ { + BlockDescriptor siblingNode; + UInt32 siblingNodeNum; + + deleteRequired = true; + + ////////////////// Get Siblings & Update Links ////////////////////////// + + siblingNodeNum = targetNodePtr->bLink; // Left Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, &siblingNode); + M_ExitOnError (err); + ((NodeDescPtr)siblingNode.buffer)->fLink = targetNodePtr->fLink; + err = UpdateNode (btreePtr, &siblingNode); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update firstLeafNode + { + btreePtr->firstLeafNode = targetNodePtr->fLink; + } + + siblingNodeNum = targetNodePtr->fLink; // Right Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, &siblingNode); + M_ExitOnError (err); + ((NodeDescPtr)siblingNode.buffer)->bLink = targetNodePtr->bLink; + err = UpdateNode (btreePtr, &siblingNode); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update lastLeafNode + { + btreePtr->lastLeafNode = targetNodePtr->bLink; + } + + //////////////////////// Free Empty Node //////////////////////////////// + + ClearNode (btreePtr, targetNodePtr); + + err = UpdateNode (btreePtr, targetNode); + M_ExitOnError (err); + err = FreeNode (btreePtr, targetNodeNum); + M_ExitOnError (err); + } + else if ( index == 0 ) // did we delete the first record? + { + updateRequired = true; // yes, so we need to update parent + } + + + if ( level == btreePtr->treeDepth ) // then targetNode->buffer is the root node + { + deleteRequired = false; + updateRequired = false; + + if ( targetNode->buffer == nil ) // then root was freed and the btree is empty + { + btreePtr->rootNode = 0; + btreePtr->treeDepth = 0; + } + else if ( ((NodeDescPtr)targetNode->buffer)->numRecords == 1 ) + { + err = CollapseTree (btreePtr, targetNode); + M_ExitOnError (err); + } + } + + + if ( updateRequired || deleteRequired ) + { + ++level; // next level + + //// Get Parent Node and index + index = treePathTable [level].index; + err = GetNode (btreePtr, treePathTable[level].node, &parentNode); + M_ExitOnError (err); + + if ( updateRequired ) + { + KeyPtr keyPtr; + UInt8 * recPtr; + UInt16 recSize; + UInt32 insertNode; + + //¥¥Êdebug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + PanicIf( (*(UInt32 *) recPtr) != targetNodeNum, "\p DeleteTree: parent ptr doesn't match targetNodeNum!!"); + + // need to delete and re-insert this parent key/ptr + DeleteRecord (btreePtr, parentNode.buffer, index); + + keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + recPtr = (UInt8 *) &targetNodeNum; + recSize = sizeof(targetNodeNum); + + err = InsertTree (btreePtr, treePathTable, keyPtr, recPtr, recSize, + &parentNode, index, level, kReplaceRecord, &insertNode); + M_ExitOnError (err); + } + else // deleteRequired + { + err = DeleteTree (btreePtr, treePathTable, &parentNode, index, level); + M_ExitOnError (err); + } + } + + + err = UpdateNode (btreePtr, targetNode); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &parentNode); + + return err; + +} // end DeleteTree + + + +///////////////////////////////// CollapseTree ////////////////////////////////// + +static OSStatus CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ) +{ + OSStatus err; + UInt32 originalRoot; + UInt32 nodeNum; + + originalRoot = btreePtr->rootNode; + + while (true) + { + if ( 
((NodeDescPtr)blockPtr->buffer)->numRecords > 1) + break; // this will make a fine root node + + if ( ((NodeDescPtr)blockPtr->buffer)->kind == kBTLeafNode) + break; // we've hit bottom + + nodeNum = btreePtr->rootNode; + btreePtr->rootNode = GetChildNodeNum (btreePtr, blockPtr->buffer, 0); + --btreePtr->treeDepth; + + //// Clear and Free Current Old Root Node //// + ClearNode (btreePtr, blockPtr->buffer); + err = UpdateNode (btreePtr, blockPtr); + M_ExitOnError (err); + err = FreeNode (btreePtr, nodeNum); + M_ExitOnError (err); + + //// Get New Root Node + err = GetNode (btreePtr, btreePtr->rootNode, blockPtr); + M_ExitOnError (err); + } + + if (btreePtr->rootNode != originalRoot) + M_BTreeHeaderDirty (btreePtr); + + err = UpdateNode (btreePtr, blockPtr); // always update! + M_ExitOnError (err); + + return noErr; + + +/////////////////////////////////// ErrorExit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, blockPtr); + return err; +} + + + +////////////////////////////////// RotateLeft /////////////////////////////////// + +/*------------------------------------------------------------------------------- + +Routine: RotateLeft - One_line_description. + +Function: Brief_description_of_the_function_and_any_side_effects + +Algorithm: if rightIndex > insertIndex, subtract 1 for actual rightIndex + +Input: btreePtr - description + leftNode - description + rightNode - description + rightInsertIndex - description + keyPtr - description + recPtr - description + recSize - description + +Output: insertIndex + insertNodeNum - description + recordFit - description + recsRotated + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + UInt16 rightInsertIndex, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + Boolean *recordFit, + UInt16 *recsRotated ) +{ + OSStatus err; + SInt32 insertSize; + SInt32 nodeSize; + SInt32 leftSize, rightSize; + SInt32 moveSize; + UInt16 keyLength; + UInt16 lengthFieldSize; + UInt16 index, moveIndex; + Boolean didItFit; + + ///////////////////// Determine If Record Will Fit ////////////////////////// + + keyLength = GetKeyLength(btreePtr, keyPtr, (rightNode->kind == kBTLeafNode)); + + // the key's length field is 8-bits in HFS and 16-bits in HFS+ + if ( btreePtr->attributes & kBTBigKeysMask ) + lengthFieldSize = sizeof(UInt16); + else + lengthFieldSize = sizeof(UInt8); + + insertSize = keyLength + lengthFieldSize + recSize + sizeof(UInt16); + + if ( M_IsOdd (insertSize) ) + ++insertSize; // add pad byte; + + nodeSize = btreePtr->nodeSize; + + // add size of insert record to right node + rightSize = nodeSize - GetNodeFreeSize (btreePtr, rightNode) + insertSize; + leftSize = nodeSize - GetNodeFreeSize (btreePtr, leftNode); + + moveIndex = 0; + moveSize = 0; + + while ( leftSize < rightSize ) + { + if ( moveIndex < rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex) + 2; + } + else if ( moveIndex == rightInsertIndex ) + { + moveSize = insertSize; + } + else // ( moveIndex > rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex - 1) + 2; + } + + leftSize += moveSize; + rightSize -= moveSize; + ++moveIndex; + } + + if ( leftSize > nodeSize ) // undo last move + { + leftSize -= moveSize; + rightSize += moveSize; + --moveIndex; + } + + if ( rightSize 
> nodeSize ) // record won't fit - failure, but not error + { + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return noErr; + } + + // we've found balance point, moveIndex == number of records moved into leftNode + + + //////////////////////////// Rotate Records ///////////////////////////////// + + *recsRotated = moveIndex; + *recordFit = true; + index = 0; + + while ( index < moveIndex ) + { + if ( index == rightInsertIndex ) // insert new record in left node + { + UInt16 leftInsertIndex; + + leftInsertIndex = leftNode->numRecords; + + didItFit = InsertKeyRecord (btreePtr, leftNode, leftInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + Panic ("\pRotateLeft: InsertKeyRecord (left) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = leftInsertIndex; + *insertNodeNum = rightNode->bLink; + } + else + { + didItFit = RotateRecordLeft (btreePtr, leftNode, rightNode); + if ( !didItFit ) + { + Panic ("\pRotateLeft: RotateRecordLeft returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + } + + ++index; + } + + if ( moveIndex <= rightInsertIndex ) // then insert new record in right node + { + rightInsertIndex -= index; // adjust for records already rotated + + didItFit = InsertKeyRecord (btreePtr, rightNode, rightInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + Panic ("\pRotateLeft: InsertKeyRecord (right) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = rightInsertIndex; + *insertNodeNum = leftNode->fLink; + } + +#if DEBUG_TREEOPS + if ( DoKeyCheck( leftNode, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in left node num %d: \n", __FUNCTION__ , rightNode->bLink ); + PrintNodeDescriptor( leftNode ); + err = fsBTBadRotateErr; + goto ErrorExit; + } + if ( DoKeyCheck( rightNode, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in left node num %d: \n", __FUNCTION__ , leftNode->fLink ); + PrintNodeDescriptor( rightNode ); + err = fsBTBadRotateErr; + goto ErrorExit; + } +#endif // DEBUG_TREEOPS + + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return err; +} + + +#if 0 +/////////////////////////////////// SplitLeft /////////////////////////////////// + +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + UInt32 rightNodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + UInt16 *recsRotated ) +{ + OSStatus err; + NodeDescPtr left, right; + UInt32 newNodeNum; + Boolean recordFit; + + + ///////////////////////////// Compare Nodes ///////////////////////////////// + + right = rightNode->buffer; + left = leftNode->buffer; + + PanicIf ( right->bLink != 0 && left == 0, "\p SplitLeft: left sibling missing!?" ); + + //¥¥ type should be kLeafNode or kIndexNode + + if ( (right->height == 1) && (right->kind != kBTLeafNode) ) + return fsBTInvalidNodeErr; + + if ( left != nil ) + { + if ( left->fLink != rightNodeNum ) + return fsBTInvalidNodeErr; //¥¥ E_BadSibling ? + + if ( left->height != right->height ) + return fsBTInvalidNodeErr; //¥¥ E_BadNodeHeight ? + + if ( left->kind != right->kind ) + return fsBTInvalidNodeErr; //¥¥ E_BadNodeType ? 
+ } + + + ///////////////////////////// Allocate Node ///////////////////////////////// + + err = AllocateNode (btreePtr, &newNodeNum); + M_ExitOnError (err); + + + /////////////// Update Forward Link In Original Left Node /////////////////// + + if ( left != nil ) + { + left->fLink = newNodeNum; + err = UpdateNode (btreePtr, leftNode); + M_ExitOnError (err); + } + + + /////////////////////// Initialize New Left Node //////////////////////////// + + err = GetNewNode (btreePtr, newNodeNum, leftNode); + M_ExitOnError (err); + + left = leftNode->buffer; + left->fLink = rightNodeNum; + + + // Steal Info From Right Node + + left->bLink = right->bLink; + left->kind = right->kind; + left->height = right->height; + + right->bLink = newNodeNum; // update Right bLink + + if ( (left->kind == kBTLeafNode) && (left->bLink == 0) ) + { + // if we're adding a new first leaf node - update BTreeInfoRec + + btreePtr->firstLeafNode = newNodeNum; + M_BTreeHeaderDirty (btreePtr); //¥¥ AllocateNode should have set the bit already... + } + + ////////////////////////////// Rotate Left ////////////////////////////////// + + err = RotateLeft (btreePtr, left, right, index, keyPtr, recPtr, recSize, + insertIndex, insertNodeNum, &recordFit, recsRotated); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, leftNode); + (void) ReleaseNode (btreePtr, rightNode); + + //¥¥ Free new node if allocated? + + *insertIndex = 0; + *insertNodeNum = 0; + *recsRotated = 0; + + return err; +} +#endif + + + +/////////////////////////////// RotateRecordLeft //////////////////////////////// + +static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ) +{ + UInt16 size; + UInt8 * recPtr; + Boolean recordFit; + + size = GetRecordSize (btreePtr, rightNode, 0); + recPtr = GetRecordAddress (btreePtr, rightNode, 0); + + recordFit = InsertRecord (btreePtr, leftNode, leftNode->numRecords, recPtr, size); + + if ( !recordFit ) + return false; + + DeleteRecord (btreePtr, rightNode, 0); + + return true; +} + + +//////////////////////////////// AddNewRootNode ///////////////////////////////// + +static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ) +{ + OSStatus err; + BlockDescriptor rootNode; + UInt32 rootNum; + KeyPtr keyPtr; + Boolean didItFit; + UInt16 keyLength; + + PanicIf (leftNode == nil, "\pAddNewRootNode: leftNode == nil"); + PanicIf (rightNode == nil, "\pAddNewRootNode: rightNode == nil"); + + + /////////////////////// Initialize New Root Node //////////////////////////// + + err = AllocateNode (btreePtr, &rootNum); + M_ExitOnError (err); + + err = GetNewNode (btreePtr, rootNum, &rootNode); + M_ExitOnError (err); + + ((NodeDescPtr)rootNode.buffer)->kind = kBTIndexNode; + ((NodeDescPtr)rootNode.buffer)->height = ++btreePtr->treeDepth; + + + ///////////////////// Insert Left Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, leftNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, rootNode.buffer, 0, keyPtr, keyLength, + (UInt8 *) &rightNode->bLink, 4 ); + + PanicIf ( !didItFit, "\pAddNewRootNode:InsertKeyRecord failed for left index record"); + + + //////////////////// Insert Right Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, rightNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, 
rootNode.buffer, 1, keyPtr, keyLength, + (UInt8 *) &leftNode->fLink, 4 ); + + PanicIf ( !didItFit, "\pAddNewRootNode:InsertKeyRecord failed for right index record"); + + +#if DEBUG_TREEOPS + if ( DoKeyCheck( rootNode.buffer, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in root node num %d: \n", __FUNCTION__ , rootNum ); + PrintNodeDescriptor( rootNode.buffer ); + err = fsBTBadRotateErr; + goto ErrorExit; + } +#endif // DEBUG_TREEOPS + + + /////////////////////////// Release Root Node /////////////////////////////// + + err = UpdateNode (btreePtr, &rootNode); + M_ExitOnError (err); + + // update BTreeInfoRec + + btreePtr->rootNode = rootNum; + btreePtr->flags |= kBTHeaderDirty; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + return err; +} + + +static UInt16 GetKeyLength ( const BTreeControlBlock *btreePtr, const BTreeKey *key, Boolean forLeafNode ) +{ + UInt16 length; + + if ( forLeafNode || btreePtr->attributes & kBTVariableIndexKeysMask ) + length = KeyLength (btreePtr, key); // just use actual key length + else + length = btreePtr->maxKeyLength; // fixed sized index key (i.e. HFS) //¥¥ shouldn't we clear the pad bytes? + + return length; +} + + + +/////////////////////////////////// SplitRight /////////////////////////////////// + +static OSStatus SplitRight (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + BlockDescriptor *rightNodePtr, + UInt32 nodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 *recPtr, + UInt16 recSize, + UInt16 *insertIndexPtr, + UInt32 *newNodeNumPtr, + UInt16 *recsRotatedPtr ) +{ + OSStatus err; + NodeDescPtr leftPtr, rightPtr; + UInt32 newNodeNum; + Boolean recordFit; + + + ///////////////////////////// Compare Nodes ///////////////////////////////// + + leftPtr = nodePtr->buffer; + + if ( leftPtr->fLink != 0 ) + { + err = GetNode( btreePtr, leftPtr->fLink, rightNodePtr ); + M_ExitOnError( err ); + } + rightPtr = rightNodePtr->buffer; + + PanicIf ( leftPtr->fLink != 0 && rightPtr == 0, "\p SplitRight: right sibling missing!?" ); + + //¥¥ type should be kLeafNode or kIndexNode + + if ( (leftPtr->height == 1) && (leftPtr->kind != kBTLeafNode) ) + return fsBTInvalidNodeErr; + + if ( rightPtr != nil ) + { + if ( rightPtr->bLink != nodeNum ) + return fsBTInvalidNodeErr; //¥¥ E_BadSibling ? + + if ( rightPtr->height != leftPtr->height ) + return fsBTInvalidNodeErr; //¥¥ E_BadNodeHeight ? + + if ( rightPtr->kind != leftPtr->kind ) + return fsBTInvalidNodeErr; //¥¥ E_BadNodeType ? 
+ } + + + ///////////////////////////// Allocate Node ///////////////////////////////// + + err = AllocateNode (btreePtr, &newNodeNum); + M_ExitOnError (err); + + /////////////// Update backward Link In Original Right Node /////////////////// + + if ( rightPtr != nil ) + { + rightPtr->bLink = newNodeNum; + err = UpdateNode (btreePtr, rightNodePtr); + M_ExitOnError (err); + } + + /////////////////////// Initialize New Right Node //////////////////////////// + + err = GetNewNode (btreePtr, newNodeNum, rightNodePtr ); + M_ExitOnError (err); + + rightPtr = rightNodePtr->buffer; + rightPtr->bLink = nodeNum; + + + // Steal Info From Left Node + + rightPtr->fLink = leftPtr->fLink; + rightPtr->kind = leftPtr->kind; + rightPtr->height = leftPtr->height; + + leftPtr->fLink = newNodeNum; // update Left fLink + + if ( (rightPtr->kind == kBTLeafNode) && (rightPtr->fLink == 0) ) + { + // if we're adding a new last leaf node - update BTreeInfoRec + + btreePtr->lastLeafNode = newNodeNum; + M_BTreeHeaderDirty (btreePtr); //¥¥ AllocateNode should have set the bit already... + } + + ////////////////////////////// Rotate Right ////////////////////////////////// + + err = RotateRight (btreePtr, leftPtr, rightPtr, index, keyPtr, recPtr, recSize, + insertIndexPtr, newNodeNumPtr, &recordFit, recsRotatedPtr); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, nodePtr); + (void) ReleaseNode (btreePtr, rightNodePtr); + + //¥¥ Free new node if allocated? + + *insertIndexPtr = 0; + *newNodeNumPtr = 0; + *recsRotatedPtr = 0; + + return err; + +} /* SplitRight */ + + + +////////////////////////////////// RotateRight /////////////////////////////////// + +/*------------------------------------------------------------------------------- + +Routine: RotateRight - rotate half of . + +Function: Brief_description_of_the_function_and_any_side_effects + +Algorithm: if rightIndex > insertIndex, subtract 1 for actual rightIndex + +Input: btreePtr - description + leftNode - description + rightNode - description + leftInsertIndex - description + keyPtr - description + recPtr - description + recSize - description + +Output: insertIndex + insertNodeNum - description + recordFit - description + recsRotated + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +static OSStatus RotateRight (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNodePtr, + NodeDescPtr rightNodePtr, + UInt16 leftInsertIndex, + KeyPtr keyPtr, + UInt8 *recPtr, + UInt16 recSize, + UInt16 *insertIndexPtr, + UInt32 *newNodeNumPtr, + Boolean *didRecordFitPtr, + UInt16 *recsRotatedPtr ) +{ + OSStatus err; + SInt32 insertSize; + SInt32 nodeSize; + SInt32 leftSize, rightSize; + SInt32 moveSize; + UInt16 keyLength; + UInt16 lengthFieldSize; + SInt16 index, moveIndex, myInsertIndex; + Boolean didItFit; + Boolean doIncrement = false; + + ///////////////////// Determine If Record Will Fit ////////////////////////// + + keyLength = GetKeyLength( btreePtr, keyPtr, (leftNodePtr->kind == kBTLeafNode) ); + + // the key's length field is 8-bits in HFS and 16-bits in HFS+ + if ( btreePtr->attributes & kBTBigKeysMask ) + lengthFieldSize = sizeof(UInt16); + else + lengthFieldSize = sizeof(UInt8); + + /* + * A record size in a node is the size of the key, the size of the key length field, + * the size of the record, and the size of the record offset index. 
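+ * For example, with HFS Plus keys (a 16-bit key length field), a record with
+ * a 9-byte key and 20 bytes of data needs 9 + 2 + 20 + 2 = 33 bytes, padded
+ * to an even 34. (Illustrative sizes only.)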
+ */ + insertSize = keyLength + lengthFieldSize + recSize + sizeof(UInt16); + if ( M_IsOdd (insertSize) ) + ++insertSize; // add pad byte; + nodeSize = btreePtr->nodeSize; + + // add size of insert record to left node + rightSize = nodeSize - GetNodeFreeSize( btreePtr, rightNodePtr ); + leftSize = nodeSize - GetNodeFreeSize( btreePtr, leftNodePtr ) + insertSize; + + moveIndex = leftNodePtr->numRecords; // start at last record in the node + moveSize = 0; + + /* + * The goal here is to attempt to make the nodes as balanced as + * possible. We do this by "moving" records from the left node to + * the right node, until the right node is larger than the left + * node. + * + * We also need to factor in the new record for this; what we are + * trying to do, as a result, is consider a virtual node that has + * all of the old records in it, plus the new record inserted at + * the proper place. (This is the reason for the if cases in the + * loop.) + */ + while ( rightSize < leftSize ) + { + /* + * We set moveSize to the size of the record being moved in this + * pass. We need to add sizeof(UInt16) because we need to account + * for the record offset index, which is two bytes. This was already + * added to insertSize above. + */ + if (moveIndex > leftInsertIndex) + moveSize = GetRecordSize( btreePtr, leftNodePtr, moveIndex - 1) + sizeof(UInt16); + else if (moveIndex == leftInsertIndex) + moveSize = insertSize; + else // (moveIndex < leftInsertIndex) + moveSize = GetRecordSize( btreePtr, leftNodePtr, moveIndex) + sizeof(UInt16); + + leftSize -= moveSize; + rightSize += moveSize; + --moveIndex; + } + + if ( rightSize > nodeSize ) // undo last move + { + leftSize += moveSize; + rightSize -= moveSize; + ++moveIndex; + } + + if ( leftSize > nodeSize ) // record won't fit - failure, but not error + { + *insertIndexPtr = 0; + *newNodeNumPtr = 0; + *didRecordFitPtr = false; + *recsRotatedPtr = 0; + + return noErr; + } + + // we've found balance point, we rotate up to moveIndex into right node + + //////////////////////////// Rotate Records ///////////////////////////////// + + *didRecordFitPtr = true; + index = leftNodePtr->numRecords; + *recsRotatedPtr = index - moveIndex; + myInsertIndex = 0; + + // handle case where the new record is inserting after the last + // record in our left node. 
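+ // In that case the new record starts out at index 0 of the new right node,
+ // and myInsertIndex is bumped (via doIncrement) once for every record that
+ // is subsequently rotated in ahead of it.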
+ if ( leftNodePtr->numRecords == leftInsertIndex ) + { + didItFit = InsertKeyRecord (btreePtr, rightNodePtr, 0, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + if (debug) plog ("RotateRight: InsertKeyRecord (left) returned false!\n"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + // NOTE - our insert location will slide as we insert more records + doIncrement = true; + *newNodeNumPtr = leftNodePtr->fLink; + index--; + } + + while ( index > moveIndex ) + { + if ( index == leftInsertIndex ) // insert new record in right node + { + didItFit = InsertKeyRecord (btreePtr, rightNodePtr, 0, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + if (debug) plog ("RotateRight: InsertKeyRecord (right) returned false!\n"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + // NOTE - our insert index will slide as we insert more records + doIncrement = true; + myInsertIndex = -1; + *newNodeNumPtr = leftNodePtr->fLink; + } + else + { + didItFit = RotateRecordRight( btreePtr, leftNodePtr, rightNodePtr ); + if ( !didItFit ) + { + if (debug) plog ("RotateRight: RotateRecordRight returned false!\n"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + } + + if ( doIncrement ) + myInsertIndex++; + --index; + } + + *insertIndexPtr = myInsertIndex; + + if ( moveIndex >= leftInsertIndex ) // then insert new record in left node + { + didItFit = InsertKeyRecord (btreePtr, leftNodePtr, leftInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + if (debug) plog ("RotateRight: InsertKeyRecord (left) returned false!\n"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndexPtr = leftInsertIndex; + *newNodeNumPtr = rightNodePtr->bLink; + } + +#if DEBUG_TREEOPS + if ( DoKeyCheck( rightNodePtr, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in right node num %d: \n", __FUNCTION__ , leftNodePtr->fLink); + PrintNodeDescriptor( rightNodePtr ); + err = fsBTBadRotateErr; + goto ErrorExit; + } + if ( DoKeyCheck( leftNodePtr, btreePtr ) != noErr ) + { + plog( "\n%s - bad key order in left node num %d: \n", __FUNCTION__ , rightNodePtr->bLink); + PrintNodeDescriptor( leftNodePtr ); + err = fsBTBadRotateErr; + goto ErrorExit; + } + if ( DoKeyCheckAcrossNodes( leftNodePtr, rightNodePtr, btreePtr, false ) != noErr ) + { + plog( "\n%s - bad key order across nodes left %d right %d: \n", + __FUNCTION__ , rightNodePtr->bLink, leftNodePtr->fLink ); + PrintNodeDescriptor( leftNodePtr ); + PrintNodeDescriptor( rightNodePtr ); + err = fsBTBadRotateErr; + goto ErrorExit; + } +#endif // DEBUG_TREEOPS + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + *insertIndexPtr = 0; + *newNodeNumPtr = 0; + *didRecordFitPtr = false; + *recsRotatedPtr = 0; + + return err; + +} /* RotateRight */ + + + +/////////////////////////////// RotateRecordRight //////////////////////////////// + +static Boolean RotateRecordRight (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNodePtr, + NodeDescPtr rightNodePtr ) +{ + UInt16 size; + UInt8 * recPtr; + Boolean didRecordFit; + + size = GetRecordSize( btreePtr, leftNodePtr, leftNodePtr->numRecords - 1 ) ; + recPtr = GetRecordAddress( btreePtr, leftNodePtr, leftNodePtr->numRecords - 1 ) ; + + didRecordFit = InsertRecord( btreePtr, rightNodePtr, 0, recPtr, size ); + if ( !didRecordFit ) + return false; + + DeleteRecord( btreePtr, leftNodePtr, leftNodePtr->numRecords - 1 ); + + return true; + +} /* RotateRecordRight */ + + + +#if DEBUG_TREEOPS +static int DoKeyCheckAcrossNodes( 
NodeDescPtr theLeftNodePtr, + NodeDescPtr theRightNodePtr, + BTreeControlBlock *theTreePtr, + Boolean printKeys ) +{ + UInt16 myLeftDataSize; + UInt16 myRightDataSize; + UInt16 myRightKeyLen; + UInt16 myLeftKeyLen; + KeyPtr myLeftKeyPtr; + KeyPtr myRightKeyPtr; + UInt8 * myLeftDataPtr; + UInt8 * myRightDataPtr; + + + GetRecordByIndex( theTreePtr, theLeftNodePtr, (theLeftNodePtr->numRecords - 1), + &myLeftKeyPtr, &myLeftDataPtr, &myLeftDataSize ); + GetRecordByIndex( theTreePtr, theRightNodePtr, 0, + &myRightKeyPtr, &myRightDataPtr, &myRightDataSize ); + + if ( theTreePtr->attributes & kBTBigKeysMask ) + { + myRightKeyLen = myRightKeyPtr->length16; + myLeftKeyLen = myLeftKeyPtr->length16; + } + else + { + myRightKeyLen = myRightKeyPtr->length8; + myLeftKeyLen = myLeftKeyPtr->length8; + } + + if ( printKeys ) + { + plog( "%s - left and right keys:\n", __FUNCTION__ ); + PrintKey( (UInt8 *) myLeftKeyPtr, myLeftKeyLen ); + PrintKey( (UInt8 *) myRightKeyPtr, myRightKeyLen ); + } + + if ( CompareKeys( theTreePtr, myLeftKeyPtr, myRightKeyPtr ) >= 0 ) + return( -1 ); + + return( noErr ); + +} /* DoKeyCheckAcrossNodes */ + + +static int DoKeyCheck( NodeDescPtr nodeP, BTreeControlBlock *btcb ) +{ + SInt16 index; + UInt16 dataSize; + UInt16 keyLength; + KeyPtr keyPtr; + UInt8 *dataPtr; + KeyPtr prevkeyP = nil; + + + if ( nodeP->numRecords == 0 ) + { + if ( (nodeP->fLink == 0) && (nodeP->bLink == 0) ) + return( -1 ); + } + else + { + /* + * Loop on number of records in node + */ + for ( index = 0; index < nodeP->numRecords; index++) + { + GetRecordByIndex( (BTreeControlBlock *)btcb, nodeP, (UInt16) index, &keyPtr, &dataPtr, &dataSize ); + + if (btcb->attributes & kBTBigKeysMask) + keyLength = keyPtr->length16; + else + keyLength = keyPtr->length8; + + if ( keyLength > btcb->maxKeyLength ) + { + return( -1 ); + } + + if ( prevkeyP != nil ) + { + if ( CompareKeys( (BTreeControlBlockPtr)btcb, prevkeyP, keyPtr ) >= 0 ) + { + return( -1 ); + } + } + prevkeyP = keyPtr; + } + } + + return( noErr ); + +} /* DoKeyCheck */ + +static void PrintNodeDescriptor( NodeDescPtr thePtr ) +{ + plog( " fLink %d bLink %d kind %d height %d numRecords %d \n", + thePtr->fLink, thePtr->bLink, thePtr->kind, thePtr->height, thePtr->numRecords ); +} + + +static void PrintKey( UInt8 * myPtr, int mySize ) +{ + int i; + + for ( i = 0; i < mySize+2; i++ ) + { + plog("%02X", *(myPtr + i) ); + } + plog("\n" ); +} /* PrintKey */ + + +#endif // DEBUG_TREEOPS diff --git a/fsck_hfs/dfalib/BlockCache.c b/fsck_hfs/dfalib/BlockCache.c new file mode 100644 index 0000000..cb58f89 --- /dev/null +++ b/fsck_hfs/dfalib/BlockCache.c @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2000-2003, 2005, 2007-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "SRuntime.h" +#include "Scavenger.h" +#include "../cache.h" + + + +extern Cache_t fscache; + + +static OSStatus ReadFragmentedBlock (SFCB *file, UInt32 blockNum, BlockDescriptor *block); +static OSStatus WriteFragmentedBlock( SFCB *file, + BlockDescriptor *block, + int age, + uint32_t writeOptions ); +static OSStatus ReleaseFragmentedBlock (SFCB *file, BlockDescriptor *block, int age); + + +void +InitBlockCache(SVCB *volume) +{ + volume->vcbBlockCache = (void *) &fscache; +} + + +/* + * kGetBlock + * kForceReadBlock + * kGetEmptyBlock + * kSkipEndianSwap + */ +OSStatus +GetVolumeBlock (SVCB *volume, UInt64 blockNum, GetBlockOptions options, BlockDescriptor *block) +{ + UInt32 blockSize; + SInt64 offset; + UInt16 signature; + OSStatus result; + Buf_t * buffer; + Cache_t * cache; + + buffer = NULL; + cache = (Cache_t *) volume->vcbBlockCache; + blockSize = 512; + + offset = (SInt64) ((UInt64) blockNum) << kSectorShift; + + result = CacheRead (cache, offset, blockSize, &buffer); + + if (result == 0) { + block->blockHeader = buffer; + block->buffer = buffer->Buffer; + block->blockNum = blockNum; + block->blockSize = blockSize; + block->blockReadFromDisk = 0; + block->fragmented = 0; + } else { + block->blockHeader = NULL; + block->buffer = NULL; + } + + if (!(options & kSkipEndianSwap) && (result == 0)) { + HFSMasterDirectoryBlock *mdb; + + mdb = (HFSMasterDirectoryBlock *)block->buffer; + signature = SWAP_BE16(mdb->drSigWord); + if (signature == kHFSPlusSigWord || signature == kHFSXSigWord) + SWAP_HFSPLUSVH(block->buffer); + else if (signature == kHFSSigWord) + SWAP_HFSMDB(block->buffer); + } + return (result); +} + + +/* + * kReleaseBlock + * kForceWriteBlock + * kMarkBlockDirty + * kTrashBlock + * kSkipEndianSwap + */ +OSStatus +ReleaseVolumeBlock (SVCB *volume, BlockDescriptor *block, ReleaseBlockOptions options) +{ + OSStatus result = 0; + Cache_t * cache; + Buf_t * buffer; + int age; + UInt16 signature; + + cache = (Cache_t *) volume->vcbBlockCache; + buffer = (Buf_t *) block->blockHeader; + age = ((options & kTrashBlock) != 0); + + /* + * Always leave the blocks in the cache in big endian + */ + if (!(options & kSkipEndianSwap)) { + signature = ((HFSMasterDirectoryBlock *)block->buffer)->drSigWord; + if (signature == kHFSPlusSigWord || signature == kHFSXSigWord) + SWAP_HFSPLUSVH(block->buffer); + else if (signature == kHFSSigWord) + SWAP_HFSMDB(block->buffer); + } + + if (options & (kMarkBlockDirty | kForceWriteBlock)) { + result = CacheWrite(cache, buffer, age, 0); + } else { /* not dirty */ + result = CacheRelease (cache, buffer, age); + } + return (result); +} + + +/* + * kGetBlock + * kForceReadBlock + * kGetEmptyBlock + */ +OSStatus +GetFileBlock (SFCB *file, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block) +{ + UInt64 diskBlock; + UInt32 contiguousBytes; + SInt64 offset; + + OSStatus result; + Buf_t * buffer; + Cache_t * cache; + + buffer = NULL; + block->buffer = NULL; + block->blockHeader = NULL; + cache = (Cache_t *)file->fcbVolume->vcbBlockCache; + + /* Map file block to volume block */ + result = MapFileBlockC(file->fcbVolume, file, file->fcbBlockSize, + (((UInt64)blockNum * (UInt64)file->fcbBlockSize) >> kSectorShift), + &diskBlock, &contiguousBytes); + if (result) return (result); + + if (contiguousBytes < file->fcbBlockSize) + return ( ReadFragmentedBlock(file, blockNum, block) ); + + offset = 
(SInt64) ((UInt64) diskBlock) << kSectorShift; + + result = CacheRead (cache, offset, file->fcbBlockSize, &buffer); + if (result) return (result); + + block->blockHeader = buffer; + block->buffer = buffer->Buffer; + block->blockNum = blockNum; + block->blockSize = file->fcbBlockSize; + block->blockReadFromDisk = 0; + block->fragmented = 0; + + return (noErr); +} + + +/* + * kReleaseBlock + * kForceWriteBlock + * kMarkBlockDirty + * kTrashBlock + */ +OSStatus +ReleaseFileBlock (SFCB *file, BlockDescriptor *block, ReleaseBlockOptions options) +{ + OSStatus result = 0; + Cache_t * cache; + Buf_t * buffer; + int age; + uint32_t writeOptions = 0; + + cache = (Cache_t *)file->fcbVolume->vcbBlockCache; + buffer = (Buf_t *) block->blockHeader; + age = ((options & kTrashBlock) != 0); + + if ( (options & kForceWriteBlock) == 0 ) + /* only write if we're forced to */ + writeOptions |= kLazyWrite; + + if (options & (kMarkBlockDirty | kForceWriteBlock)) { + if (block->fragmented) + result = WriteFragmentedBlock(file, block, age, writeOptions); + else + result = CacheWrite(cache, buffer, age, writeOptions); + } else { /* not dirty */ + + if (block->fragmented) + result = ReleaseFragmentedBlock(file, block, age); + else + result = CacheRelease (cache, buffer, age); + } + return (result); +} + + +/* + * + */ +OSStatus +SetFileBlockSize (SFCB *file, ByteCount blockSize) +{ + file->fcbBlockSize = blockSize; + + return (0); +} + + +/* + * Read a block that is fragmented across 2 or more allocation blocks + * + * - a block descriptor buffer is allocated here + * - the blockHeader field holds a list of Buf_t buffers. + * - the fragmented flag is set + */ +static OSStatus +ReadFragmentedBlock (SFCB *file, UInt32 blockNum, BlockDescriptor *block) +{ + UInt64 sector; + UInt32 fragSize, blockSize; + UInt64 fileOffset; + SInt64 diskOffset; + SVCB * volume; + int i, maxFrags; + OSStatus result; + Buf_t ** bufs; /* list of Buf_t pointers */ + Cache_t * cache; + char * buffer; + + volume = file->fcbVolume; + cache = (Cache_t *)volume->vcbBlockCache; + + blockSize = file->fcbBlockSize; + maxFrags = blockSize / volume->vcbBlockSize; + fileOffset = (UInt64)blockNum * (UInt64)blockSize; + + buffer = (char *) AllocateMemory(blockSize); + bufs = (Buf_t **) AllocateClearMemory(maxFrags * sizeof(Buf_t *)); + if (buffer == NULL || bufs == NULL) { + result = memFullErr; + return (result); + } + + block->buffer = buffer; + block->blockHeader = bufs; + block->blockNum = blockNum; + block->blockSize = blockSize; + block->blockReadFromDisk = false; + block->fragmented = true; + + for (i = 0; (i < maxFrags) && (blockSize > 0); ++i) { + result = MapFileBlockC (volume, file, blockSize, + fileOffset >> kSectorShift, + &sector, &fragSize); + if (result) goto ErrorExit; + + diskOffset = (SInt64) (sector) << kSectorShift; + result = CacheRead (cache, diskOffset, fragSize, &bufs[i]); + if (result) goto ErrorExit; + + if (bufs[i]->Length != fragSize) { + plog("ReadFragmentedBlock: cache failure (Length != fragSize)\n"); + result = -1; + goto ErrorExit; + } + + CopyMemory(bufs[i]->Buffer, buffer, fragSize); + buffer += fragSize; + fileOffset += fragSize; + blockSize -= fragSize; + } + + return (noErr); + +ErrorExit: + i = 0; + while (bufs[i] != NULL) { + (void) CacheRelease (cache, bufs[i], true); + ++i; + } + + DisposeMemory(block->buffer); + DisposeMemory(block->blockHeader); + + block->blockHeader = NULL; + block->buffer = NULL; + + return (result); +} + + +/* + * Write a block that is fragmented across 2 or more allocation blocks + * + */
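+/*
+ * block->buffer holds the logically contiguous copy assembled by
+ * ReadFragmentedBlock, and block->blockHeader is its list of cache Buf_t
+ * fragments; each fragment gets its piece copied back and written through
+ * the cache, after which the temporary buffer and fragment list are released.
+ */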
+static OSStatus +WriteFragmentedBlock( SFCB *file, BlockDescriptor *block, int age, uint32_t writeOptions ) +{ + Cache_t * cache; + Buf_t ** bufs; /* list of Buf_t pointers */ + char * buffer; + char * bufEnd; + UInt32 fragSize; + OSStatus result; + int i = 0; + + result = 0; + cache = (Cache_t *) file->fcbVolume->vcbBlockCache; + bufs = (Buf_t **) block->blockHeader; + buffer = (char *) block->buffer; + bufEnd = buffer + file->fcbBlockSize; + + if (bufs == NULL) { + plog("WriteFragmentedBlock: NULL bufs list!\n"); + return (-1); + } + + while ((bufs[i] != NULL) && (buffer < bufEnd)) { + fragSize = bufs[i]->Length; + + /* copy data for this fragment */ + CopyMemory(buffer, bufs[i]->Buffer, fragSize); + + /* write it back to cache */ + result = CacheWrite(cache, bufs[i], age, writeOptions); + if (result) break; + + buffer += fragSize; + ++i; + } + + DisposeMemory(block->buffer); + DisposeMemory(block->blockHeader); + + block->buffer = NULL; + block->blockHeader = NULL; + block->fragmented = false; + + return (result); +} + + +/* + * Release a block that is fragmented across 2 or more allocation blocks + * + */ +static OSStatus +ReleaseFragmentedBlock (SFCB *file, BlockDescriptor *block, int age) +{ + Cache_t * cache; + Buf_t ** bufs; /* list of Buf_t pointers */ + char *buffer; + char *bufEnd; + UInt32 fragSize; + int i = 0; + + cache = (Cache_t *)file->fcbVolume->vcbBlockCache; + bufs = (Buf_t **) block->blockHeader; + + if (bufs == NULL) { + plog("ReleaseFragmentedBlock: NULL buf list!\n"); + return (-1); + } + + buffer = (char*)block->buffer; + bufEnd = buffer + file->fcbBlockSize; + + while (bufs[i] != NULL && (buffer < bufEnd)) { + fragSize = bufs[i]->Length; + buffer += fragSize; + (void) CacheRelease (cache, bufs[i], true); + ++i; + } + + DisposeMemory(block->buffer); + DisposeMemory(block->blockHeader); + + block->buffer = NULL; + block->blockHeader = NULL; + block->fragmented = false; + + return (noErr); +} + diff --git a/fsck_hfs/dfalib/CaseFolding.h b/fsck_hfs/dfalib/CaseFolding.h new file mode 100644 index 0000000..eeb61fe --- /dev/null +++ b/fsck_hfs/dfalib/CaseFolding.h @@ -0,0 +1,478 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: CaseFolding.h + + Contains: Case folding tables for HFS filenames + + Version: HFS Plus 1.0 + + Copyright: © 1997 by Apple Computer, Inc., all rights reserved. +*/ + + +/* This lower case table consists of a 256-entry high-byte table followed + * by some number of 256-entry subtables. 
The high-byte table contains + * either an offset to the subtable for characters with that high byte or + * zero, which means that there are no case mappings or ignored characters + * in that block. Ignored characters are mapped to zero. + */ + +UInt16 gLowerCaseTable[] = { + + /* High-byte indices ( == 0 iff no case mapping and no ignorables ) */ + + /* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00, + + /* Table 1 (for high byte 0x00) */ + + /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, + 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, + /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, + 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, + /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, + 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, + /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, + 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, + /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, + 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, + 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, + /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, + 
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, + 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, + /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, + 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F, + /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, + 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F, + /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, + 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, + /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, + 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, + /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7, + 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF, + /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7, + 0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF, + /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, + 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, + /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, + 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF, + + /* Table 2 (for high byte 0x01) */ + + /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, + 0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F, + /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117, + 0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F, + /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127, + 0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F, + /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137, + 0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140, + /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147, + 0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F, + /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157, + 0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F, + /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167, + 0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F, + /* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177, + 0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F, + /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188, + 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259, + /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268, + 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275, + /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8, + 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF, + /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292, + 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF, + /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9, + 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF, + /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7, + 0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF, + /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7, + 0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF, + /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7, + 0x01F8, 
0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF, + + /* Table 3 (for high byte 0x03) */ + + /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, + 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F, + /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, + 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F, + /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, + 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F, + /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, + 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F, + /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347, + 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F, + /* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357, + 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F, + /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, + 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F, + /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377, + 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F, + /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387, + 0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F, + /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, + 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, + 0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF, + /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, + 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, + 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF, + /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7, + 0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF, + /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7, + 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF, + /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7, + 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF, + + /* Table 4 (for high byte 0x04) */ + + /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407, + 0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F, + /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, + 0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, + 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, + 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, + 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, + 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, + /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467, + 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F, + /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477, + 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F, + /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 
0x0485, 0x0486, 0x0487, + 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F, + /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497, + 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F, + /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7, + 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF, + /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7, + 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF, + /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8, + 0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF, + /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7, + 0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF, + /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7, + 0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF, + /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7, + 0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF, + + /* Table 5 (for high byte 0x05) */ + + /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507, + 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F, + /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517, + 0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F, + /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527, + 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F, + /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, + 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, + 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557, + 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F, + /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, + 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, + 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587, + 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F, + /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597, + 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F, + /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7, + 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF, + /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, + 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF, + /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7, + 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF, + /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, + 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF, + /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, + 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF, + /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7, + 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF, + + /* Table 6 (for high byte 0x10) */ + + /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, + 0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F, + /* 1 */ 0x1010, 
0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017, + 0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F, + /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027, + 0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F, + /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037, + 0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F, + /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047, + 0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F, + /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057, + 0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F, + /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067, + 0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F, + /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077, + 0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F, + /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087, + 0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F, + /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097, + 0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F, + /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, + 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, + 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7, + 0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF, + /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, + 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, + 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7, + 0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF, + + /* Table 7 (for high byte 0x20) */ + + /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, + 0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017, + 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F, + /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027, + 0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F, + /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, + 0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F, + /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047, + 0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F, + /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057, + 0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F, + /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067, + 0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077, + 0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F, + /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, + 0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F, + /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097, + 0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F, + /* A */ 0x20A0, 0x20A1, 
0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7, + 0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF, + /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7, + 0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF, + /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7, + 0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF, + /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7, + 0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF, + /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7, + 0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF, + /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7, + 0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF, + + /* Table 8 (for high byte 0x21) */ + + /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107, + 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F, + /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117, + 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F, + /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127, + 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F, + /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, + 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F, + /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147, + 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F, + /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, + 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F, + /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, + 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, + 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187, + 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F, + /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197, + 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F, + /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7, + 0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF, + /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7, + 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF, + /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7, + 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF, + /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7, + 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF, + /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7, + 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF, + /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7, + 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF, + + /* Table 9 (for high byte 0xFE) */ + + /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07, + 0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F, + /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17, + 0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F, + /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27, + 0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 
0xFE2F, + /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37, + 0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F, + /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47, + 0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F, + /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57, + 0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F, + /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67, + 0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F, + /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77, + 0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F, + /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87, + 0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F, + /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97, + 0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F, + /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7, + 0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF, + /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7, + 0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF, + /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7, + 0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF, + /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7, + 0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF, + /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7, + 0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF, + /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7, + 0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000, + + /* Table 10 (for high byte 0xFF) */ + + /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07, + 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F, + /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17, + 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F, + /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, + 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, + 0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F, + /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, + 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, + 0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F, + /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67, + 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F, + /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77, + 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F, + /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87, + 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F, + /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97, + 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F, + /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7, + 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF, + /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7, + 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF, + /* 
C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7, + 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF, + /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7, + 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF, + /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7, + 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF, + /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7, + 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF +}; + + +/* RelString case folding table */ + +unsigned short gCompareTable[] = { + + /* 0 */ 0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, + 0x0800, 0x0900, 0x0A00, 0x0B00, 0x0C00, 0x0D00, 0x0E00, 0x0F00, + /* 1 */ 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700, + 0x1800, 0x1900, 0x1A00, 0x1B00, 0x1C00, 0x1D00, 0x1E00, 0x1F00, + /* 2 */ 0x2000, 0x2100, 0x2200, 0x2300, 0x2400, 0x2500, 0x2600, 0x2700, + 0x2800, 0x2900, 0x2A00, 0x2B00, 0x2C00, 0x2D00, 0x2E00, 0x2F00, + /* 3 */ 0x3000, 0x3100, 0x3200, 0x3300, 0x3400, 0x3500, 0x3600, 0x3700, + 0x3800, 0x3900, 0x3A00, 0x3B00, 0x3C00, 0x3D00, 0x3E00, 0x3F00, + /* 4 */ 0x4000, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, + 0x4800, 0x4900, 0x4A00, 0x4B00, 0x4C00, 0x4D00, 0x4E00, 0x4F00, + /* 5 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, + 0x5800, 0x5900, 0x5A00, 0x5B00, 0x5C00, 0x5D00, 0x5E00, 0x5F00, + + /* + * 0x60 maps to 'a' + * range 0x61 to 0x7a ('a' to 'z') map to upper case + */ + + /* 6 */ 0x4180, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, + 0x4800, 0x4900, 0x4A00, 0x4B00, 0x4C00, 0x4D00, 0x4E00, 0x4F00, + /* 7 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, + 0x5800, 0x5900, 0x5A00, 0x7B00, 0x7C00, 0x7D00, 0x7E00, 0x7F00, + + /* range 0x80 to 0xd8 gets mapped... */ + + /* 8 */ 0x4108, 0x410C, 0x4310, 0x4502, 0x4E0A, 0x4F08, 0x5508, 0x4182, + 0x4104, 0x4186, 0x4108, 0x410A, 0x410C, 0x4310, 0x4502, 0x4584, + /* 9 */ 0x4586, 0x4588, 0x4982, 0x4984, 0x4986, 0x4988, 0x4E0A, 0x4F82, + 0x4F84, 0x4F86, 0x4F08, 0x4F0A, 0x5582, 0x5584, 0x5586, 0x5508, + /* A */ 0xA000, 0xA100, 0xA200, 0xA300, 0xA400, 0xA500, 0xA600, 0x5382, + 0xA800, 0xA900, 0xAA00, 0xAB00, 0xAC00, 0xAD00, 0x4114, 0x4F0E, + /* B */ 0xB000, 0xB100, 0xB200, 0xB300, 0xB400, 0xB500, 0xB600, 0xB700, + 0xB800, 0xB900, 0xBA00, 0x4192, 0x4F92, 0xBD00, 0x4114, 0x4F0E, + /* C */ 0xC000, 0xC100, 0xC200, 0xC300, 0xC400, 0xC500, 0xC600, 0x2206, + 0x2208, 0xC900, 0x2000, 0x4104, 0x410A, 0x4F0A, 0x4F14, 0x4F14, + /* D */ 0xD000, 0xD100, 0x2202, 0x2204, 0x2702, 0x2704, 0xD600, 0xD700, + 0x5988, 0xD900, 0xDA00, 0xDB00, 0xDC00, 0xDD00, 0xDE00, 0xDF00, + + /* E */ 0xE000, 0xE100, 0xE200, 0xE300, 0xE400, 0xE500, 0xE600, 0xE700, + 0xE800, 0xE900, 0xEA00, 0xEB00, 0xEC00, 0xED00, 0xEE00, 0xEF00, + /* F */ 0xF000, 0xF100, 0xF200, 0xF300, 0xF400, 0xF500, 0xF600, 0xF700, + 0xF800, 0xF900, 0xFA00, 0xFB00, 0xFC00, 0xFD00, 0xFE00, 0xFF00 + +}; + diff --git a/fsck_hfs/dfalib/CatalogCheck.c b/fsck_hfs/dfalib/CatalogCheck.c new file mode 100644 index 0000000..ae0c983 --- /dev/null +++ b/fsck_hfs/dfalib/CatalogCheck.c @@ -0,0 +1,2118 @@ +/* + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "Scavenger.h" +#include "DecompDataEnums.h" +#include "DecompData.h" + +#include <sys/stat.h> + +extern int RcdFCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID); +extern int RcdHsFldCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID); + +/* + * information collected when visiting catalog records + */ +struct CatalogIterationSummary { + UInt32 parentID; + UInt32 rootDirCount; /* hfs only */ + UInt32 rootFileCount; /* hfs only */ + UInt32 dirCount; + UInt32 dirThreads; + UInt32 fileCount; + UInt32 filesWithThreads; /* hfs only */ + UInt32 fileThreads; + UInt32 nextCNID; + UInt64 encodings; + void * hardLinkRef; +}; + +/* Globals used during Catalog record checks */ +struct CatalogIterationSummary gCIS; + +SGlobPtr gScavGlobals; + +/* Local routines for checking catalog structures */ +static int CheckCatalogRecord(SGlobPtr GPtr, const HFSPlusCatalogKey *key, + const CatalogRecord *rec, UInt16 reclen); +static int CheckCatalogRecord_HFS(const HFSCatalogKey *key, + const CatalogRecord *rec, UInt16 reclen); + +static int CheckDirectory(const HFSPlusCatalogKey * key, const HFSPlusCatalogFolder * dir); +static int CheckFile(const HFSPlusCatalogKey * key, const HFSPlusCatalogFile * file); +static int CheckThread(const HFSPlusCatalogKey * key, const HFSPlusCatalogThread * thread); + +static int CheckDirectory_HFS(const HFSCatalogKey * key, const HFSCatalogFolder * dir); +static int CheckFile_HFS(const HFSCatalogKey * key, const HFSCatalogFile * file); +static int CheckThread_HFS(const HFSCatalogKey * key, const HFSCatalogThread * thread); + +static void CheckBSDInfo(const HFSPlusCatalogKey * key, const HFSPlusBSDInfo * bsdInfo, int isdir); +static int CheckCatalogName(u_int16_t charCount, const u_int16_t *uniChars, + u_int32_t parentID, Boolean thread); +static int CheckCatalogName_HFS(u_int16_t charCount, const u_char *filename, + u_int32_t parentID, Boolean thread); + +static int CaptureMissingThread(UInt32 threadID, const HFSPlusCatalogKey *nextKey); +static OSErr UniqueDotName( SGlobPtr GPtr, + CatalogName * theNewNamePtr, + UInt32 theParID, + Boolean isSingleDotName, + Boolean isHFSPlus ); +static Boolean FixDecomps( u_int16_t charCount, const u_int16_t *inFilename, HFSUniStr255 *outFilename ); + +/* + * This structure is used to keep track of the folderCount field in + * HFSPlusCatalogFolder records. For now, this is only done on HFSX volumes. 
+ */ +struct folderCountInfo { + UInt32 folderID; + UInt32 recordedCount; + UInt32 computedCount; + struct folderCountInfo *next; +}; + +/* + * Print a symbolic link name given the fileid + */ +static void +printSymLinkName(SGlobPtr GPtr, UInt32 fid) +{ + char pathname[PATH_MAX+1], filename[PATH_MAX+1]; + unsigned int path_len = sizeof(pathname), fname_len = sizeof(filename); + u_int16_t status; + + if (GetFileNamePathByID(GPtr, fid, pathname, &path_len, filename, &fname_len, &status) == 0) { + fsckPrint(GPtr->context, E_BadSymLinkName, pathname); + } + return; +} + +/* + * CountFolderRecords - Counts the number of folder records contained within a + * given folder. That is, how many direct subdirectories it has. This is used + * to update the folderCount field, if necessary. + * + * CountFolderRecords is a straight-forward iteration: given a HFSPlusCatalogFolder + * record, it iterates through the catalog BTree until it runs out of records that + * belong to it. For each folder record it finds, it increments a count. When it's + * done, it compares the two, and if there is a mismatch, requests a repair to be + * done. + */ +static OSErr +CountFolderRecords(HFSPlusCatalogKey *myKey, HFSPlusCatalogFolder *folder, SGlobPtr GPtr) +{ + SFCB *fcb = GPtr->calculatedCatalogFCB; + OSErr err = 0; + BTreeIterator iterator; + FSBufferDescriptor btRecord; + union { + HFSPlusCatalogFolder catRecord; + HFSPlusCatalogFile catFile; + } catRecord; + HFSPlusCatalogKey *key; + UInt16 recordSize = 0; + UInt32 folderCount = 0; + + ClearMemory(&iterator, sizeof(iterator)); + + key = (HFSPlusCatalogKey*)&iterator.key; + BuildCatalogKey(folder->folderID, NULL, true, (CatalogKey*)key); + btRecord.bufferAddress = &catRecord; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(catRecord); + + for (err = BTSearchRecord(fcb, &iterator, kNoHint, &btRecord, &recordSize, &iterator); + err == 0; + err = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, &btRecord, &recordSize)) { + switch (catRecord.catRecord.recordType) { + case kHFSPlusFolderThreadRecord: + case kHFSPlusFileThreadRecord: + continue; + } + if (key->parentID != folder->folderID) + break; + if (catRecord.catRecord.recordType == kHFSPlusFolderRecord) { + folderCount++; + } else if ((catRecord.catRecord.recordType == kHFSPlusFileRecord) && + (catRecord.catFile.flags & kHFSHasLinkChainMask) && + (catRecord.catFile.userInfo.fdType == kHFSAliasType) && + (catRecord.catFile.userInfo.fdCreator == kHFSAliasCreator) && + (key->parentID != GPtr->filelink_priv_dir_id)) { + /* A directory hard link is treated as normal directory + * for calculation of folder count. 
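The four-part test used just above to recognize a directory hard link (link-chain flag set, alias type and creator in the Finder info, and a parent other than the private file-link directory) reappears later in CheckFolderCount(). As a minimal sketch only, assuming the same Scavenger.h types and constants, the predicate amounts to the following; IsDirectoryHardLink is an illustrative name, not part of the patch:

/* Illustrative only: the same four conditions CountFolderRecords() and
 * CheckFolderCount() test inline before counting a file record as a folder. */
static int
IsDirectoryHardLink(const HFSPlusCatalogFile *file, UInt32 parentID, UInt32 filelink_priv_dir_id)
{
	return (file->flags & kHFSHasLinkChainMask) &&
	    (file->userInfo.fdType == kHFSAliasType) &&
	    (file->userInfo.fdCreator == kHFSAliasCreator) &&
	    (parentID != filelink_priv_dir_id);
}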
+ */ + folderCount++; + } + } + if (err == btNotFound) + err = 0; + if (err == 0) { + if (folderCount != folder->folderCount) { + err = RcdFCntErr( GPtr, + E_FldCount, + folderCount, + folder->folderCount, + folder->folderID); + } + } + return err; +} + +static void +releaseFolderCountInfo(struct folderCountInfo *fcip, int numFolders) +{ + int i; + + for (i = 0; i < numFolders; i++) { + struct folderCountInfo *f = &fcip[i]; + + f = f->next; + while (f) { + struct folderCountInfo *t = f->next; + free(f); + f = t; + } + } + free(fcip); +} + +static struct folderCountInfo * +findFolderEntry(struct folderCountInfo *fcip, int numFolders, UInt32 fid) +{ + struct folderCountInfo *retval = NULL; + int indx; + + indx = fid % numFolders; // Slot index + + retval = &fcip[indx]; + if (retval->folderID == fid) { + goto done; + } + while (retval->next != NULL) { + retval = retval->next; + if (retval->folderID == fid) + goto done; + } + retval = NULL; +done: + return retval; +} + +static struct folderCountInfo * +addFolderEntry(struct folderCountInfo *fcip, int numFolders, UInt32 fid) +{ + struct folderCountInfo *retval = NULL; + int indx; + + indx = fid % numFolders; + retval = &fcip[indx]; + + if (retval->folderID == fid) + goto done; + while (retval->folderID != 0) { + if (retval->next == NULL) { + retval->next = calloc(1, sizeof(struct folderCountInfo)); + if (retval->next == NULL) { + retval = NULL; + goto done; + } else + retval = retval->next; + } else if (retval->folderID == fid) { + goto done; + } else + retval = retval->next; + } + + retval->folderID = fid; + +done: + return retval; +} + +/* + * folderCountAdd - Accounts for given folder record or directory hard link + * for folder count of the given parent directory. For directory hard links, + * the folder ID and count should be zero. For a folder record, the values + * read from the catalog record are provided which are used to add the + * given folderID to the cache (folderCountInfo *ficp). + */ +static int +folderCountAdd(struct folderCountInfo *fcip, int numFolders, UInt32 parentID, UInt32 folderID, UInt32 count) +{ + int retval = 0; + struct folderCountInfo *curp = NULL; + + + /* Only add directories represented by folder record to the cache */ + if (folderID != 0) { + /* + * We track two things here. + * First, we need to find the entry matching this folderID. If we don't find it, + * we add it. If we do find it, or if we add it, we set the recordedCount. + */ + + curp = findFolderEntry(fcip, numFolders, folderID); + if (curp == NULL) { + curp = addFolderEntry(fcip, numFolders, folderID); + if (curp == NULL) { + retval = ENOMEM; + goto done; + } + } + curp->recordedCount = count; + + } + + /* + * After that, we try to find the parent to this entry. When we find it + * (or if we add it to the list), we increment the computedCount. + */ + curp = findFolderEntry(fcip, numFolders, parentID); + if (curp == NULL) { + curp = addFolderEntry(fcip, numFolders, parentID); + if (curp == NULL) { + retval = ENOMEM; + goto done; + } + } + curp->computedCount++; + +done: + return retval; +} + +/* + * CheckFolderCount - Verify the folderCount fields of the HFSPlusCatalogFolder records + * in the catalog BTree. This is currently only done for HFSX. + * + * Conceptually, this is a fairly simple routine: simply iterate through the catalog + * BTree, and count the number of subfolders contained in each folder. This value + * is used for the stat.st_nlink field, on HFSX. 
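findFolderEntry() and addFolderEntry() above implement the folder-count cache as a fixed array of buckets indexed by folderID % numFolders, with collisions chained through the next pointer and a folderID of 0 marking an unused head slot. A standalone sketch of that layout, for illustration only (struct fci and lookup_or_add are not names from the patch):

#include <stdlib.h>

struct fci {
	unsigned int id;	/* folder ID; 0 means an unused head slot */
	unsigned int recorded;	/* folderCount taken from the folder's own record */
	unsigned int computed;	/* children counted while walking the catalog */
	struct fci  *next;	/* collision chain */
};

static struct fci *
lookup_or_add(struct fci *table, int nbuckets, unsigned int id)
{
	struct fci *p = &table[id % nbuckets];

	if (p->id == id)
		return p;
	if (p->id == 0) {		/* empty head slot: claim it */
		p->id = id;
		return p;
	}
	while (p->next != NULL) {	/* walk the chain looking for a match */
		p = p->next;
		if (p->id == id)
			return p;
	}
	p->next = calloc(1, sizeof(*p));
	if (p->next == NULL)
		return NULL;		/* caller drops the cache and falls back */
	p->next->id = id;
	return p->next;
}

In the patch, the recorded count is filled in when the folder's own record is visited, the computed count is bumped once per child folder or directory hard link, and any entry where the two disagree later becomes an E_FldCount repair order.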
+ * + * However, since scanning the entire catalog can be a very costly operation, we dot + * it one of two ways. The first way is to simply iterate through the catalog once, + * and keep track of each folder ID we come across. This uses a fair bit of memory, + * so we limit the cache to 5MBytes, which works out to some 400k folderCountInfo + * entries (at the current size of three 4-byte entries per folderCountInfo entry). + * If the filesystem has more than that, we instead use the slower (but significantly + * less memory-intensive) method in CountFolderRecords: for each folder ID we + * come across, we call CountFolderRecords, which does its own iteration through the + * catalog, looking for children of the given folder. + */ + +OSErr +CheckFolderCount( SGlobPtr GPtr ) +{ + OSErr err = 0; + int numFolders; + BTreeIterator iterator; + FSBufferDescriptor btRecord; + HFSPlusCatalogKey *key; + union { + HFSPlusCatalogFolder catRecord; + HFSPlusCatalogFile catFile; + } catRecord; + UInt16 recordSize = 0; + struct folderCountInfo *fcip = NULL; + + ClearMemory(&iterator, sizeof(iterator)); + if (!VolumeObjectIsHFSX(GPtr)) { + goto done; + } + + if (GPtr->calculatedVCB == NULL) { + err = EINVAL; + goto done; + } + +#if 0 + /* + * We add two so we can account for the root folder, and + * the root folder's parent. Neither of which is real, + * but they show up as parent IDs in the catalog. + */ + numFolders = GPtr->calculatedVCB->vcbFolderCount + 2; +#else + /* + * Since we're using a slightly smarter hash method, + * we don't care so much about the number of folders + * allegedly on the volume; instead, we'll pick a nice + * prime number to use as the number of buckets. + * This bears some performance checking later. + */ + numFolders = 257; +#endif + + /* + * Limit the size of the folder count cache to 5Mbytes; + * if the requested number of folders don't fit, then + * we don't use the cache at all. + */ +#define MAXCACHEMEM (5 * 1024 * 1024) /* 5Mbytes */ +#define LCALLOC(c, s, l) \ + ({ __typeof(c) _count = (c); __typeof(s) _size = (s); __typeof(l) _lim = (l); \ + ((_count * _size) > _lim) ? NULL : calloc(_count, _size); }) + + fcip = LCALLOC(numFolders, sizeof(*fcip), MAXCACHEMEM); +#undef MAXCACHEMEM +#undef LCALLOC + +restart: + /* these objects are used by the BT* functions to iterate through the catalog */ + key = (HFSPlusCatalogKey*)&iterator.key; + BuildCatalogKey(kHFSRootFolderID, NULL, true, (CatalogKey*)key); + btRecord.bufferAddress = &catRecord; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(catRecord); + + /* + * Iterate through the catalog BTree until the end. + * For each folder we either cache the value, or we call CheckFolderCount. + * We also check the kHFSHasFolderCountMask flag in the folder flags field; + * if it's not set, we set it. (When migrating a volume from an older version. + * this will affect every folder entry; after that, it will only affect any + * corrupted areas.) 
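The LCALLOC() macro above is a GCC statement expression: it returns NULL instead of allocating when count * size would exceed the 5 MB cap, and a NULL fcip simply routes every folder through the slower CountFolderRecords() path. The same guard written as an ordinary function, for illustration only (capped_calloc is not a name from the patch):

#include <stdlib.h>

static void *
capped_calloc(size_t count, size_t size, size_t limit)
{
	if (size == 0 || count > limit / size)	/* over the cap; the division also avoids overflow in count * size */
		return NULL;
	return calloc(count, size);
}

/* e.g.  fcip = capped_calloc(numFolders, sizeof(*fcip), 5 * 1024 * 1024); */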
+ */ + for (err = BTIterateRecord(GPtr->calculatedCatalogFCB, kBTreeFirstRecord, + &iterator, &btRecord, &recordSize); + err == 0; + err = BTIterateRecord(GPtr->calculatedCatalogFCB, kBTreeNextRecord, + &iterator, &btRecord, &recordSize)) { + + switch (catRecord.catRecord.recordType) { + case kHFSPlusFolderRecord: + if (!(catRecord.catRecord.flags & kHFSHasFolderCountMask)) { + /* RcdHsFldCntErr requests a repair order to fix up the flags field */ + err = RcdHsFldCntErr( GPtr, + E_HsFldCount, + catRecord.catRecord.flags | kHFSHasFolderCountMask, + catRecord.catRecord.flags, + catRecord.catRecord.folderID ); + if (err != 0) + goto done; + } + if (fcip) { + if (folderCountAdd(fcip, numFolders, + key->parentID, + catRecord.catRecord.folderID, + catRecord.catRecord.folderCount)) { + /* + * We got an error -- this only happens if folderCountAdd() + * cannot allocate memory for a new node. In that case, we + * need to bail on the whole cache, and use the slow method. + * This also lets us release the memory, which will hopefully + * let some later allocations succeed. We restart just after + * the cache was allocated, and start over as if we had never + * allocated a cache in the first place. + */ + releaseFolderCountInfo(fcip, numFolders); + fcip = NULL; + goto restart; + } + } else { + err = CountFolderRecords(key, &catRecord.catRecord, GPtr); + if (err != 0) + goto done; + } + break; + case kHFSPlusFileRecord: + /* If this file record is a directory hard link, count + * it towards our folder count calculations. + */ + if ((catRecord.catFile.flags & kHFSHasLinkChainMask) && + (catRecord.catFile.userInfo.fdType == kHFSAliasType) && + (catRecord.catFile.userInfo.fdCreator == kHFSAliasCreator) && + (key->parentID != GPtr->filelink_priv_dir_id)) { + /* If we are using folder count cache, account + * for directory hard links by incrementing + * associated parentID in the cache. If an + * extensive search for catalog is being + * performed, account for directory hard links + * in CountFolderRecords() + */ + if (fcip) { + if (folderCountAdd(fcip, numFolders, + key->parentID, 0, 0)) { + /* See above for why we release & restart */ + releaseFolderCountInfo(fcip, numFolders); + fcip = NULL; + goto restart; + } + } + } + break; + } + } + + if (err == btNotFound) + err = 0; // We hit the end of the file, which is okay + if (err == 0 && fcip != NULL) { + int i; + + /* + * At this point, we are itereating through the cache, looking for + * mis-counts. (If we're not using the cache, then CountFolderRecords has + * already dealt with any miscounts.) 
+ */ + for (i = 0; i < numFolders; i++) { + struct folderCountInfo *curp; + + for (curp = &fcip[i]; curp; curp = curp->next) { + if (curp->folderID == 0) { + // fplog(stderr, "fcip[%d] has a folderID of 0?\n", i); + } else if (curp->folderID == kHFSRootParentID) { + // Root's parent doesn't really exist + continue; + } else { + if (curp->recordedCount != curp->computedCount) { + /* RcdFCntErr requests a repair order to correct the folder count */ + err = RcdFCntErr( GPtr, + E_FldCount, + curp->computedCount, + curp->recordedCount, + curp->folderID ); + if (err != 0) + goto done; + } + } + } + } + } +done: + if (fcip) { + releaseFolderCountInfo(fcip, numFolders); + fcip = NULL; + } + return err; +} + +/* + * CheckCatalogBTree - Verifies the catalog B-tree structure + * + * Causes CheckCatalogRecord to be called for every leaf record + */ +OSErr +CheckCatalogBTree( SGlobPtr GPtr ) +{ + OSErr err; + int hfsplus; + + gScavGlobals = GPtr; + hfsplus = VolumeObjectIsHFSPlus( ); + + ClearMemory(&gCIS, sizeof(gCIS)); + gCIS.parentID = kHFSRootParentID; + gCIS.nextCNID = kHFSFirstUserCatalogNodeID; + + if (hfsplus) { + /* Initialize check for file hard links */ + HardLinkCheckBegin(gScavGlobals, &gCIS.hardLinkRef); + + /* Initialize check for directory hard links */ + dirhardlink_init(gScavGlobals); + } + + GPtr->journal_file_id = GPtr->jib_file_id = 0; + + if (CheckIfJournaled(GPtr, true)) { + CatalogName fname; + CatalogKey key; + CatalogRecord rec; + UInt16 recSize; + int i; + +#define HFS_JOURNAL_FILE ".journal" +#define HFS_JOURNAL_INFO ".journal_info_block" + + fname.ustr.length = strlen(HFS_JOURNAL_FILE); + for (i = 0; i < fname.ustr.length; i++) + fname.ustr.unicode[i] = HFS_JOURNAL_FILE[i]; + BuildCatalogKey(kHFSRootFolderID, &fname, true, &key); + if (SearchBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &rec, &recSize, NULL) == noErr && + rec.recordType == kHFSPlusFileRecord) { + GPtr->journal_file_id = rec.hfsPlusFile.fileID; + } + fname.ustr.length = strlen(HFS_JOURNAL_INFO); + for (i = 0; i < fname.ustr.length; i++) + fname.ustr.unicode[i] = HFS_JOURNAL_INFO[i]; + BuildCatalogKey(kHFSRootFolderID, &fname, true, &key); + if (SearchBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &rec, &recSize, NULL) == noErr && + rec.recordType == kHFSPlusFileRecord) { + GPtr->jib_file_id = rec.hfsPlusFile.fileID; + } + } + + /* for compatibility, init these globals */ + gScavGlobals->TarID = kHFSCatalogFileID; + GetVolumeObjectBlockNum( &gScavGlobals->TarBlock ); + + /* + * Check out the BTree structure + */ + err = BTCheck(gScavGlobals, kCalculatedCatalogRefNum, (CheckLeafRecordProcPtr)CheckCatalogRecord); + if (err) goto exit; + + if (gCIS.dirCount != gCIS.dirThreads) { + RcdError(gScavGlobals, E_IncorrectNumThdRcd); + gScavGlobals->CBTStat |= S_Orphan; /* a directory record is missing */ + if (fsckGetVerbosity(gScavGlobals->context) >= kDebugLog) { + plog ("\t%s: dirCount = %u, dirThread = %u\n", __FUNCTION__, gCIS.dirCount, gCIS.dirThreads); + } + } + + if (hfsplus && (gCIS.fileCount != gCIS.fileThreads)) { + RcdError(gScavGlobals, E_IncorrectNumThdRcd); + gScavGlobals->CBTStat |= S_Orphan; + if (fsckGetVerbosity(gScavGlobals->context) >= kDebugLog) { + plog ("\t%s: fileCount = %u, fileThread = %u\n", __FUNCTION__, gCIS.fileCount, gCIS.fileThreads); + } + } + + if (!hfsplus && (gCIS.fileThreads != gCIS.filesWithThreads)) { + RcdError(gScavGlobals, E_IncorrectNumThdRcd); + gScavGlobals->CBTStat |= S_Orphan; + if (fsckGetVerbosity(gScavGlobals->context) >= kDebugLog) { + 
plog ("\t%s: fileThreads = %u, filesWithThread = %u\n", __FUNCTION__, gCIS.fileThreads, gCIS.filesWithThreads); + } + } + + gScavGlobals->calculatedVCB->vcbEncodingsBitmap = gCIS.encodings; + gScavGlobals->calculatedVCB->vcbNextCatalogID = gCIS.nextCNID; + gScavGlobals->calculatedVCB->vcbFolderCount = gCIS.dirCount - 1; + gScavGlobals->calculatedVCB->vcbFileCount = gCIS.fileCount; + if (!hfsplus) { + gScavGlobals->calculatedVCB->vcbNmRtDirs = gCIS.rootDirCount; + gScavGlobals->calculatedVCB->vcbNmFls = gCIS.rootFileCount; + } + + /* + * Check out the allocation map structure + */ + err = BTMapChk(gScavGlobals, kCalculatedCatalogRefNum); + if (err) goto exit; + + /* + * Make sure unused nodes in the B-tree are zero filled. + */ + err = BTCheckUnusedNodes(gScavGlobals, kCalculatedCatalogRefNum, &gScavGlobals->CBTStat); + if (err) goto exit; + + /* + * Compare BTree header record on disk with scavenger's BTree header record + */ + err = CmpBTH(gScavGlobals, kCalculatedCatalogRefNum); + if (err) goto exit; + + /* + * Compare BTree map on disk with scavenger's BTree map + */ + err = CmpBTM(gScavGlobals, kCalculatedCatalogRefNum); + + if (hfsplus) { + if (scanflag == 0) { + (void) CheckHardLinks(gCIS.hardLinkRef); + + /* If any unrepairable corruption was detected for file + * hard links, stop the verification process by returning + * negative value. + */ + if (gScavGlobals->CatStat & S_LinkErrNoRepair) { + err = -1; + goto exit; + } + } + } + + exit: + if (hfsplus) + HardLinkCheckEnd(gCIS.hardLinkRef); + + return (err); +} + +/* + * CheckCatalogRecord - verify a catalog record + * + * Called in leaf-order for every leaf record in the Catalog B-tree + */ +static int +CheckCatalogRecord(SGlobPtr GPtr, const HFSPlusCatalogKey *key, const CatalogRecord *rec, UInt16 reclen) +{ + int result = 0; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + ++gScavGlobals->itemsProcessed; + + if (!isHFSPlus) + return CheckCatalogRecord_HFS((HFSCatalogKey *)key, rec, reclen); + + gScavGlobals->CNType = rec->recordType; + + switch (rec->recordType) { + case kHFSPlusFolderRecord: + ++gCIS.dirCount; + if (reclen != sizeof(HFSPlusCatalogFolder)){ + RcdError(gScavGlobals, E_LenDir); + result = E_LenDir; + break; + } + if (key->parentID != gCIS.parentID) { + result = CaptureMissingThread(key->parentID, key); + if (result) break; + /* Pretend thread record was there */ + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + } + result = CheckDirectory(key, (HFSPlusCatalogFolder *)rec); + break; + + case kHFSPlusFileRecord: + ++gCIS.fileCount; + if (reclen != sizeof(HFSPlusCatalogFile)){ + RcdError(gScavGlobals, E_LenFil); + result = E_LenFil; + break; + } + if (key->parentID != gCIS.parentID) { + result = CaptureMissingThread(key->parentID, key); + if (result) break; + /* Pretend thread record was there */ + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + } + result = CheckFile(key, (HFSPlusCatalogFile *)rec); + break; + + case kHFSPlusFolderThreadRecord: + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + /* Fall through */ + + case kHFSPlusFileThreadRecord: + if (rec->recordType == kHFSPlusFileThreadRecord) + ++gCIS.fileThreads; + + if (reclen > sizeof(HFSPlusCatalogThread) || + reclen < sizeof(HFSPlusCatalogThread) - sizeof(HFSUniStr255)) { + RcdError(gScavGlobals, E_LenThd); + result = E_LenThd; + break; + } else if (reclen == sizeof(HFSPlusCatalogThread)) { + gScavGlobals->VeryMinorErrorsStat |= S_BloatedThreadRecordFound; + } + result = CheckThread(key, (HFSPlusCatalogThread *)rec); + 
break; + + default: + RcdError(gScavGlobals, E_CatRec); + result = E_CatRec; + } + + return (result); +} + +/* + * CheckCatalogRecord_HFS - verify an HFS catalog record + * + * Called in leaf-order for every leaf record in the Catalog B-tree + */ +static int +CheckCatalogRecord_HFS(const HFSCatalogKey *key, const CatalogRecord *rec, UInt16 reclen) +{ + int result = 0; + + gScavGlobals->CNType = rec->recordType; + + switch (rec->recordType) { + case kHFSFolderRecord: + ++gCIS.dirCount; + if (key->parentID == kHFSRootFolderID ) + ++gCIS.rootDirCount; + if (reclen != sizeof(HFSCatalogFolder)){ + RcdError(gScavGlobals, E_LenDir); + result = E_LenDir; + break; + } + if (key->parentID != gCIS.parentID) { + result = CaptureMissingThread(key->parentID, (HFSPlusCatalogKey *)key); + if (result) break; + /* Pretend thread record was there */ + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + } + result = CheckDirectory_HFS(key, (HFSCatalogFolder *)rec); + break; + + case kHFSFileRecord: + ++gCIS.fileCount; + if (key->parentID == kHFSRootFolderID ) + ++gCIS.rootFileCount; + if (reclen != sizeof(HFSCatalogFile)){ + RcdError(gScavGlobals, E_LenFil); + result = E_LenFil; + break; + } + if (key->parentID != gCIS.parentID) { + result = CaptureMissingThread(key->parentID, (HFSPlusCatalogKey *)key); + if (result) break; + /* Pretend thread record was there */ + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + } + result = CheckFile_HFS(key, (HFSCatalogFile *)rec); + break; + + case kHFSFolderThreadRecord: + ++gCIS.dirThreads; + gCIS.parentID = key->parentID; + /* Fall through */ + case kHFSFileThreadRecord: + if (rec->recordType == kHFSFileThreadRecord) + ++gCIS.fileThreads; + + if (reclen != sizeof(HFSCatalogThread)) { + RcdError(gScavGlobals, E_LenThd); + result = E_LenThd; + break; + } + result = CheckThread_HFS(key, (HFSCatalogThread *)rec); + break; + + + default: + RcdError(gScavGlobals, E_CatRec); + result = E_CatRec; + } + + return (result); +} + +/* + * CheckDirectory - verify a catalog directory record + * + * Also collects info for later processing. 
+ * Called in leaf-order for every directory record in the Catalog B-tree + */ +static int +CheckDirectory(const HFSPlusCatalogKey * key, const HFSPlusCatalogFolder * dir) +{ + UInt32 dirID; + int result = 0; + + dirID = dir->folderID; + + /* Directory cannot have these two flags set */ + if ((dir->flags & (kHFSFileLockedMask | kHFSThreadExistsMask)) != 0) { + RcdError(gScavGlobals, E_CatalogFlagsNotZero); + gScavGlobals->CBTStat |= S_ReservedNotZero; + } + + RecordXAttrBits(gScavGlobals, dir->flags, dir->folderID, kCalculatedCatalogRefNum); +#if DEBUG_XATTR + plog ("%s: Record folderID=%d for prime modulus calculations\n", __FUNCTION__, dir->folderID); +#endif + + if (dirID < kHFSFirstUserCatalogNodeID && + dirID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + if (dirID >= gCIS.nextCNID ) + gCIS.nextCNID = dirID + 1; + + gCIS.encodings |= (u_int64_t)(1ULL << MapEncodingToIndex(dir->textEncoding & 0x7F)); + + CheckBSDInfo(key, &dir->bsdInfo, true); + + CheckCatalogName(key->nodeName.length, &key->nodeName.unicode[0], key->parentID, false); + + /* Keep track of the directory inodes found */ + if (dir->flags & kHFSHasLinkChainMask) { + gScavGlobals->calculated_dirinodes++; + } + + return (result); +} + +/* + * CheckFile - verify a HFS+ catalog file record + * - sanity check values + * - collect info for later processing + * + * Called in leaf-order for every file record in the Catalog B-tree + */ +static int +CheckFile(const HFSPlusCatalogKey * key, const HFSPlusCatalogFile * file) +{ + UInt32 fileID; + UInt32 blocks; + UInt64 bytes; + int result = 0; + int islink = 0; + int isjrnl = 0; + size_t len; + unsigned char filename[256 * 3]; + + (void) utf_encodestr(key->nodeName.unicode, + key->nodeName.length * 2, + filename, &len, sizeof(filename)); + filename[len] = '\0'; + + RecordXAttrBits(gScavGlobals, file->flags, file->fileID, kCalculatedCatalogRefNum); +#if DEBUG_XATTR + plog ("%s: Record fileID=%d for prime modulus calculations\n", __FUNCTION__, file->fileID); +#endif + + fileID = file->fileID; + if (fileID < kHFSFirstUserCatalogNodeID) { + RcdError(gScavGlobals, E_InvalidID); + result = E_InvalidID; + return (result); + } + if (fileID >= gCIS.nextCNID ) + gCIS.nextCNID = fileID + 1; + + gCIS.encodings |= (u_int64_t)(1ULL << MapEncodingToIndex(file->textEncoding & 0x7F)); + + CheckBSDInfo(key, &file->bsdInfo, false); + + /* check out data fork extent info */ + result = CheckFileExtents(gScavGlobals, file->fileID, kDataFork, NULL, + file->dataFork.extents, &blocks); + if (result != noErr) + return (result); + + if (file->dataFork.totalBlocks != blocks) { + result = RecordBadAllocation(key->parentID, filename, kDataFork, + file->dataFork.totalBlocks, blocks); + if (result) + return (result); + } else { + bytes = (UInt64)blocks * (UInt64)gScavGlobals->calculatedVCB->vcbBlockSize; + if (file->dataFork.logicalSize > bytes) { + result = RecordTruncation(key->parentID, filename, kDataFork, + file->dataFork.logicalSize, bytes); + if (result) + return (result); + } + } + /* check out resource fork extent info */ + result = CheckFileExtents(gScavGlobals, file->fileID, kRsrcFork, NULL, + file->resourceFork.extents, &blocks); + if (result != noErr) + return (result); + + if (file->resourceFork.totalBlocks != blocks) { + result = RecordBadAllocation(key->parentID, filename, kRsrcFork, + file->resourceFork.totalBlocks, blocks); + if (result) + return (result); + } else { + bytes = (UInt64)blocks * (UInt64)gScavGlobals->calculatedVCB->vcbBlockSize; + if 
(file->resourceFork.logicalSize > bytes) { + result = RecordTruncation(key->parentID, filename, kRsrcFork, + file->resourceFork.logicalSize, bytes); + if (result) + return (result); + } + } + + /* Collect indirect link info for later */ + if (file->userInfo.fdType == kHardLinkFileType && + file->userInfo.fdCreator == kHFSPlusCreator) { + islink = 1; + CaptureHardLink(gCIS.hardLinkRef, file); + } + + CheckCatalogName(key->nodeName.length, &key->nodeName.unicode[0], key->parentID, false); + + /* Keep track of the directory hard links found */ + if ((file->flags & kHFSHasLinkChainMask) && + ((file->userInfo.fdType == kHFSAliasType) || + (file->userInfo.fdCreator == kHFSAliasCreator)) && + (key->parentID != gScavGlobals->filelink_priv_dir_id && + key->parentID != gScavGlobals->dirlink_priv_dir_id)) { + gScavGlobals->calculated_dirlinks++; + islink = 1; + } + + /* For non-journaled filesystems, the cached journal file IDs will be 0 */ + if (file->fileID && + (file->fileID == gScavGlobals->journal_file_id || + file->fileID == gScavGlobals->jib_file_id)) { + isjrnl = 1; + } + + if (islink == 0) { + if (file->flags & kHFSHasLinkChainMask && + (gScavGlobals->filelink_priv_dir_id != key->parentID && + gScavGlobals->dirlink_priv_dir_id != key->parentID)) { + RepairOrderPtr p; + fsckPrint(gScavGlobals->context, E_LinkChainNonLink, file->fileID); + p = AllocMinorRepairOrder(gScavGlobals, 0); + if (p) { + p->type = E_LinkChainNonLink; + p->correct = 0; + p->incorrect = 0; + p->parid = file->fileID; + p->hint = 0; + } else { + result = memFullErr; + } + gScavGlobals->CatStat |= S_LinkErrRepair; + } + + if (((file->bsdInfo.fileMode & S_IFMT) == S_IFREG) && + gScavGlobals->filelink_priv_dir_id != key->parentID && + file->bsdInfo.special.linkCount > 1 && + isjrnl == 0) { + RepairOrderPtr p; + char badstr[16]; + fsckPrint(gScavGlobals->context, E_FileLinkCountError, file->fileID); + snprintf(badstr, sizeof(badstr), "%u", file->bsdInfo.special.linkCount); + fsckPrint(gScavGlobals->context, E_BadValue, "1", badstr); + + p = AllocMinorRepairOrder(gScavGlobals, 0); + if (p) { + p->type = E_FileLinkCountError; + p->correct = 1; + p->incorrect = file->bsdInfo.special.linkCount; + p->parid = file->fileID; + p->hint = 0; + } else { + result = memFullErr; + } + gScavGlobals->CatStat |= S_LinkErrRepair; + } + /* + * Check for symlinks. + * Currently, d_check_slink is 0x1000, so -D 0x1000 on the command line. + */ + if ((cur_debug_level & d_check_slink) != 0) { + if (((file->bsdInfo.fileMode & S_IFMT) == S_IFLNK) || + file->userInfo.fdType == kSymLinkFileType || + file->userInfo.fdCreator == kSymLinkCreator) { + // Okay, it claims to be a symlink, at least somehow. + // Check all the info + if (((file->bsdInfo.fileMode & S_IFMT) != S_IFLNK) || + file->userInfo.fdType != kSymLinkFileType || + file->userInfo.fdCreator != kSymLinkCreator) { + fsckPrint(gScavGlobals->context, E_BadSymLink, file->fileID); + // Should find a way to print out the path, no? + } + if (file->dataFork.logicalSize > PATH_MAX) { + fsckPrint(gScavGlobals->context, E_BadSymLinkLength, file->fileID, (unsigned int)file->dataFork.logicalSize, (unsigned int)PATH_MAX); + printSymLinkName(gScavGlobals, file->fileID); + } else { + /* + * Reading is hard. + * It's made easier by PATH_MAX being so small, so we can assume + * (for now) that the file is entirely in the 8 extents in the catalog + * record. (In most cases, it'll be only one extent; in the worst + * case, it will only be 2, at least until PATH_MAX is increased.) 
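The loop below reads the link target through the fsck block cache. As a rough standalone sketch of the same idea, assuming a raw device descriptor fd, the volume's allocation block size, the HFSPlusExtentDescriptor layout from the surrounding headers, and extents whose startBlock translates directly into a byte offset (the real code goes through CacheRead() and the volume's own offset handling), one could write:

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Reads at most logicalSize bytes spread over the eight extent slots kept in
 * the catalog record; returns a NUL-terminated buffer or NULL on error. */
static char *
read_inline_fork(int fd, uint32_t blockSize,
    const HFSPlusExtentDescriptor ext[8], uint64_t logicalSize)
{
	char *buf = calloc(1, logicalSize + 1);
	uint64_t done = 0;
	int i;

	if (buf == NULL)
		return NULL;
	for (i = 0; i < 8 && done < logicalSize; i++) {
		off_t off = (off_t)ext[i].startBlock * blockSize;
		uint64_t want = (uint64_t)ext[i].blockCount * blockSize;

		if (want > logicalSize - done)	/* trim the last extent to logical EOF */
			want = logicalSize - done;
		if (want == 0)
			break;			/* ran out of populated extents */
		if (pread(fd, buf + done, (size_t)want, off) != (ssize_t)want) {
			free(buf);
			return NULL;
		}
		done += want;
	}
	return buf;
}

A caller would then compare strnlen() of the buffer against the recorded logicalSize, which is the consistency check the code below performs.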
+ */ + uint8_t *dataBuffer = malloc(file->dataFork.totalBlocks * gScavGlobals->calculatedVCB->vcbBlockSize + 1); + + if (dataBuffer == NULL) { + plog("Unable to allocate %llu bytes for reading symlink", file->dataFork.logicalSize); + } else { + char *curPtr = (char*)dataBuffer; + size_t nread = 0; + size_t dataLen; + HFSPlusExtentDescriptor *ep = (HFSPlusExtentDescriptor*)&file->dataFork.extents[0]; + + while (nread < file->dataFork.logicalSize) { + Buf_t *bufp; + int rv = CacheRead(&fscache, ep->startBlock * (off_t)gScavGlobals->calculatedVCB->vcbBlockSize, ep->blockCount * gScavGlobals->calculatedVCB->vcbBlockSize, &bufp); + if (rv) { + abort(); // do something better + } + memcpy(curPtr, bufp->Buffer, bufp->Length); + curPtr += bufp->Length; + nread += bufp->Length; + CacheRelease(&fscache, bufp, 0); + } + dataLen = strnlen((char*)dataBuffer, file->dataFork.totalBlocks * gScavGlobals->calculatedVCB->vcbBlockSize); + if (dataLen != file->dataFork.logicalSize) { + fsckPrint(gScavGlobals->context, E_BadSymLinkLength, file->fileID, (unsigned int)dataLen, (unsigned int)file->dataFork.logicalSize); + printSymLinkName(gScavGlobals, file->fileID); + if (debug) + plog("Symlink for file id %u has bad data length\n", file->fileID); + } + free(dataBuffer); + } + } + } + } + } + if (islink == 1 && file->dataFork.totalBlocks != 0) { + fsckPrint(gScavGlobals->context, E_LinkHasData, file->fileID); + gScavGlobals->CatStat |= S_LinkErrNoRepair; + } + + return (result); +} + +/* + * CheckThread - verify a catalog thread + * + * Called in leaf-order for every thread record in the Catalog B-tree + */ +static int +CheckThread(const HFSPlusCatalogKey * key, const HFSPlusCatalogThread * thread) +{ + int result = 0; + + if (key->nodeName.length != 0) { + RcdError(gScavGlobals, E_ThdKey); + return (E_ThdKey); + } + + result = CheckCatalogName(thread->nodeName.length, &thread->nodeName.unicode[0], + thread->parentID, true); + if (result != noErr) { + RcdError(gScavGlobals, E_ThdCN); + return (E_ThdCN); + } + + if (key->parentID < kHFSFirstUserCatalogNodeID && + key->parentID != kHFSRootParentID && + key->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + + if (thread->parentID == kHFSRootParentID) { + if (key->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + } else if (thread->parentID < kHFSFirstUserCatalogNodeID && + thread->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + + return (0); +} + +/* + * CheckDirectory - verify an HFS catalog directory record + * + * Also collects info for later processing. 
+ * Called in leaf-order for every directory record in the Catalog B-tree + */ +static int +CheckDirectory_HFS(const HFSCatalogKey * key, const HFSCatalogFolder * dir) +{ + UInt32 dirID; + int result = 0; + + dirID = dir->folderID; + + /* Directory cannot have these two flags set */ + if ((dir->flags & (kHFSFileLockedMask | kHFSThreadExistsMask)) != 0) { + RcdError(gScavGlobals, E_CatalogFlagsNotZero); + gScavGlobals->CBTStat |= S_ReservedNotZero; + } + + if (dirID < kHFSFirstUserCatalogNodeID && + dirID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + if (dirID >= gCIS.nextCNID ) + gCIS.nextCNID = dirID + 1; + + CheckCatalogName_HFS(key->nodeName[0], &key->nodeName[1], key->parentID, false); + + return (result); +} + +/* + * CheckFile_HFS - verify a HFS catalog file record + * - sanity check values + * - collect info for later processing + * + * Called in b-tree leaf order for every HFS file + * record in the Catalog B-tree. + */ +static int +CheckFile_HFS(const HFSCatalogKey * key, const HFSCatalogFile * file) +{ + UInt32 fileID; + UInt32 blocks; + char idstr[20]; + int result = 0; + + if (file->flags & kHFSThreadExistsMask) + ++gCIS.filesWithThreads; + + /* 3843017 : Check for reserved field removed to support new bits in future */ + if ((file->dataStartBlock) || + (file->rsrcStartBlock) || + (file->reserved)) + { + RcdError(gScavGlobals, E_CatalogFlagsNotZero); + gScavGlobals->CBTStat |= S_ReservedNotZero; + } + + fileID = file->fileID; + if (fileID < kHFSFirstUserCatalogNodeID) { + RcdError(gScavGlobals, E_InvalidID); + result = E_InvalidID; + return (result); + } + if (fileID >= gCIS.nextCNID ) + gCIS.nextCNID = fileID + 1; + + /* check out data fork extent info */ + result = CheckFileExtents(gScavGlobals, file->fileID, kDataFork, NULL, + file->dataExtents, &blocks); + if (result != noErr) + return (result); + if (file->dataPhysicalSize > ((UInt64)blocks * (UInt64)gScavGlobals->calculatedVCB->vcbBlockSize)) { + snprintf (idstr, sizeof(idstr), "id=%u", fileID); + fsckPrint(gScavGlobals->context, E_PEOF, idstr); + return (noErr); /* we don't fix this, ignore the error */ + } + if (file->dataLogicalSize > file->dataPhysicalSize) { + snprintf (idstr, sizeof(idstr), "id=%u", fileID); + fsckPrint(gScavGlobals->context, E_LEOF, idstr); + return (noErr); /* we don't fix this, ignore the error */ + } + + /* check out resource fork extent info */ + result = CheckFileExtents(gScavGlobals, file->fileID, kRsrcFork, NULL, + file->rsrcExtents, &blocks); + if (result != noErr) + return (result); + if (file->rsrcPhysicalSize > ((UInt64)blocks * (UInt64)gScavGlobals->calculatedVCB->vcbBlockSize)) { + snprintf (idstr, sizeof(idstr), "id=%u", fileID); + fsckPrint(gScavGlobals->context, E_PEOF, idstr); + return (noErr); /* we don't fix this, ignore the error */ + } + if (file->rsrcLogicalSize > file->rsrcPhysicalSize) { + snprintf (idstr, sizeof(idstr), "id=%u", fileID); + fsckPrint(gScavGlobals->context, E_LEOF, idstr); + return (noErr); /* we don't fix this, ignore the error */ + } +#if 1 + /* Keeping handle in globals of file ID's for HFS volume only */ + if (PtrAndHand(&file->fileID, (Handle)gScavGlobals->validFilesList, sizeof(UInt32) ) ) + return (R_NoMem); +#endif + CheckCatalogName_HFS(key->nodeName[0], &key->nodeName[1], key->parentID, false); + + return (result); +} + +/* + * CheckThread - verify a catalog thread + * + * Called in leaf-order for every thread record in the Catalog B-tree + */ +static int +CheckThread_HFS(const HFSCatalogKey * key, const 
HFSCatalogThread * thread) +{ + int result = 0; + + if (key->nodeName[0] != 0) { + RcdError(gScavGlobals, E_ThdKey); + return (E_ThdKey); + } + + result = CheckCatalogName_HFS(thread->nodeName[0], &thread->nodeName[1], + thread->parentID, true); + if (result != noErr) { + RcdError(gScavGlobals, E_ThdCN); + return (E_ThdCN); + } + + if (key->parentID < kHFSFirstUserCatalogNodeID && + key->parentID != kHFSRootParentID && + key->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + + if (thread->parentID == kHFSRootParentID) { + if (key->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + } else if (thread->parentID < kHFSFirstUserCatalogNodeID && + thread->parentID != kHFSRootFolderID) { + RcdError(gScavGlobals, E_InvalidID); + return (E_InvalidID); + } + + return (0); +} + + +/* File types from BSD Mode */ +#define FT_MASK 0170000 /* Mask of file type. */ +#define FT_FIFO 0010000 /* Named pipe (fifo). */ +#define FT_CHR 0020000 /* Character device. */ +#define FT_DIR 0040000 /* Directory file. */ +#define FT_BLK 0060000 /* Block device. */ +#define FT_REG 0100000 /* Regular file. */ +#define FT_LNK 0120000 /* Symbolic link. */ +#define FT_SOCK 0140000 /* BSD domain socket. */ + +/* + * CheckBSDInfo - Check BSD Permissions data + * (HFS Plus volumes only) + * + * if repairable then log the error and create a repair order + */ +static void +CheckBSDInfo(const HFSPlusCatalogKey * key, const HFSPlusBSDInfo * bsdInfo, int isdir) +{ + + Boolean reset = false; + + /* skip uninitialized BSD info */ + if (bsdInfo->fileMode == 0) + return; + + switch (bsdInfo->fileMode & FT_MASK) { + case FT_DIR: + if (!isdir) + reset = true; + break; + case FT_REG: + case FT_BLK: + case FT_CHR: + case FT_LNK: + case FT_SOCK: + case FT_FIFO: + if (isdir) + reset = true; + break; + default: + reset = true; + } + + if (reset) { + RepairOrderPtr p; + int n; + + gScavGlobals->TarBlock = bsdInfo->fileMode & FT_MASK; + RcdError(gScavGlobals, E_InvalidPermissions); + + n = CatalogNameSize( (CatalogName *) &key->nodeName, true ); + + p = AllocMinorRepairOrder(gScavGlobals, n); + if (p == NULL) return; + + CopyCatalogName((const CatalogName *)&key->nodeName, + (CatalogName*)&p->name, true); + + p->type = E_InvalidPermissions; + p->correct = 0; + p->incorrect = bsdInfo->fileMode; + p->parid = key->parentID; + p->hint = 0; + + gScavGlobals->CatStat |= S_Permissions; + } +} + +/* + * Validate a Unicode filename for HFS+ volumes + * + * check character count + * check for illegal names + * + * if repairable then log the error and create a repair order + */ +static int +CheckCatalogName(u_int16_t charCount, const u_int16_t *uniChars, u_int32_t parentID, Boolean thread) +{ + OSErr result; + u_int16_t * myPtr; + RepairOrderPtr roPtr; + int myLength; + CatalogName newName; + + if ((charCount == 0) || (charCount > kHFSPlusMaxFileNameChars)) + return( E_CName ); + + // only do the remaining checks for files or directories + if ( thread ) + return( noErr ); + + // look for objects with illegal names of "." or "..". We only do this for + // file or folder catalog records (the thread records will be taken care of + // in the repair routines). 
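The check that follows is the UTF-16 form of that test; spelled out as a helper (IsDotOrDotDot is an illustrative name only), it amounts to:

#include <stdint.h>

/* True when the name is exactly "." or ".." in UTF-16 catalog-name form. */
static int
IsDotOrDotDot(uint16_t charCount, const uint16_t *uniChars)
{
	if (charCount == 1)
		return uniChars[0] == 0x002E;
	if (charCount == 2)
		return uniChars[0] == 0x002E && uniChars[1] == 0x002E;
	return 0;
}

When the test is true, the code below generates a unique replacement name with UniqueDotName() and queues an E_IllegalName repair order that carries both the old and the new name.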
+ if ( charCount < 3 && *uniChars == 0x2E ) + { + if ( charCount == 1 || (charCount == 2 && *(uniChars + 1) == 0x2E) ) + { + fsckPrint(gScavGlobals->context, E_IllegalName); + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\tillegal name is 0x" ); + PrintName( charCount, (UInt8 *) uniChars, true ); + } + + // get a new name to use when we rename the file system object + result = UniqueDotName( gScavGlobals, &newName, parentID, + ((charCount == 1) ? true : false), true ); + if ( result != noErr ) + return( noErr ); + + // we will copy the old and new names to our RepairOrder. The names will + // look like this: + // 2 byte length of old name + // unicode characters for old name + // 2 byte length of new name + // unicode characters for new name + myLength = (charCount + 1) * 2; // bytes needed for old name + myLength += ((newName.ustr.length + 1) * 2); // bytes needed for new name + + roPtr = AllocMinorRepairOrder( gScavGlobals, myLength ); + if ( roPtr == NULL ) + return( noErr ); + + myPtr = (u_int16_t *) &roPtr->name; + *myPtr++ = charCount; // copy in length of old name and bump past it + CopyMemory( uniChars, myPtr, (charCount * 2) ); // copy in old name + myPtr += charCount; // bump past old name + *myPtr++ = newName.ustr.length; // copy in length of new name and bump past it + CopyMemory( newName.ustr.unicode, myPtr, (newName.ustr.length * 2) ); // copy in new name + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\treplacement name is 0x" ); + PrintName( newName.ustr.length, (UInt8 *) &newName.ustr.unicode, true ); + } + + roPtr->type = E_IllegalName; + roPtr->parid = parentID; + gScavGlobals->CatStat |= S_IllName; + return( E_IllegalName ); + } + } + + // look for Unicode decomposition errors in file system object names created before Jaguar (10.2) + if ( FixDecomps( charCount, uniChars, &newName.ustr ) ) + { + fsckPrint(gScavGlobals->context, E_IllegalName); + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\tillegal name is 0x" ); + PrintName( charCount, (UInt8 *) uniChars, true ); + } + + // we will copy the old and new names to our RepairOrder. 
The names will + // look like this: + // 2 byte length of old name + // unicode characters for old name + // 2 byte length of new name + // unicode characters for new name + myLength = (charCount + 1) * 2; // bytes needed for old name + myLength += ((newName.ustr.length + 1) * 2); // bytes needed for new name + + roPtr = AllocMinorRepairOrder( gScavGlobals, myLength ); + if ( roPtr == NULL ) + return( noErr ); + + myPtr = (u_int16_t *) &roPtr->name; + *myPtr++ = charCount; // copy in length of old name and bump past it + CopyMemory( uniChars, myPtr, (charCount * 2) ); // copy in old name + myPtr += charCount; // bump past old name + *myPtr++ = newName.ustr.length; // copy in length of new name and bump past it + CopyMemory( newName.ustr.unicode, myPtr, (newName.ustr.length * 2) ); // copy in new name + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\treplacement name is 0x" ); + PrintName( newName.ustr.length, (UInt8 *) &newName.ustr.unicode, true ); + } + + roPtr->type = E_IllegalName; + roPtr->parid = parentID; + gScavGlobals->CatStat |= S_IllName; + return( E_IllegalName ); + } + + return( noErr ); +} + + +/* + * Validate an HFS filename + * + * check character count + * check for illegal names + * + * if repairable then log the error and create a repair order + */ +static int +CheckCatalogName_HFS(u_int16_t charCount, const u_char *filename, u_int32_t parentID, Boolean thread) +{ + u_char * myPtr; + RepairOrderPtr roPtr; + int myLength; + CatalogName newName; + + if ((charCount == 0) || (charCount > kHFSMaxFileNameChars)) + return( E_CName ); + + // only do the remaining checks for files or directories + if ( thread ) + return( noErr ); + + // look for objects with illegal names of "." or "..". We only do this for + // file or folder catalog records (the thread records will be taken care of + // in the repair routines). + if ( charCount < 3 && *filename == 0x2E ) + { + if ( charCount == 1 || (charCount == 2 && *(filename + 1) == 0x2E) ) + { + OSErr result; + fsckPrint(gScavGlobals->context, E_IllegalName); + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\tillegal name is 0x" ); + PrintName( charCount, filename, false ); + } + + // get a new name to use when we rename the file system object + result = UniqueDotName( gScavGlobals, &newName, parentID, + ((charCount == 1) ? true : false), false ); + if ( result != noErr ) + return( noErr ); + + // we will copy the old and new names to our RepairOrder. 
The names will + // look like this: + // 1 byte length of old name + // characters for old name + // 1 byte length of new name + // characters for new name + myLength = charCount + 1; // bytes needed for old name + myLength += (newName.pstr[0] + 1); // bytes needed for new name + roPtr = AllocMinorRepairOrder( gScavGlobals, myLength ); + if ( roPtr == NULL ) + return( noErr ); + + myPtr = (u_char *)&roPtr->name[0]; + *myPtr++ = charCount; // copy in length of old name and bump past it + CopyMemory( filename, myPtr, charCount ); + myPtr += charCount; // bump past old name + *myPtr++ = newName.pstr[0]; // copy in length of new name and bump past it + CopyMemory( &newName.pstr[1], myPtr, newName.pstr[0] ); // copy in new name + if ( fsckGetVerbosity(gScavGlobals->context) >= kDebugLog ) { + plog( "\treplacement name is 0x" ); + PrintName( newName.pstr[0], &newName.pstr[1], false ); + } + + roPtr->type = E_IllegalName; + roPtr->parid = parentID; + gScavGlobals->CatStat |= S_IllName; + return( E_IllegalName ); + } + } + + return( noErr ); +} + + +/*------------------------------------------------------------------------------ +UniqueDotName: figure out a unique name we can use to rename a file system +object that has the illegal name of "." or ".." +------------------------------------------------------------------------------*/ +static OSErr +UniqueDotName( SGlobPtr GPtr, + CatalogName * theNewNamePtr, + UInt32 theParID, + Boolean isSingleDotName, + Boolean isHFSPlus ) +{ + u_char newChar; + OSErr result; + size_t nameLen; + UInt16 recSize; + SFCB * fcbPtr; + u_char * myPtr; + CatalogRecord record; + CatalogKey catKey; + u_char dotName[] = {'d', 'o', 't', 'd', 'o', 't', 0x0d, 0x00}; + + fcbPtr = GPtr->calculatedCatalogFCB; + + // create key with new name + if ( isSingleDotName ) + myPtr = &dotName[3]; + else + myPtr = &dotName[0]; + + nameLen = strlen((char *) myPtr ); + if ( isHFSPlus ) + { + int i; + theNewNamePtr->ustr.length = nameLen; + for ( i = 0; i < theNewNamePtr->ustr.length; i++ ) + theNewNamePtr->ustr.unicode[ i ] = (u_int16_t) *(myPtr + i); + } + else + { + theNewNamePtr->pstr[0] = nameLen; + memcpy( &theNewNamePtr->pstr[1], myPtr, nameLen ); + } + + // if the name is already in use we will try appending ascii characters + // from '0' (0x30) up to '~' (0x7E) + for ( newChar = 0x30; newChar < 0x7F; newChar++ ) + { + // make sure new name isn't already there + BuildCatalogKey( theParID, theNewNamePtr, isHFSPlus, &catKey ); + result = SearchBTreeRecord( fcbPtr, &catKey, kNoHint, NULL, &record, &recSize, NULL ); + if ( result != noErr ) + return( noErr ); + + // new name is already there, try another + if ( isHFSPlus ) + { + theNewNamePtr->ustr.unicode[ nameLen ] = (u_int16_t) newChar; + theNewNamePtr->ustr.length = nameLen + 1; + } + else + { + theNewNamePtr->pstr[ 0 ] = nameLen + 1; + theNewNamePtr->pstr[ nameLen + 1 ] = newChar; + } + } + + return( -1 ); + +} /* UniqueDotName */ + +/* Function: RecordBadAllocation + * + * Description: + * Record a repair to adjust a file or extended attribute's allocation size. + * This could also trigger a truncation if the new block count isn't large + * enough to cover the current LEOF. + * + * Note that it stores different values and prints different error message + * for file and extended attribute. + * For files - + * E_PEOF, parentID, filename, forkType (kDataFork/kRsrcFork). + * Prints filename. + * For extended attributes - + * E_PEOAttr, fileID, attribute name, forkType (kEAData). + * Prints attribute name and filename. 
Since the attribute name is + * passed as parameter, it needs to lookup the filename. + * + * Input: + * For files - + * parID - parent ID of file + * filename - name of the file + * forkType - type of fork (kDataFork/kRsrcFork) + * For extended attributes - + * parID - fileID for attribute + * filename - name of the attribute + * forkType - kEAData + * Common inputs - + * oldBlkCnt - Incorrect block count + * newBlkCnt - Correct block count + * Output: + * On success, zero. + * On failure, non-zero. + * R_NoMem - out of memory + * E_PEOF - Bad allocation error on plain HFS volume + */ +int +RecordBadAllocation(UInt32 parID, unsigned char * filename, UInt32 forkType, UInt32 oldBlkCnt, UInt32 newBlkCnt) +{ + RepairOrderPtr p; + char goodstr[16]; + char badstr[16]; + size_t n; + Boolean isHFSPlus; + int result; + char *real_filename; + unsigned int filenamelen; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (forkType == kEAData) { + /* Print attribute name and filename for extended attribute */ + filenamelen = NAME_MAX * 3; + real_filename = malloc(filenamelen); + if (!real_filename) { + return (R_NoMem); + } + + /* Get the name of the file */ + result = GetFileNamePathByID(gScavGlobals, parID, NULL, NULL, + real_filename, &filenamelen, NULL); + if (result) { + /* If error while looking up filename, default to print file ID */ + sprintf(real_filename, "id = %u", parID); + } + + fsckPrint(gScavGlobals->context, E_PEOAttr, filename, real_filename); + free(real_filename); + } else { + fsckPrint(gScavGlobals->context, E_PEOF, filename); + } + sprintf(goodstr, "%d", newBlkCnt); + sprintf(badstr, "%d", oldBlkCnt); + fsckPrint(gScavGlobals->context, E_BadValue, goodstr, badstr); + + /* Only HFS+ is repaired here */ + if ( !isHFSPlus ) + return (E_PEOF); + + n = strlen((char *)filename); + p = AllocMinorRepairOrder(gScavGlobals, n + 1); + if (p == NULL) + return (R_NoMem); + + if (forkType == kEAData) { + p->type = E_PEOAttr; + } else { + p->type = E_PEOF; + } + p->forkType = forkType; + p->incorrect = oldBlkCnt; + p->correct = newBlkCnt; + p->hint = 0; + p->parid = parID; + p->name[0] = n; /* pascal string */ + CopyMemory(filename, &p->name[1], n); + + gScavGlobals->CatStat |= S_FileAllocation; + return (0); +} + +/* Function: RecordTruncation + * + * Description: + * Record a repair to trucate a file's logical size. + * + * Note that it stores different error values and prints + * different error message for file and extended attribute. + * For files - + * E_LEOF, parentID, filename, forkType (kDataFork/kRsrcFork). + * Prints filename. + * For extended attributes - + * E_LEOAttr, fileID, attribute name, forkType (kEAData). + * Prints attribute name and filename. Since the attribute name is + * passed as parameter, it needs to lookup the filename. + * + * Input: + * For files - + * parID - parent ID of file + * filename - name of the file + * forkType - type of fork (kDataFork/kRsrcFork) + * For extended attributes - + * parID - fileID for attribute + * filename - name of the attribute + * forkType - kEAData + * Common inputs - + * oldSize - Incorrect logical size + * newSize - Correct logical size + * Output: + * On success, zero. + * On failure, non-zero. 
+ * R_NoMem - out of memory + * E_LEOF - Truncation error on plain HFS volume + */ +int +RecordTruncation(UInt32 parID, unsigned char * filename, UInt32 forkType, UInt64 oldSize, UInt64 newSize) +{ + RepairOrderPtr p; + char oldSizeStr[48]; + char newSizeStr[48]; + size_t n; + Boolean isHFSPlus; + int result; + char *real_filename; + unsigned int filenamelen; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (forkType == kEAData) { + /* Print attribute name and filename for extended attribute */ + filenamelen = NAME_MAX * 3; + real_filename = malloc(filenamelen); + if (!real_filename) { + return (R_NoMem); + } + + /* Get the name of the file */ + result = GetFileNamePathByID(gScavGlobals, parID, NULL, NULL, + real_filename, &filenamelen, NULL); + if (result) { + /* If error while looking up filename, default to print file ID */ + sprintf(real_filename, "id = %u", parID); + } + + fsckPrint(gScavGlobals->context, E_LEOAttr, filename, real_filename); + free(real_filename); + } else { + fsckPrint(gScavGlobals->context, E_LEOF, filename); + } + sprintf(oldSizeStr, "%qd", oldSize); + sprintf(newSizeStr, "%qd", newSize); + fsckPrint(gScavGlobals->context, E_BadValue, newSizeStr, oldSizeStr); + + /* Only HFS+ is repaired here */ + if ( !isHFSPlus ) + return (E_LEOF); + + n = strlen((char *)filename); + p = AllocMinorRepairOrder(gScavGlobals, n + 1); + if (p == NULL) + return (R_NoMem); + + if (forkType == kEAData) { + p->type = E_LEOAttr; + } else { + p->type = E_LEOF; + } + p->forkType = forkType; + p->incorrect = oldSize; + p->correct = newSize; + p->hint = 0; + p->parid = parID; + p->name[0] = n; /* pascal string */ + CopyMemory(filename, &p->name[1], n); + + gScavGlobals->CatStat |= S_FileAllocation; + return (0); +} + + +/* + * CaptureMissingThread + * + * Capture info for a missing thread record so it + * can be repaired later. The next key is saved + * so that the Catalog Hierarchy check can proceed. + * The thread PID/NAME are initialized during the + * Catalog Hierarchy check phase. + */ +static int +CaptureMissingThread(UInt32 threadID, const HFSPlusCatalogKey *nextKey) +{ + MissingThread *mtp; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + fsckPrint(gScavGlobals->context, E_NoThd, threadID); + + /* Only HFS+ missing threads are repaired here */ + if ( !isHFSPlus) + return (E_NoThd); + + mtp = (MissingThread *) AllocateClearMemory(sizeof(MissingThread)); + if (mtp == NULL) + return (R_NoMem); + + /* add it to the list of missing threads */ + mtp->link = gScavGlobals->missingThreadList; + gScavGlobals->missingThreadList = mtp; + + mtp->threadID = threadID; + CopyMemory(nextKey, &mtp->nextKey, nextKey->keyLength + 2); + + if (gScavGlobals->RepLevel == repairLevelNoProblemsFound) + gScavGlobals->RepLevel = repairLevelVolumeRecoverable; + + gScavGlobals->CatStat |= S_MissingThread; + return (noErr); +} + + +/* + FixDecomps. Originally written by Peter Edberg for use in fsck_hfs. + + If inFilename needs updating and the function was able to do this without + overflowing the 255-character limit, it returns 1 (true) and outFilename + contains the updated filename. If inFilename did not need updating, or an update + would overflow the limit, the function returns 0 (false) and the contents of + outFilename are undefined. + +Function implementation: + +Characters that don't require any special handling have combining class 0 and do +not begin a decomposition sequence (of 1-3 characters) that needs updating.
For +these characters, the function just copies them from inFilename to outFilename +and sets the pointer outNameCombSeqPtr to NULL (when this pointer is not NULL, +it points to the beginning of a sequence of combining marks that continues up to +the current character; if the current character is combining, it may need to be +reordered into that sequence). The copying operation is cheap, and postponing it +until we know the filename needs modification would make the code much more +complicated. + +This copying operation may be invoked from many places in the code, some deeply +nested - any time the code determines that the current character needs no +special handling. For this reason it has a label (CopyBaseChar) and is located +at the end of the character processing loop; various places in the code use goto +statements to jump to it (this is a situation where they are justified). + +The main function loop has 4 sections. + +First, it quickly determines if the high 12 bits of the character indicate that +it is in a range that has neither nonzero combining class nor any decomposition +sequences that need updating. If so, the code jumps straight to CopyBaseChar. + +Second, the code determines if the character is part of a sequence that needs +updating. It checks if the current character has a corresponding action in the +replaceData array. If so, depending on the action, it may need to check for +additional matching characters in inFilename. If the sequence of 1-3 characters +is successfully matched, then a replacement sequence of 1-3 characters is +inserted at the corresponding position in outFilename. While this replacement +sequence is being inserted, each character must be checked to see if it has +nonzero combining class and needs reordering (some replacement sequences consist +entirely of combining characters and may interact with combining characters in +the filename before the updated sequence). + +Third, the code handles characters whose high-order 12 bits indicated that some +action was required, but were not part of sequences that needed updating (these +may include characters that were examined in the second part but were part of +sequences that did not completely match, so there are also goto fallthroughs to +this code - labeled CheckCombClass - from the second part). These characters +have to be checked for nonzero combining class; if so, they are reordered as +necessary. Each time a new nonzero class character is encountered, it is added +to outFilename at the correct point in any active combining character sequence +(with other characters in the sequence moved as necessary), so the sequence +pointed to by outNameCombSeqPtr is always in correct order up to the current +character. + +Finally, the fourth part has the default handlers to just copy characters to +outFilename. + + */ +static Boolean +FixDecomps( u_int16_t charCount, const u_int16_t *inFilename, HFSUniStr255 *outFilename ) +{ + // input filename: address of curr input char, + const u_int16_t * inNamePtr = inFilename; + // and of last input char. + const u_int16_t * inNameLastPtr = &inFilename[charCount - 1]; + // output filename buffer: addr of next output char, + u_int16_t * outNamePtr = outFilename->unicode; + // and of last possible output char. + u_int16_t * outNameLastPtr = &outFilename->unicode[kHFSPlusMaxFileNameChars - 1]; + u_int16_t * outNameCombSeqPtr = NULL; // if non-NULL, start of output combining seq we are processing.
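+ // Worked example (an illustrative sketch only, using entries from the
+ // DecompData.h tables): an input char 0x0229 carries a kReplaceCurWithTwo
+ // action, so the pair 0x0065 0x0327 is written to the output in its place
+ // and didModifyName is set. If the input then continues with combining
+ // marks whose classes are out of order, e.g. 0x0301 (class 230) followed
+ // by 0x0323 (class 220), the reordering code below swaps them so the
+ // output reads 0x0323 0x0301, i.e. in ascending combining class order.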
+ u_int32_t maxClassValueInSeq = 0; + Boolean didModifyName = 0; + + while (inNamePtr <= inNameLastPtr) { + u_int16_t shiftUniChar; // this must be 16 bits for the kShiftUniCharOffset wraparound to work + int32_t rangeIndex; + u_int32_t shiftUniCharLo; + u_int32_t replDataIndex; + u_int32_t currCharClass; + + shiftUniChar = *inNamePtr + kShiftUniCharOffset; + if ( shiftUniChar >= kShiftUniCharLimit ) + goto CopyBaseChar; + rangeIndex = classAndReplIndex[shiftUniChar >> kLoFieldBitSize]; + if ( rangeIndex < 0 ) + goto CopyBaseChar; + shiftUniCharLo = shiftUniChar & kLoFieldMask; + replDataIndex = replaceRanges[rangeIndex][shiftUniCharLo]; + + if ( replDataIndex > 0 ) { + // we have a possible substitution (replDataIndex != 0) + const u_int16_t * replDataPtr; + u_int32_t action; + u_int32_t copyCount = 0; + + replDataPtr = &replaceData[replDataIndex]; + action = *replDataPtr++; + switch (action) { + case kReplaceCurWithTwo: + case kReplaceCurWithThree: + inNamePtr++; + copyCount = (action == kReplaceCurWithTwo)? 2: 3; + break; + // the next 3 cases can have a first char or replacement char with nonzero combining class + case kIfNextOneMatchesReplaceAllWithOne: + case kIfNextOneMatchesReplaceAllWithTwo: + if (inNamePtr + 1 <= inNameLastPtr && *(inNamePtr + 1) == *replDataPtr++) { + inNamePtr += 2; + copyCount = (action == kIfNextOneMatchesReplaceAllWithOne)? 1: 2; + } else { + // No substitution; check for comb class & copy char + goto CheckCombClass; + } + break; + case kIfNextTwoMatchReplaceAllWithOne: + if ( inNamePtr + 2 <= inNameLastPtr && + *(inNamePtr + 1) == *replDataPtr++ && + *(inNamePtr + 2) == *replDataPtr++) + { + inNamePtr += 3; + copyCount = 1; + } else { + // No substitution; check for comb class & copy char + goto CheckCombClass; + } + break; + } + + // now copy copyCount chars (1-3) from replDataPtr to output, checking for comb class etc. + if (outNamePtr + copyCount - 1 > outNameLastPtr) { + didModifyName = 0; + break; + } + while (copyCount-- > 0) { + currCharClass = 0; + shiftUniChar = *replDataPtr + kShiftUniCharOffset; + if ( shiftUniChar < kShiftUniCharLimit ) { + rangeIndex = classAndReplIndex[shiftUniChar >> kLoFieldBitSize]; + if (rangeIndex >= 0) { + shiftUniCharLo = shiftUniChar & kLoFieldMask; + currCharClass = combClassRanges[rangeIndex][shiftUniCharLo]; + } + } + // This part is similar to CheckCombClass below, which has more detailed + // comments; see them for info. + if ( currCharClass == 0 ) { + outNameCombSeqPtr = NULL; + *outNamePtr++ = *replDataPtr++; + } else if ( outNameCombSeqPtr == NULL ) { + outNameCombSeqPtr = outNamePtr; + maxClassValueInSeq = currCharClass; + *outNamePtr++ = *replDataPtr++; + } else if ( currCharClass >= maxClassValueInSeq ) { + // Sequence is already in correct order with current char, + // just update maxClassValueInSeq + maxClassValueInSeq = currCharClass; + *outNamePtr++ = *replDataPtr++; + } else if ( outNamePtr - outNameCombSeqPtr == 1) { + // Here we know we need to reorder. + // If the sequence is two chars, just interchange them + *outNamePtr++ = *outNameCombSeqPtr; + *outNameCombSeqPtr = *replDataPtr++; + } else { + // General reordering case for three or more chars. 
+ u_int16_t * outNameCombCharPtr; + u_int32_t combCharClass; + + outNameCombCharPtr = outNamePtr++; + while (outNameCombCharPtr > outNameCombSeqPtr) { + shiftUniChar = *(outNameCombCharPtr - 1) + kShiftUniCharOffset; + rangeIndex = classAndReplIndex[shiftUniChar >> kLoFieldBitSize]; + shiftUniCharLo = shiftUniChar & kLoFieldMask; + combCharClass = combClassRanges[rangeIndex][shiftUniCharLo]; + if (combCharClass <= currCharClass) + break; + *outNameCombCharPtr = *(outNameCombCharPtr - 1); + outNameCombCharPtr--; + } + *outNameCombCharPtr = *replDataPtr++; + } + } + didModifyName = 1; + continue; + } /* end of replDataIndex > 0 */ + + CheckCombClass: + // check for combining class + currCharClass = combClassRanges[rangeIndex][shiftUniCharLo]; + if ( currCharClass == 0 ) { + goto CopyBaseChar; + } + if ( outNameCombSeqPtr == NULL ) { + // The current char is the first of a possible sequence of chars + // with nonzero combining class. Initialize sequence stuff, then + // just copy char to output. + outNameCombSeqPtr = outNamePtr; + maxClassValueInSeq = currCharClass; + goto CopyChar; + } + if ( currCharClass >= maxClassValueInSeq ) { + // The sequence of chars with nonzero combining class is already + // in correct order through the current char; just update the max + // class value found in the sequence. + maxClassValueInSeq = currCharClass; + goto CopyChar; + } + + // This char is at least the second in a sequence of chars with + // nonzero combining class in the output buffer; outNameCombSeqPtr + // points to the first in the sequence. Need to put the current + // char into the correct place in the sequence (previous chars in + // the sequence are already in correct order, but the current char + // is out of place). + + // First make sure there is room for the new char + if (outNamePtr > outNameLastPtr) { + didModifyName = 0; + break; + } + + if (outNamePtr - outNameCombSeqPtr == 1) { + // If the sequence is two chars, just interchange them + *outNamePtr++ = *outNameCombSeqPtr; + *outNameCombSeqPtr = *inNamePtr++; + } else { + // General case: Starting at previous end of sequence, move chars to + // next position in string as long as their class is higher than current + // char; insert current char where we stop. We could cache the + // combining classes instead of re-determining them, but having multiple + // combining marks is rare enough that it wouldn't be worth the overhead. + // At least we don't have to recheck shiftUniChar < kShiftUniCharLimit, + // rangeIndex != 0, etc.) 
+ u_int16_t * outNameCombCharPtr; + u_int32_t combCharClass; + + outNameCombCharPtr = outNamePtr++; + while (outNameCombCharPtr > outNameCombSeqPtr) { + shiftUniChar = *(outNameCombCharPtr - 1) + kShiftUniCharOffset; + rangeIndex = classAndReplIndex[shiftUniChar >> kLoFieldBitSize]; + shiftUniCharLo = shiftUniChar & kLoFieldMask; + combCharClass = combClassRanges[rangeIndex][shiftUniCharLo]; + if (combCharClass <= currCharClass) + break; + *outNameCombCharPtr = *(outNameCombCharPtr - 1); + outNameCombCharPtr--; + } + *outNameCombCharPtr = *inNamePtr++; + } + didModifyName = 1; + continue; + + CopyBaseChar: + outNameCombSeqPtr = NULL; + CopyChar: + // nothing special happens with this char, just copy to output + if (outNamePtr <= outNameLastPtr) { + *outNamePtr++ = *inNamePtr++; + } else { + didModifyName = 0; + break; + } + } /* end of while( inNamePtr <= inNameLastPtr ) */ + + if (didModifyName) { + outFilename->length = outNamePtr - outFilename->unicode; + } + return didModifyName; + +} /* FixDecomps */ + diff --git a/fsck_hfs/dfalib/CheckHFS.h b/fsck_hfs/dfalib/CheckHFS.h new file mode 100644 index 0000000..ff88c22 --- /dev/null +++ b/fsck_hfs/dfalib/CheckHFS.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 1999-2000, 2002, 2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "../fsck_messages.h" + +/* External API to CheckHFS */ + +enum { + kNeverCheck = 0, /* never check (clean/dirty status only) */ + kDirtyCheck = 1, /* only check if dirty */ + kAlwaysCheck = 2, /* always check */ + kPartialCheck = 3, /* used with kForceRepairs in order to set up environment */ + kForceCheck = 4, + kMajorCheck = 5, /* Check for major vs. 
minor errors */ + + kNeverRepair = 0, /* never repair */ + kMinorRepairs = 1, /* only do minor repairs (fsck preen) */ + kMajorRepairs = 2, /* do all possible repairs */ + kForceRepairs = 3, /* force a repair of catalog B-Tree */ + + kNeverLog = 0, + kFatalLog = 1, /* (fsck preen) */ + kVerboseLog = 2, /* (Disk First Aid) */ + kDebugLog = 3 +}; + +enum { + R_NoMem = 1, /* not enough memory to do scavenge */ + R_IntErr = 2, /* internal Scavenger error */ + R_NoVol = 3, /* no volume in drive */ + R_RdErr = 4, /* unable to read from disk */ + R_WrErr = 5, /* unable to write to disk */ + R_BadSig = 6, /* not HFS/HFS+ signature */ + R_VFail = 7, /* verify failed */ + R_RFail = 8, /* repair failed */ + R_UInt = 9, /* user interrupt */ + R_Modified = 10, /* volume modifed by another app */ + R_BadVolumeHeader = 11, /* Invalid VolumeHeader */ + R_FileSharingIsON = 12, /* File Sharing is on */ + R_Dirty = 13, /* Dirty, but no checks were done */ + + Max_RCode = 13 /* maximum result code */ +}; + +/* Option bits to indicate which type of btree to rebuild */ +#define REBUILD_CATALOG 0x1 +#define REBUILD_EXTENTS 0x2 +#define REBUILD_ATTRIBUTE 0x4 + +extern int gGUIControl; + +extern int CheckHFS( const char *rdevnode, int fsReadRef, int fsWriteRef, + int checkLevel, int repairLevel, + fsck_ctx_t fsckContext, + int lostAndFoundMode, int canWrite, + int *modified, int liveMode, int rebuildOptions ); + +extern int journal_replay(const char *); + diff --git a/fsck_hfs/dfalib/DecompData.h b/fsck_hfs/dfalib/DecompData.h new file mode 100644 index 0000000..ee2464c --- /dev/null +++ b/fsck_hfs/dfalib/DecompData.h @@ -0,0 +1,263 @@ +/* + File: DecompData.h + Contains: Data tables for use in fsckFixDecomps + Note: This file is generated automatically by running DecompMakeData +*/ +#include "DecompDataEnums.h" + +static const int8_t classAndReplIndex[kHiFieldEntryCount] = { + -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0xFB00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0xFC00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0xFD00- + -1, -1, 76, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0xFE00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0xFF00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, // uChar 0x0000- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, // uChar 0x0100- + -1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x0200- + 5, 6, 7, 8, 9, -1, 10, -1, -1, 11, 12, 13, 14, 15, -1, -1, // uChar 0x0300- + 16, -1, -1, -1, -1, 17, -1, -1, 18, -1, -1, -1, -1, -1, 19, -1, // uChar 0x0400- + -1, -1, -1, -1, -1, -1, -1, -1, -1, 20, 21, 22, 23, -1, -1, -1, // uChar 0x0500- + -1, -1, 24, -1, 25, 26, -1, 27, -1, -1, -1, -1, 28, 29, 30, -1, // uChar 0x0600- + -1, 31, -1, 32, 33, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x0700- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x0800- + -1, -1, -1, 34, 35, 36, -1, -1, -1, -1, 37, 38, 39, -1, -1, -1, // uChar 0x0900- + -1, -1, 40, 41, 42, -1, -1, -1, -1, -1, -1, 43, 44, -1, -1, -1, // uChar 0x0A00- + -1, -1, 45, 46, 47, -1, -1, -1, -1, -1, -1, -1, 48, -1, -1, -1, // uChar 0x0B00- + -1, -1, -1, -1, 49, 50, -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, // uChar 0x0C00- + -1, -1, -1, -1, 52, -1, -1, -1, -1, -1, -1, -1, 53, 54, -1, -1, // uChar 0x0D00- + -1, -1, -1, 55, 56, -1, -1, -1, -1, -1, -1, 57, 58, -1, -1, -1, // uChar 0x0E00- + -1, 59, -1, 60, 
-1, -1, -1, 61, 62, -1, -1, 63, 64, -1, -1, -1, // uChar 0x0F00- + -1, -1, 65, 66, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1000- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1100- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1200- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1300- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1400- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1500- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1600- + -1, 67, -1, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, 69, -1, -1, // uChar 0x1700- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 70, -1, -1, -1, -1, -1, // uChar 0x1800- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1900- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1A00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1B00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1C00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1D00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1E00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x1F00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 71, 72, -1, // uChar 0x2000- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2100- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2200- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2300- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2400- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2500- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2600- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2700- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2800- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2900- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2A00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2B00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2C00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2D00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2E00- + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // uChar 0x2F00- + -1, -1, 73, -1, -1, -1, -1, -1, -1, 74, -1, -1, -1, -1, -1, -1 // uChar 0x3000- +}; + +static const u_int8_t combClassRanges[][kLoFieldEntryCount] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x00A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x01F0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0210- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0220- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0230- + { 230,230,230,230,230,230,230,230,230,230,230,230,230,230,230,230 }, // uChar 0x0300- + { 230,230,230,230,230,232,220,220,220,220,232,216,220,220,220,220 }, // uChar 0x0310- + { 220,202,202,220,220,220,220,202,202,220,220,220,220,220,220,220 }, // uChar 0x0320- + { 220,220,220,220, 1, 1, 1, 1, 
1,220,220,220,220,230,230,230 }, // uChar 0x0330- + { 230,230,230,230,230,240,230,220,220,220,230,230,230,220,220, 0 }, // uChar 0x0340- + { 234,234,233,230,230,230,230,230,230,230,230,230,230,230,230,230 }, // uChar 0x0360- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0390- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x03A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x03B0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x03C0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x03D0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0400- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0450- + { 0, 0, 0,230,230,230,230, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0480- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x04E0- + { 0,220,230,230,230,230,220,230,230,230,222,220,230,230,230,230 }, // uChar 0x0590- + { 230,230, 0,220,220,220,220,220,230,230,220,230,230,222,228,230 }, // uChar 0x05A0- + { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 20, 21, 22, 0, 23 }, // uChar 0x05B0- + { 0, 24, 25, 0,230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x05C0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0620- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 28, 29, 30, 31 }, // uChar 0x0640- + { 32, 33, 34,230,230,220, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0650- + { 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0670- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x06C0- + { 0, 0, 0, 0, 0, 0,230,230,230,230,230,230,230, 0, 0,230 }, // uChar 0x06D0- + { 230,230,230,220,230, 0, 0,230,230, 0,220,230,230,220, 0, 0 }, // uChar 0x06E0- + { 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0710- + { 230,220,230,230,220,230,230,220,220,220,230,220,220,230,220,230 }, // uChar 0x0730- + { 230,230,220,230,220,230,220,230,220,230,230, 0, 0, 0, 0, 0 }, // uChar 0x0740- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 }, // uChar 0x0930- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0940- + { 0,230,220,230,230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0950- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x09A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 }, // uChar 0x09B0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x09C0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0A20- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 }, // uChar 0x0A30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0A40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 }, // uChar 0x0AB0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0AC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0B20- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 }, // uChar 0x0B30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0B40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0BC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0C40- + { 0, 0, 0, 0, 0, 84, 91, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0C50- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0CC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0 }, // uChar 0x0D40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0 }, // uChar 0x0DC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0DD0- + { 0, 0, 0, 0, 0, 0, 0, 0,103,103, 9, 0, 0, 0, 0, 0 }, // uChar 0x0E30- + { 0, 0, 0, 
0, 0, 0, 0, 0,107,107,107,107, 0, 0, 0, 0 }, // uChar 0x0E40- + { 0, 0, 0, 0, 0, 0, 0, 0,118,118, 0, 0, 0, 0, 0, 0 }, // uChar 0x0EB0- + { 0, 0, 0, 0, 0, 0, 0, 0,122,122,122,122, 0, 0, 0, 0 }, // uChar 0x0EC0- + { 0, 0, 0, 0, 0, 0, 0, 0,220,220, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F10- + { 0, 0, 0, 0, 0,220, 0,220, 0,216, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F30- + { 0,129,130, 0,132, 0, 0, 0, 0, 0,130,130,130,130, 0, 0 }, // uChar 0x0F70- + { 130, 0,230,230, 9, 0,230,230, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F80- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0FB0- + { 0, 0, 0, 0, 0, 0,220, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0FC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1020- + { 0, 0, 0, 0, 0, 0, 0, 7, 0, 9, 0, 0, 0, 0, 0, 0 }, // uChar 0x1030- + { 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1710- + { 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1730- + { 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x17D0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0,228, 0, 0, 0, 0, 0, 0 }, // uChar 0x18A0- + { 230,230, 1, 1,230,230,230,230, 1, 1, 1,230,230, 0, 0, 0 }, // uChar 0x20D0- + { 0,230, 0, 0, 0, 1, 1,230,220,230, 1, 0, 0, 0, 0, 0 }, // uChar 0x20E0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,218,228,232,222,224,224 }, // uChar 0x3020- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 0, 0, 0, 0, 0 }, // uChar 0x3090- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 0 }, // uChar 0xFB10- + { 230,230,230,230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0xFE20- +}; + +static const u_int8_t replaceRanges[][kLoFieldEntryCount] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x00A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 5, 8, 0, 0, 0, 0, 0, 0 }, // uChar 0x01F0- + { 0, 0, 0, 0, 0, 0, 0, 0, 11, 14, 17, 20, 0, 0, 23, 26 }, // uChar 0x0210- + { 0, 0, 0, 0, 0, 0, 29, 32, 35, 38, 41, 45, 49, 53, 57, 60 }, // uChar 0x0220- + { 63, 67, 71, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0230- + { 0, 0, 0, 0, 0, 0, 77, 0, 80, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0300- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0310- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0320- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0330- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0340- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0360- + { 0, 84, 0, 0, 0, 88, 0, 92, 0, 96, 0, 0, 0, 0, 0,100 }, // uChar 0x0390- + { 0, 0, 0, 0, 0,104, 0, 0, 0,108, 0, 0, 0, 0, 0, 0 }, // uChar 0x03A0- + { 0,112, 0, 0, 0,116, 0,120, 0,124, 0, 0, 0, 0, 0,128 }, // uChar 0x03B0- + { 0, 0, 0, 0, 0,132, 0, 0, 0,136, 0, 0, 0, 0, 0, 0 }, // uChar 0x03C0- + { 0, 0,140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x03D0- + { 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,147, 0, 0 }, // uChar 0x0400- + { 150, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,153, 0, 0 }, // uChar 0x0450- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0480- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,156,159, 0, 0 }, // uChar 0x04E0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0590- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x05A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x05B0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x05C0- + { 0, 0,162,165,168,171,174, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0620- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0640- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 
uChar 0x0650- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0670- + { 177, 0,180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x06C0- + { 0, 0, 0,183, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x06D0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x06E0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0710- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0730- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0740- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0930- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0940- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0950- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,186, 0, 0, 0 }, // uChar 0x09A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x09B0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x09C0- + { 0,189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0A20- + { 0, 0, 0,192, 0, 0,195, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0A30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0A40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0AB0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0AC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,198 }, // uChar 0x0B20- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0B30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0B40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0BC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0C40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0C50- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0CC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0D40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0DC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,201, 0,204,207,211, 0 }, // uChar 0x0DD0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0E30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,214, 0, 0 }, // uChar 0x0E40- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0EB0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,217, 0, 0 }, // uChar 0x0EC0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F10- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F30- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F70- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0F80- + { 0, 0,220,224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0FB0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x0FC0- + { 0, 0, 0, 0, 0, 0,228, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1020- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1030- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1710- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x1730- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x17D0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x18A0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x20D0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x20E0- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x3020- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // uChar 0x3090- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,231, 0, 0 }, // uChar 0xFB10- + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0 }, // uChar 0xFE20- +}; + +static const u_int16_t replaceData[] = { + 0x0000, 0x0013, 0x030D, 0x00A8, 0x0301, 0x0002, 0x004E, 0x0300, // index 0- + 0x0002, 0x006E, 0x0300, 0x0002, 0x0053, 0x0326, 0x0002, 0x0073, // index 8- + 0x0326, 0x0002, 0x0054, 0x0326, 0x0002, 0x0074, 0x0326, 0x0002, // index 16- + 0x0048, 0x030C, 0x0002, 0x0068, 0x030C, 0x0002, 0x0041, 0x0307, // index 24- + 0x0002, 0x0061, 0x0307, 0x0002, 0x0045, 0x0327, 0x0002, 0x0065, // index 32- + 0x0327, 0x0003, 0x004F, 0x0308, 0x0304, 0x0003, 0x006F, 0x0308, // index 40- + 0x0304, 0x0003, 0x004F, 0x0303, 0x0304, 0x0003, 0x006F, 0x0303, // index 48- + 0x0304, 0x0002, 0x004F, 0x0307, 0x0002, 0x006F, 0x0307, 0x0003, // index 56- + 0x004F, 0x0307, 0x0304, 0x0003, 0x006F, 0x0307, 0x0304, 0x0002, // index 64- + 0x0059, 0x0304, 0x0002, 0x0079, 0x0304, 0x0012, 0x0307, 0x0310, // index 72- + 0x0013, 0x030D, 0x0308, 0x0301, 0x0013, 0x030D, 0x0391, 0x0301, // index 80- + 0x0013, 0x030D, 0x0395, 0x0301, 0x0013, 0x030D, 0x0397, 0x0301, // index 88- + 0x0013, 0x030D, 0x0399, 0x0301, 0x0013, 0x030D, 0x039F, 0x0301, // index 96- + 0x0013, 0x030D, 0x03A5, 0x0301, 0x0013, 0x030D, 0x03A9, 0x0301, // index 104- + 0x0013, 0x030D, 0x03B1, 0x0301, 0x0013, 0x030D, 0x03B5, 0x0301, // index 112- + 0x0013, 0x030D, 0x03B7, 0x0301, 0x0013, 0x030D, 0x03B9, 0x0301, // index 120- + 0x0013, 0x030D, 0x03BF, 0x0301, 0x0013, 0x030D, 0x03C5, 0x0301, // index 128- + 0x0013, 0x030D, 0x03C9, 0x0301, 0x0013, 0x030D, 0x03D2, 0x0301, // index 136- + 0x0002, 0x0415, 0x0300, 0x0002, 0x0418, 0x0300, 0x0002, 0x0435, // index 144- + 0x0300, 0x0002, 0x0438, 0x0300, 0x0002, 0x042D, 0x0308, 0x0002, // index 152- + 0x044D, 0x0308, 0x0002, 0x0627, 0x0653, 0x0002, 0x0627, 0x0654, // index 160- + 0x0002, 0x0648, 0x0654, 0x0002, 0x0627, 0x0655, 0x0002, 0x064A, // index 168- + 0x0654, 0x0002, 0x06D5, 0x0654, 0x0002, 0x06C1, 0x0654, 0x0002, // index 176- + 0x06D2, 0x0654, 0x0012, 0x09BC, 0x09B0, 0x0012, 0x0A3C, 0x0A5C, // index 184- + 0x0002, 0x0A32, 0x0A3C, 0x0002, 0x0A38, 0x0A3C, 0x0012, 0x0B3C, // index 192- + 0x0B5F, 0x0002, 0x0DD9, 0x0DCA, 0x0002, 0x0DD9, 0x0DCF, 0x0003, // index 200- + 0x0DD9, 0x0DCF, 0x0DCA, 0x0002, 0x0DD9, 0x0DDF, 0x0012, 0x0E32, // index 208- + 0x0E33, 0x0012, 0x0EB2, 0x0EB3, 0x0023, 0x0F80, 0x0F71, 0x0F77, // index 216- + 0x0023, 0x0F80, 0x0F71, 0x0F79, 0x0002, 0x1025, 0x102E, 0x0002, // index 224- + 0x05D9, 0x05B4 // index 232- +}; + +// combClassData: +// trimmed index: kHiFieldEntryCount(= 864) bytes +// ranges: 2 * 77 ranges * kLoFieldEntryCount(= 16) bytes = 2464 +// replData: 234 entries * 2 = 468 +// total: 3796 + diff --git a/fsck_hfs/dfalib/DecompDataEnums.h b/fsck_hfs/dfalib/DecompDataEnums.h new file mode 100644 index 0000000..ba36439 --- /dev/null +++ b/fsck_hfs/dfalib/DecompDataEnums.h @@ -0,0 +1,71 @@ +/* + File: DecompDataEnums.h + + Contains: Constants for data tables used with FixDecomps (CatalogCheck.c) + + Copyright: © 2002 by Apple Computer, Inc., all rights reserved. 
+ + CVS change log: + + $Log: DecompDataEnums.h,v $ + Revision 1.2 2002/12/20 01:20:36 lindak + Merged PR-2937515-2 into ZZ100 + Old HFS+ decompositions need to be repaired + + Revision 1.1.4.1 2002/12/16 18:55:22 jcotting + integrated code from text group (Peter Edberg) that will correct some + illegal names created with obsolete Unicode 2.1.2 decomposition rules + Bug #: 2937515 + Submitted by: jerry cottingham + Reviewed by: don brady + + Revision 1.1.2.1 2002/10/25 17:15:22 jcotting + added code from Peter Edberg that will detect and offer replacement + names for file system object names with pre-Jaguar decomp errors + Bug #: 2937515 + Submitted by: jerry cottingham + Reviewed by: don brady + + Revision 1.1 2002/10/16 06:33:25 pedberg + Initial working version of function and related tools and tables + + +*/ + +#ifndef __DECOMPDATAENUMS__ +#define __DECOMPDATAENUMS__ + +// Basic table parameters for 2-stage trie: +// The high 12 bits of a UniChar provide an index into a first-level table; +// if the entry there is >= 0, it is an index into a table of 16-element +// ranges indexed by the low 4 bits of the UniChar. Since the UniChars of interest +// for combining classes and sequence updates are either in the range 0000-30FF +// or in the range FB00-FFFF, we eliminate the large middle section of the first- +// level table by first adding 0500 to the UniChar to wrap the UniChars of interest +// into the range 0000-35FF. +enum { + kLoFieldBitSize = 4, + kShiftUniCharOffset = 0x0500, // add to UniChar so FB00 & up wraps to 0000 + kShiftUniCharLimit = 0x3600 // if UniChar + offset >= limit, no need to check +}; + +// The following are all derived from kLoFieldBitSize +enum { + kLoFieldEntryCount = 1 << kLoFieldBitSize, + kHiFieldEntryCount = kShiftUniCharLimit >> kLoFieldBitSize, + kLoFieldMask = (1 << kLoFieldBitSize) - 1 +}; + +// Action codes for sequence replacement/updating +enum { // next + repl = total chars + // a value of 0 means no action + kReplaceCurWithTwo = 0x02, // 0 + 2 = 2 + kReplaceCurWithThree = 0x03, // 0 + 3 = 3 + kIfNextOneMatchesReplaceAllWithOne = 0x12, // 1 + 1 = 2 + kIfNextOneMatchesReplaceAllWithTwo = 0x13, // 1 + 2 = 3 + kIfNextTwoMatchReplaceAllWithOne = 0x23 // 2 + 1 = 3 +}; + +#endif // __FSCKFIXDECOMPS__ + + diff --git a/fsck_hfs/dfalib/DecompMakeData.c b/fsck_hfs/dfalib/DecompMakeData.c new file mode 100644 index 0000000..66f2469 --- /dev/null +++ b/fsck_hfs/dfalib/DecompMakeData.c @@ -0,0 +1,586 @@ +/* + File: DecompMakeData.c + + Contains: Tool to generate tables for use by FixDecomps (CatalogCheck.c). It takes raw data on combining classes and decomposition changes, massages it into the trie form needed by + the function, and emits it on stdout (which should be directed to a file DecompData.h). + + Copyright: © 2002 by Apple Computer, Inc., all rights reserved. 
+ + CVS change log: + + $Log: DecompMakeData.c,v $ + Revision 1.2 2002/12/20 01:20:36 lindak + Merged PR-2937515-2 into ZZ100 + Old HFS+ decompositions need to be repaired + + Revision 1.1.4.1 2002/12/16 18:55:22 jcotting + integrated code from text group (Peter Edberg) that will correct some + illegal names created with obsolete Unicode 2.1.2 decomposition rules + Bug #: 2937515 + Submitted by: jerry cottingham + Reviewed by: don brady + + Revision 1.1.2.1 2002/10/25 17:15:22 jcotting + added code from Peter Edberg that will detect and offer replacement + names for file system object names with pre-Jaguar decomp errors + Bug #: 2937515 + Submitted by: jerry cottingham + Reviewed by: don brady + + Revision 1.1 2002/10/16 06:33:26 pedberg + Initial working version of function and related tools and tables + + + Notes: + + 1. To build: + cc DecompMakeData.c -o DecompMakeData -g + + 2. To use: + ./DecompMakeData > DecompData.h + +*/ + +#include <stddef.h> +#include <stdio.h> + +// Internal includes +#include "DecompDataEnums.h" // enums for data tables + +struct UniCharClassAndRepl { + u_int16_t uChar; + u_int16_t combClass; + u_int16_t action; + u_int16_t matchAndReplacement[3]; +}; +typedef struct UniCharClassAndRepl UniCharClassAndRepl; + +// The following is the raw data on +// 1. Current combining classes, derived from the Unicode 3.2.0 data file +// 2. Changes in decomposition sequences, derived by comparing the canonical decompositions derived from +// the Unicode 2.1.2 data file with the decompositions derived from the Unicode 3.2.0 data file (in both +// cases excluding decompositions in the ranges 2000-2FFF, F900-FAFF, etc.). +// These are folded into a single table so we can do one lookup of the high-order 12 bits of the shifted +// UniChar to determine if there is anything of interest. +// +// Note that these ignore non-BMP characters; the new decompositions and combining classes for those are +// not really relevant for the purpose of fixing the HFS+ filenames. 
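+
+// The sketch below is illustrative only; lookupClassAndRepl is a hypothetical
+// helper that is not part of this tool or of fsck_hfs. It shows how the
+// generated two-stage tables in DecompData.h are meant to be consulted;
+// FixDecomps in CatalogCheck.c performs the equivalent lookups inline.
+//
+//	static u_int16_t
+//	lookupClassAndRepl(u_int16_t uChar, u_int8_t *combClass)
+//	{
+//		// must be 16 bits so that 0xFB00-0xFFFF wraps around to low values
+//		u_int16_t shiftUniChar = uChar + kShiftUniCharOffset;
+//		int8_t rangeIndex;
+//		u_int32_t lo;
+//
+//		*combClass = 0;
+//		if (shiftUniChar >= kShiftUniCharLimit)
+//			return 0;		// outside the ranges of interest
+//		rangeIndex = classAndReplIndex[shiftUniChar >> kLoFieldBitSize];
+//		if (rangeIndex < 0)
+//			return 0;		// no nonzero class, no replacement
+//		lo = shiftUniChar & kLoFieldMask;
+//		*combClass = combClassRanges[rangeIndex][lo];
+//		return replaceRanges[rangeIndex][lo];	// index into replaceData[]; 0 means none
+//	}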
+ +static const UniCharClassAndRepl uCharClassAndRepl[] = { +// cur char comb replacement next chars that replacement string +// to match class action must also match for cur or all +// -------- ----- ---------------------------- --------------- --------------------- + { 0x00A8, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x00A8, 0x0301 } }, + { 0x01F8, 0, kReplaceCurWithTwo, { 0x004E, 0x0300 } }, + { 0x01F9, 0, kReplaceCurWithTwo, { 0x006E, 0x0300 } }, + { 0x0218, 0, kReplaceCurWithTwo, { 0x0053, 0x0326 } }, + { 0x0219, 0, kReplaceCurWithTwo, { 0x0073, 0x0326 } }, + { 0x021A, 0, kReplaceCurWithTwo, { 0x0054, 0x0326 } }, + { 0x021B, 0, kReplaceCurWithTwo, { 0x0074, 0x0326 } }, + { 0x021E, 0, kReplaceCurWithTwo, { 0x0048, 0x030C } }, + { 0x021F, 0, kReplaceCurWithTwo, { 0x0068, 0x030C } }, + { 0x0226, 0, kReplaceCurWithTwo, { 0x0041, 0x0307 } }, + { 0x0227, 0, kReplaceCurWithTwo, { 0x0061, 0x0307 } }, + { 0x0228, 0, kReplaceCurWithTwo, { 0x0045, 0x0327 } }, + { 0x0229, 0, kReplaceCurWithTwo, { 0x0065, 0x0327 } }, + { 0x022A, 0, kReplaceCurWithThree, { 0x004F, 0x0308, 0x0304 } }, + { 0x022B, 0, kReplaceCurWithThree, { 0x006F, 0x0308, 0x0304 } }, + { 0x022C, 0, kReplaceCurWithThree, { 0x004F, 0x0303, 0x0304 } }, + { 0x022D, 0, kReplaceCurWithThree, { 0x006F, 0x0303, 0x0304 } }, + { 0x022E, 0, kReplaceCurWithTwo, { 0x004F, 0x0307 } }, + { 0x022F, 0, kReplaceCurWithTwo, { 0x006F, 0x0307 } }, + { 0x0230, 0, kReplaceCurWithThree, { 0x004F, 0x0307, 0x0304 } }, + { 0x0231, 0, kReplaceCurWithThree, { 0x006F, 0x0307, 0x0304 } }, + { 0x0232, 0, kReplaceCurWithTwo, { 0x0059, 0x0304 } }, + { 0x0233, 0, kReplaceCurWithTwo, { 0x0079, 0x0304 } }, + { 0x0300, 230, 0, { 0 } }, + { 0x0301, 230, 0, { 0 } }, + { 0x0302, 230, 0, { 0 } }, + { 0x0303, 230, 0, { 0 } }, + { 0x0304, 230, 0, { 0 } }, + { 0x0305, 230, 0, { 0 } }, + { 0x0306, 230, kIfNextOneMatchesReplaceAllWithOne, { 0x0307, 0x0310 } }, + { 0x0307, 230, 0, { 0 } }, + { 0x0308, 230, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x0308, 0x0301 } }, + { 0x0309, 230, 0, { 0 } }, + { 0x030A, 230, 0, { 0 } }, + { 0x030B, 230, 0, { 0 } }, + { 0x030C, 230, 0, { 0 } }, + { 0x030D, 230, 0, { 0 } }, + { 0x030E, 230, 0, { 0 } }, + { 0x030F, 230, 0, { 0 } }, + { 0x0310, 230, 0, { 0 } }, + { 0x0311, 230, 0, { 0 } }, + { 0x0312, 230, 0, { 0 } }, + { 0x0313, 230, 0, { 0 } }, + { 0x0314, 230, 0, { 0 } }, + { 0x0315, 232, 0, { 0 } }, + { 0x0316, 220, 0, { 0 } }, + { 0x0317, 220, 0, { 0 } }, + { 0x0318, 220, 0, { 0 } }, + { 0x0319, 220, 0, { 0 } }, + { 0x031A, 232, 0, { 0 } }, + { 0x031B, 216, 0, { 0 } }, + { 0x031C, 220, 0, { 0 } }, + { 0x031D, 220, 0, { 0 } }, + { 0x031E, 220, 0, { 0 } }, + { 0x031F, 220, 0, { 0 } }, + { 0x0320, 220, 0, { 0 } }, + { 0x0321, 202, 0, { 0 } }, + { 0x0322, 202, 0, { 0 } }, + { 0x0323, 220, 0, { 0 } }, + { 0x0324, 220, 0, { 0 } }, + { 0x0325, 220, 0, { 0 } }, + { 0x0326, 220, 0, { 0 } }, + { 0x0327, 202, 0, { 0 } }, + { 0x0328, 202, 0, { 0 } }, + { 0x0329, 220, 0, { 0 } }, + { 0x032A, 220, 0, { 0 } }, + { 0x032B, 220, 0, { 0 } }, + { 0x032C, 220, 0, { 0 } }, + { 0x032D, 220, 0, { 0 } }, + { 0x032E, 220, 0, { 0 } }, + { 0x032F, 220, 0, { 0 } }, + { 0x0330, 220, 0, { 0 } }, + { 0x0331, 220, 0, { 0 } }, + { 0x0332, 220, 0, { 0 } }, + { 0x0333, 220, 0, { 0 } }, + { 0x0334, 1, 0, { 0 } }, + { 0x0335, 1, 0, { 0 } }, + { 0x0336, 1, 0, { 0 } }, + { 0x0337, 1, 0, { 0 } }, + { 0x0338, 1, 0, { 0 } }, + { 0x0339, 220, 0, { 0 } }, + { 0x033A, 220, 0, { 0 } }, + { 0x033B, 220, 0, { 0 } }, + { 0x033C, 220, 0, { 0 } }, + { 0x033D, 230, 0, { 0 } }, + { 
0x033E, 230, 0, { 0 } }, + { 0x033F, 230, 0, { 0 } }, + { 0x0340, 230, 0, { 0 } }, + { 0x0341, 230, 0, { 0 } }, + { 0x0342, 230, 0, { 0 } }, + { 0x0343, 230, 0, { 0 } }, + { 0x0344, 230, 0, { 0 } }, + { 0x0345, 240, 0, { 0 } }, + { 0x0346, 230, 0, { 0 } }, + { 0x0347, 220, 0, { 0 } }, + { 0x0348, 220, 0, { 0 } }, + { 0x0349, 220, 0, { 0 } }, + { 0x034A, 230, 0, { 0 } }, + { 0x034B, 230, 0, { 0 } }, + { 0x034C, 230, 0, { 0 } }, + { 0x034D, 220, 0, { 0 } }, + { 0x034E, 220, 0, { 0 } }, + { 0x0360, 234, 0, { 0 } }, + { 0x0361, 234, 0, { 0 } }, + { 0x0362, 233, 0, { 0 } }, + { 0x0363, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0364, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0365, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0366, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0367, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0368, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0369, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036A, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036B, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036C, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036D, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036E, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x036F, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x0391, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x0391, 0x0301 } }, + { 0x0395, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x0395, 0x0301 } }, + { 0x0397, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x0397, 0x0301 } }, + { 0x0399, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x0399, 0x0301 } }, + { 0x039F, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x039F, 0x0301 } }, + { 0x03A5, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03A5, 0x0301 } }, + { 0x03A9, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03A9, 0x0301 } }, + { 0x03B1, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03B1, 0x0301 } }, + { 0x03B5, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03B5, 0x0301 } }, + { 0x03B7, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03B7, 0x0301 } }, + { 0x03B9, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03B9, 0x0301 } }, + { 0x03BF, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03BF, 0x0301 } }, + { 0x03C5, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03C5, 0x0301 } }, + { 0x03C9, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03C9, 0x0301 } }, + { 0x03D2, 0, kIfNextOneMatchesReplaceAllWithTwo, { 0x030D, 0x03D2, 0x0301 } }, + { 0x0400, 0, kReplaceCurWithTwo, { 0x0415, 0x0300 } }, + { 0x040D, 0, kReplaceCurWithTwo, { 0x0418, 0x0300 } }, + { 0x0450, 0, kReplaceCurWithTwo, { 0x0435, 0x0300 } }, + { 0x045D, 0, kReplaceCurWithTwo, { 0x0438, 0x0300 } }, + { 0x0483, 230, 0, { 0 } }, + { 0x0484, 230, 0, { 0 } }, + { 0x0485, 230, 0, { 0 } }, + { 0x0486, 230, 0, { 0 } }, + { 0x04EC, 0, kReplaceCurWithTwo, { 0x042D, 0x0308 } }, + { 0x04ED, 0, kReplaceCurWithTwo, { 0x044D, 0x0308 } }, + { 0x0591, 220, 0, { 0 } }, + { 0x0592, 230, 0, { 0 } }, + { 0x0593, 230, 0, { 0 } }, + { 0x0594, 230, 0, { 0 } }, + { 0x0595, 230, 0, { 0 } }, + { 0x0596, 220, 0, { 0 } }, + { 0x0597, 230, 0, { 0 } }, + { 0x0598, 230, 0, { 0 } }, + { 0x0599, 230, 0, { 0 } }, + { 0x059A, 222, 0, { 0 } }, + { 0x059B, 220, 0, { 0 } }, + { 0x059C, 230, 0, { 0 } }, + { 0x059D, 230, 0, { 0 } }, + { 0x059E, 230, 0, { 0 } }, + { 0x059F, 230, 0, { 0 } }, + { 0x05A0, 230, 0, { 0 } }, + { 0x05A1, 230, 0, { 0 } }, + { 0x05A3, 220, 0, { 0 } }, + { 0x05A4, 220, 0, { 0 } }, + { 0x05A5, 220, 0, { 0 } }, + { 
0x05A6, 220, 0, { 0 } }, + { 0x05A7, 220, 0, { 0 } }, + { 0x05A8, 230, 0, { 0 } }, + { 0x05A9, 230, 0, { 0 } }, + { 0x05AA, 220, 0, { 0 } }, + { 0x05AB, 230, 0, { 0 } }, + { 0x05AC, 230, 0, { 0 } }, + { 0x05AD, 222, 0, { 0 } }, + { 0x05AE, 228, 0, { 0 } }, + { 0x05AF, 230, 0, { 0 } }, + { 0x05B0, 10, 0, { 0 } }, + { 0x05B1, 11, 0, { 0 } }, + { 0x05B2, 12, 0, { 0 } }, + { 0x05B3, 13, 0, { 0 } }, + { 0x05B4, 14, 0, { 0 } }, + { 0x05B5, 15, 0, { 0 } }, + { 0x05B6, 16, 0, { 0 } }, + { 0x05B7, 17, 0, { 0 } }, + { 0x05B8, 18, 0, { 0 } }, + { 0x05B9, 19, 0, { 0 } }, + { 0x05BB, 20, 0, { 0 } }, + { 0x05BC, 21, 0, { 0 } }, + { 0x05BD, 22, 0, { 0 } }, + { 0x05BF, 23, 0, { 0 } }, + { 0x05C1, 24, 0, { 0 } }, + { 0x05C2, 25, 0, { 0 } }, + { 0x05C4, 230, 0, { 0 } }, + { 0x0622, 0, kReplaceCurWithTwo, { 0x0627, 0x0653 } }, + { 0x0623, 0, kReplaceCurWithTwo, { 0x0627, 0x0654 } }, + { 0x0624, 0, kReplaceCurWithTwo, { 0x0648, 0x0654 } }, + { 0x0625, 0, kReplaceCurWithTwo, { 0x0627, 0x0655 } }, + { 0x0626, 0, kReplaceCurWithTwo, { 0x064A, 0x0654 } }, + { 0x064B, 27, 0, { 0 } }, + { 0x064C, 28, 0, { 0 } }, + { 0x064D, 29, 0, { 0 } }, + { 0x064E, 30, 0, { 0 } }, + { 0x064F, 31, 0, { 0 } }, + { 0x0650, 32, 0, { 0 } }, + { 0x0651, 33, 0, { 0 } }, + { 0x0652, 34, 0, { 0 } }, + { 0x0653, 230, 0, { 0 } }, + { 0x0654, 230, 0, { 0 } }, + { 0x0655, 220, 0, { 0 } }, + { 0x0670, 35, 0, { 0 } }, + { 0x06C0, 0, kReplaceCurWithTwo, { 0x06D5, 0x0654 } }, + { 0x06C2, 0, kReplaceCurWithTwo, { 0x06C1, 0x0654 } }, + { 0x06D3, 0, kReplaceCurWithTwo, { 0x06D2, 0x0654 } }, + { 0x06D6, 230, 0, { 0 } }, + { 0x06D7, 230, 0, { 0 } }, + { 0x06D8, 230, 0, { 0 } }, + { 0x06D9, 230, 0, { 0 } }, + { 0x06DA, 230, 0, { 0 } }, + { 0x06DB, 230, 0, { 0 } }, + { 0x06DC, 230, 0, { 0 } }, + { 0x06DF, 230, 0, { 0 } }, + { 0x06E0, 230, 0, { 0 } }, + { 0x06E1, 230, 0, { 0 } }, + { 0x06E2, 230, 0, { 0 } }, + { 0x06E3, 220, 0, { 0 } }, + { 0x06E4, 230, 0, { 0 } }, + { 0x06E7, 230, 0, { 0 } }, + { 0x06E8, 230, 0, { 0 } }, + { 0x06EA, 220, 0, { 0 } }, + { 0x06EB, 230, 0, { 0 } }, + { 0x06EC, 230, 0, { 0 } }, + { 0x06ED, 220, 0, { 0 } }, + { 0x0711, 36, 0, { 0 } }, + { 0x0730, 230, 0, { 0 } }, + { 0x0731, 220, 0, { 0 } }, + { 0x0732, 230, 0, { 0 } }, + { 0x0733, 230, 0, { 0 } }, + { 0x0734, 220, 0, { 0 } }, + { 0x0735, 230, 0, { 0 } }, + { 0x0736, 230, 0, { 0 } }, + { 0x0737, 220, 0, { 0 } }, + { 0x0738, 220, 0, { 0 } }, + { 0x0739, 220, 0, { 0 } }, + { 0x073A, 230, 0, { 0 } }, + { 0x073B, 220, 0, { 0 } }, + { 0x073C, 220, 0, { 0 } }, + { 0x073D, 230, 0, { 0 } }, + { 0x073E, 220, 0, { 0 } }, + { 0x073F, 230, 0, { 0 } }, + { 0x0740, 230, 0, { 0 } }, + { 0x0741, 230, 0, { 0 } }, + { 0x0742, 220, 0, { 0 } }, + { 0x0743, 230, 0, { 0 } }, + { 0x0744, 220, 0, { 0 } }, + { 0x0745, 230, 0, { 0 } }, + { 0x0746, 220, 0, { 0 } }, + { 0x0747, 230, 0, { 0 } }, + { 0x0748, 220, 0, { 0 } }, + { 0x0749, 230, 0, { 0 } }, + { 0x074A, 230, 0, { 0 } }, + { 0x093C, 7, 0, { 0 } }, + { 0x094D, 9, 0, { 0 } }, + { 0x0951, 230, 0, { 0 } }, + { 0x0952, 220, 0, { 0 } }, + { 0x0953, 230, 0, { 0 } }, + { 0x0954, 230, 0, { 0 } }, + { 0x09AC, 0, kIfNextOneMatchesReplaceAllWithOne, { 0x09BC, 0x09B0 } }, + { 0x09BC, 7, 0, { 0 } }, + { 0x09CD, 9, 0, { 0 } }, + { 0x0A21, 0, kIfNextOneMatchesReplaceAllWithOne, { 0x0A3C, 0x0A5C } }, + { 0x0A33, 0, kReplaceCurWithTwo, { 0x0A32, 0x0A3C } }, + { 0x0A36, 0, kReplaceCurWithTwo, { 0x0A38, 0x0A3C } }, + { 0x0A3C, 7, 0, { 0 } }, + { 0x0A4D, 9, 0, { 0 } }, + { 0x0ABC, 7, 0, { 0 } }, + { 0x0ACD, 9, 0, { 0 } }, + { 0x0B2F, 0, 
kIfNextOneMatchesReplaceAllWithOne, { 0x0B3C, 0x0B5F } }, + { 0x0B3C, 7, 0, { 0 } }, + { 0x0B4D, 9, 0, { 0 } }, + { 0x0BCD, 9, 0, { 0 } }, + { 0x0C4D, 9, 0, { 0 } }, + { 0x0C55, 84, 0, { 0 } }, + { 0x0C56, 91, 0, { 0 } }, + { 0x0CCD, 9, 0, { 0 } }, + { 0x0D4D, 9, 0, { 0 } }, + { 0x0DCA, 9, 0, { 0 } }, + { 0x0DDA, 0, kReplaceCurWithTwo, { 0x0DD9, 0x0DCA } }, + { 0x0DDC, 0, kReplaceCurWithTwo, { 0x0DD9, 0x0DCF } }, + { 0x0DDD, 0, kReplaceCurWithThree, { 0x0DD9, 0x0DCF, 0x0DCA } }, + { 0x0DDE, 0, kReplaceCurWithTwo, { 0x0DD9, 0x0DDF } }, + { 0x0E38, 103, 0, { 0 } }, + { 0x0E39, 103, 0, { 0 } }, + { 0x0E3A, 9, 0, { 0 } }, + { 0x0E48, 107, 0, { 0 } }, + { 0x0E49, 107, 0, { 0 } }, + { 0x0E4A, 107, 0, { 0 } }, + { 0x0E4B, 107, 0, { 0 } }, + { 0x0E4D, 0, kIfNextOneMatchesReplaceAllWithOne, { 0x0E32, 0x0E33 } }, + { 0x0EB8, 118, 0, { 0 } }, + { 0x0EB9, 118, 0, { 0 } }, + { 0x0EC8, 122, 0, { 0 } }, + { 0x0EC9, 122, 0, { 0 } }, + { 0x0ECA, 122, 0, { 0 } }, + { 0x0ECB, 122, 0, { 0 } }, + { 0x0ECD, 0, kIfNextOneMatchesReplaceAllWithOne, { 0x0EB2, 0x0EB3 } }, + { 0x0F18, 220, 0, { 0 } }, + { 0x0F19, 220, 0, { 0 } }, + { 0x0F35, 220, 0, { 0 } }, + { 0x0F37, 220, 0, { 0 } }, + { 0x0F39, 216, 0, { 0 } }, + { 0x0F71, 129, 0, { 0 } }, + { 0x0F72, 130, 0, { 0 } }, + { 0x0F74, 132, 0, { 0 } }, + { 0x0F7A, 130, 0, { 0 } }, + { 0x0F7B, 130, 0, { 0 } }, + { 0x0F7C, 130, 0, { 0 } }, + { 0x0F7D, 130, 0, { 0 } }, + { 0x0F80, 130, 0, { 0 } }, + { 0x0F82, 230, 0, { 0 } }, + { 0x0F83, 230, 0, { 0 } }, + { 0x0F84, 9, 0, { 0 } }, + { 0x0F86, 230, 0, { 0 } }, + { 0x0F87, 230, 0, { 0 } }, + { 0x0FB2, 0, kIfNextTwoMatchReplaceAllWithOne, { 0x0F80, 0x0F71, 0x0F77 } }, + { 0x0FB3, 0, kIfNextTwoMatchReplaceAllWithOne, { 0x0F80, 0x0F71, 0x0F79 } }, + { 0x0FC6, 220, 0, { 0 } }, + { 0x1026, 0, kReplaceCurWithTwo, { 0x1025, 0x102E } }, + { 0x1037, 7, 0, { 0 } }, + { 0x1039, 9, 0, { 0 } }, + { 0x1714, 9, 0, { 0 } }, // new char in Uncode 3.2 + { 0x1734, 9, 0, { 0 } }, // new char in Uncode 3.2 + { 0x17D2, 9, 0, { 0 } }, + { 0x18A9, 228, 0, { 0 } }, + { 0x20D0, 230, 0, { 0 } }, + { 0x20D1, 230, 0, { 0 } }, + { 0x20D2, 1, 0, { 0 } }, + { 0x20D3, 1, 0, { 0 } }, + { 0x20D4, 230, 0, { 0 } }, + { 0x20D5, 230, 0, { 0 } }, + { 0x20D6, 230, 0, { 0 } }, + { 0x20D7, 230, 0, { 0 } }, + { 0x20D8, 1, 0, { 0 } }, + { 0x20D9, 1, 0, { 0 } }, + { 0x20DA, 1, 0, { 0 } }, + { 0x20DB, 230, 0, { 0 } }, + { 0x20DC, 230, 0, { 0 } }, + { 0x20E1, 230, 0, { 0 } }, + { 0x20E5, 1, 0, { 0 } }, // new char in Uncode 3.2 + { 0x20E6, 1, 0, { 0 } }, // new char in Uncode 3.2 + { 0x20E7, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x20E8, 220, 0, { 0 } }, // new char in Uncode 3.2 + { 0x20E9, 230, 0, { 0 } }, // new char in Uncode 3.2 + { 0x20EA, 1, 0, { 0 } }, // new char in Uncode 3.2 + { 0x302A, 218, 0, { 0 } }, + { 0x302B, 228, 0, { 0 } }, + { 0x302C, 232, 0, { 0 } }, + { 0x302D, 222, 0, { 0 } }, + { 0x302E, 224, 0, { 0 } }, + { 0x302F, 224, 0, { 0 } }, + { 0x3099, 8, 0, { 0 } }, + { 0x309A, 8, 0, { 0 } }, + { 0xFB1D, 0, kReplaceCurWithTwo, { 0x05D9, 0x05B4 } }, + { 0xFB1E, 26, 0, { 0 } }, + { 0xFE20, 230, 0, { 0 } }, + { 0xFE21, 230, 0, { 0 } }, + { 0xFE22, 230, 0, { 0 } }, + { 0xFE23, 230, 0, { 0 } }, + { 0, 0, 0, { 0 } } +}; + +enum { + kMaxRangeCount = 108, + kMaxReplaceDataCount = 256, + kIndexValuesPerLine = 16, + kReplDataValuesPerLine = 8 +}; + +static int8_t rangesIndex[kHiFieldEntryCount]; // if >= 0, then index into xxxRanges[] +static u_int8_t classRanges[kMaxRangeCount][kLoFieldEntryCount]; +static u_int8_t 
replRanges[kMaxRangeCount][kLoFieldEntryCount]; +static u_int16_t rangesKey[kMaxRangeCount]; // remembers starting Unicode for range +static u_int16_t replacementData[kMaxReplaceDataCount]; + +int main(int argc, char *argv[]) { + u_int32_t entryIndex, rangeIndex; + const UniCharClassAndRepl * classAndReplPtr; + int32_t rangeCount; + u_int32_t replDataCount; + + // print header stuff + plog("/*\n"); + plog("\tFile:\t\tDecompData.h\n"); + plog("\tContains:\tData tables for use in FixDecomps (CatalogCheck.c)\n"); + plog("\tNote:\t\tThis file is generated automatically by running DecompMakeData\n"); + plog("*/\n"); + plog("#include \"DecompDataEnums.h\"\n\n"); + + // initialize arrays + for (entryIndex = 0; entryIndex < kHiFieldEntryCount; entryIndex++) { + rangesIndex[entryIndex] = -1; + } + for (rangeIndex = 0; rangeIndex < kMaxRangeCount; rangeIndex++) { + for (entryIndex = 0; entryIndex < kLoFieldEntryCount; entryIndex++) { + classRanges[rangeIndex][entryIndex] = 0; + replRanges[rangeIndex][entryIndex] = 0; + } + } + rangeCount = 0; + replDataCount = 0; + replacementData[replDataCount++] = 0; // need to start real data at index 1 + + // process data + for (classAndReplPtr = uCharClassAndRepl; classAndReplPtr->uChar != 0; classAndReplPtr++) { + u_int32_t matchAndReplacementCount, matchAndReplacementIndex; + u_int16_t shiftUChar = classAndReplPtr->uChar + kShiftUniCharOffset; + if (shiftUChar >= kShiftUniCharLimit) { + plog("Exceeded uChar range for 0x%04X\n", classAndReplPtr->uChar); + return 1; + } + entryIndex = shiftUChar >> kLoFieldBitSize; + if (rangesIndex[entryIndex] == -1) { + if (rangeCount >= kMaxRangeCount) { + plog("Exceeded max range count with 0x%04X\n", classAndReplPtr->uChar); + return 1; + } + rangesKey[rangeCount] = classAndReplPtr->uChar & ~kLoFieldMask; + rangesIndex[entryIndex] = rangeCount++; + } + entryIndex = shiftUChar & kLoFieldMask; + + if (classAndReplPtr->combClass != 0) + classRanges[rangeCount - 1][entryIndex] = classAndReplPtr->combClass; + + if (classAndReplPtr->action != 0) { + switch (classAndReplPtr->action) { + case kReplaceCurWithTwo: + case kIfNextOneMatchesReplaceAllWithOne: + matchAndReplacementCount = 2; + break; + case kReplaceCurWithThree: + case kIfNextOneMatchesReplaceAllWithTwo: + case kIfNextTwoMatchReplaceAllWithOne: + matchAndReplacementCount = 3; + break; + default: + matchAndReplacementCount = 0; + break; + } + if (replDataCount + matchAndReplacementCount >= kMaxReplaceDataCount) { + plog("Exceeded max replacement data count with 0x%04X\n", classAndReplPtr->uChar); + return 1; + } + replRanges[rangeCount - 1][entryIndex] = replDataCount; + replacementData[replDataCount++] = classAndReplPtr->action; + for (matchAndReplacementIndex = 0; matchAndReplacementIndex < matchAndReplacementCount; matchAndReplacementIndex++) { + replacementData[replDataCount++] = classAndReplPtr->matchAndReplacement[matchAndReplacementIndex]; + } + } + } + + // print filled-in index + plog("static const int8_t classAndReplIndex[kHiFieldEntryCount] = {\n"); + for (entryIndex = 0; entryIndex < kHiFieldEntryCount; entryIndex++) { + char * formatPtr = (entryIndex + 1 < kHiFieldEntryCount)? 
"%2d,\t": "%2d\t"; + if (entryIndex % kIndexValuesPerLine == 0) // beginning of line, + plog("\t"); // print tab + plog(formatPtr, rangesIndex[entryIndex]); // print values + if ((entryIndex + 1) % kIndexValuesPerLine == 0) // end of line, print starting UniChar value + plog("// uChar 0x%04X-\n", (u_int16_t)(((entryIndex + 1 - kIndexValuesPerLine) << kLoFieldBitSize) - kShiftUniCharOffset) ); + } + plog("};\n\n"); + + // print filled in class ranges + plog("static const u_int8_t combClassRanges[][kLoFieldEntryCount] = {\n", kLoFieldEntryCount); + for (rangeIndex = 0; rangeIndex < rangeCount; rangeIndex++) { + plog("\t{\t"); + for (entryIndex = 0; entryIndex < kLoFieldEntryCount; entryIndex++) { + char * formatPtr = (entryIndex + 1 < kLoFieldEntryCount)? "%3d,": "%3d"; + plog(formatPtr, classRanges[rangeIndex][entryIndex]); // print values + } + plog("\t},\t// uChar 0x%04X-\n", rangesKey[rangeIndex]); + } + plog("};\n\n"); + + // print filled in repl ranges + plog("static const u_int8_t replaceRanges[][kLoFieldEntryCount] = {\n", kLoFieldEntryCount); + for (rangeIndex = 0; rangeIndex < rangeCount; rangeIndex++) { + plog("\t{\t"); + for (entryIndex = 0; entryIndex < kLoFieldEntryCount; entryIndex++) { + char * formatPtr = (entryIndex + 1 < kLoFieldEntryCount)? "%3d,": "%3d"; + plog(formatPtr, replRanges[rangeIndex][entryIndex]); // print values + } + plog("\t},\t// uChar 0x%04X-\n", rangesKey[rangeIndex]); + } + plog("};\n\n"); + + // print filled in replacement data + plog("static const u_int16_t replaceData[] = {\n"); + for (entryIndex = 0; entryIndex < replDataCount; entryIndex++) { + char * formatPtr = (entryIndex + 1 < replDataCount)? "0x%04X,\t": "0x%04X\t"; + if (entryIndex % kReplDataValuesPerLine == 0) // beginning of line, + plog("\t"); // print tab + plog(formatPtr, replacementData[entryIndex]); // print values + if ((entryIndex + 1) % kReplDataValuesPerLine == 0 || entryIndex + 1 == replDataCount) // end of line, + plog("// index %d-\n", entryIndex & ~(kReplDataValuesPerLine-1) ); // print starting index value + } + plog("};\n\n"); + + // print summary info + plog("// combClassData:\n"); + plog("// trimmed index: kHiFieldEntryCount(= %d) bytes\n", kHiFieldEntryCount); + plog("// ranges: 2 * %d ranges * kLoFieldEntryCount(= %d) bytes = %d\n", rangeCount, kLoFieldEntryCount, 2*rangeCount*kLoFieldEntryCount); + plog("// replData: %d entries * 2 = %d\n", replDataCount, 2*replDataCount); + plog("// total: %d\n\n", kHiFieldEntryCount + 2*rangeCount*kLoFieldEntryCount + 2*replDataCount); + + return 0; +} diff --git a/fsck_hfs/dfalib/FixDecompsNotes.txt b/fsck_hfs/dfalib/FixDecompsNotes.txt new file mode 100644 index 0000000..199059c --- /dev/null +++ b/fsck_hfs/dfalib/FixDecompsNotes.txt @@ -0,0 +1,190 @@ +# +# File: fsckFixDecompsNotes.txt +# +# Contains: Notes on fsckFixDecomps function and related tools +# +# Copyright: © 2002 by Apple Computer, Inc., all rights reserved. 
+# +# CVS change log: +# +# $Log: FixDecompsNotes.txt,v $ +# Revision 1.2 2002/12/20 01:20:36 lindak +# Merged PR-2937515-2 into ZZ100 +# Old HFS+ decompositions need to be repaired +# +# Revision 1.1.4.1 2002/12/16 18:55:22 jcotting +# integrated code from text group (Peter Edberg) that will correct some +# illegal names created with obsolete Unicode 2.1.2 decomposition rules +# Bug #: 2937515 +# Submitted by: jerry cottingham +# Reviewed by: don brady +# +# Revision 1.1.2.1 2002/10/25 17:15:23 jcotting +# added code from Peter Edberg that will detect and offer replacement +# names for file system object names with pre-Jaguar decomp errors +# Bug #: 2937515 +# Submitted by: jerry cottingham +# Reviewed by: don brady +# +# Revision 1.2 2002/10/16 20:17:21 pedberg +# Add more notes +# +# Revision 1.1 2002/10/16 06:53:54 pedberg +# [3066897, 2937515] Start adding notes about this code +# +# --------------------------------------------------------------------------- + +Code provided per Radar #3066897 to fix bug #2937515. + +The Unicode decomposition used to date for HFS+ volumes - as described in + <http://developer.apple.com/technotes/tn/tn1150.html#CanonicalDecomposition> + <http://developer.apple.com/technotes/tn/tn1150table.html> +- is based on a modified version of the decomposition rules for Unicode 2.1.2 +(but even those were not correctly implemented for certain combinations of +multiple combining marks). Unicode has updated the decomposition and combining +mark reordering rules and data many times since then, but they have locked them +down for Unicode 3.1. This is because Unicode 3.1 is the basis of the Unicode +normalization forms such as NFC and NFD. We began supporting these normalization +formats in Jaguar. + +Because of this, the Apple Unicode cross-functional committee decided to do a +one-time change to update the decomposition rules used for HFS+ volumes from the +Unicode 2.1.2 rules to the Unicode 3.1 rules. TEC and the kernel encoding +converters made this change in Jaguar. One other piece that was supposed to +happen was an enhancement to fsck to convert filenames on HFS+ volumes from the +old decomposition to the new. + +That fsck change did not happen in Jaguar, and as a result there are bugs such +as 2937515 (in which users are seeing partial garbage for filenames). The update +affects the decomposition of Greek characters - about 80 of them (18 of which +correspond to characters in MacGreek). It also affects the decomposition of a +few others: around 23 Latin-script characters and 18 Cyrillic characters (none +of which correspond to anything in a traditional Mac encoding), 8 Arabic +characters (5 of which do correspond to MacArabic characters), 16 Indic, Thai, & +Lao characters (3 of which correspond to characters in Mac encodings). It also +potentially affects the ordering of all combining marks. + +This directory contains code provided per 3066897 that fsck can use to address +this problem for HFS+ volumes. + +---- +A. Data structure + +The data is organized into a two-level trie. The first level is a table that +maps the high-order 12 bits of a UniChar to an index value. The value is -1 if +no character with those high 12 bits has either a decomposition update or a +nonzero combining class; otherwise, it is an index into an array of ranges that +map the low 4 bits of the UniChar to the necessary data. There are two such +arrays of ranges; one provides the mappings to combining class values, the other +provides the mappings to decomposition update information. 
The latter is in the +form of an index into an array of sequences that contain an action code, an +optional list of additional characters that need to be matched for a complete +sequence match (in the case where a 2-element or 3-element sequence needs to be +updated), and the replacement decomposition sequence. + +There is one additional twist for the first-level trie table. Since the +characters that have class or decomposition data are all either in the range +0x0000-30FF or 0xFB00-FFFF, we can save 3K space in the table by eliminating the +middle. Before looking up a UTF16 character in the table, we first add 0x0500 to +it; the resulting shifted UniChar is in the range 0x0000-35FF. So if the shifted +UniChar is >= 0x3600, we don't bother looking in the table. + +The table data is generated automatically by the fsckMakeDecompData tool; the +sources for this tool contain an array with the raw data for characters that +either have nonzero combining class or begin a sequence of characters that may +need to be updated. The tool generates the index, the two range arrays, and the +list of decomposition update actions. + +---- +B. Files + +* fsckDecompDataEnums.h contains enums related to the data tables + +* fsckMakeDecompData.c contains the raw data source; when this tool is compiled +and run, it writes to standard output the contents of the binary data tables; +this should be directed into a file fsckDecompData.h. + +* fsckFixDecomps.h contains the interface for the fsckFixDecomps function (and +related types) + +* fsckFixDecomps.c contains the function code. + +---- +C. Function interface + +The basic interface (defined in fsckFixDecomps.h) is: + +Boolean fsckFixDecomps( ConstHFSUniStr255Param inFilename, HFSUniStr255 +*outFilename ); + +If inFilename needs updating and the function was able to do this without +overflowing the 255-character limit, it returns 1 (true) and outFilename +contains the updated filename. If inFilename did not need updating, or an update +would overflow the limit, the function returns 0 (false) and the contents of +outFilename are undefined. + +The function needs a couple of types from Apple interface files (not standard C +ones): HFSUniStr255 and Boolean. For now these are defined in fsckFixDecomps.h +if NO_APPLE_INCLUDES is 1. For building with fsck_hfs, the appropriate includes +should be put into fsckFixDecomps.h. + +For the record, hfs_format.h defines HFSUniStr255 as follows: + +struct HFSUniStr255 { + u_int16_t length; /* number of unicode characters */ + u_int16_t unicode[255]; /* unicode characters */ +}; +typedef struct HFSUniStr255 HFSUniStr255; +typedef const HFSUniStr255 *ConstHFSUniStr255Param; + +---- +D. Function implementation + +Characters that don't require any special handling have combining class 0 and do +not begin a decomposition sequence (of 1-3 characters) that needs updating. For +these characters, the function just copies them from inFilename to outFilename +and sets the pointer outNameCombSeqPtr to NULL (when this pointer is not NULL, +it points to the beginning of a sequence of combining marks that continues up to +the current character; if the current character is combining, it may need to be +reordered into that sequence). The copying operation is cheap, and postponing it +until we know the filename needs modification would make the code much more +complicated. + +This copying operation may be invoked from many places in the code, some deeply +nested - any time the code determines that the current character needs no +special handling.
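Before continuing with the implementation notes, here is a minimal sketch of the
table lookup that section A describes. This is not part of the original sources:
the table names (classAndReplIndex, combClassRanges, replaceRanges, replaceData)
and the k... constants are the ones emitted by the data-generation tool and its
enums header, while the helper name getClassAndReplIndex is invented here purely
for illustration.

static void getClassAndReplIndex(u_int16_t uChar, u_int8_t *combClass, u_int8_t *replIndex)
{
	/* 16-bit wrap folds 0xFB00-FFFF down to 0x0000-04FF; 0x0000-30FF becomes 0x0500-35FF */
	u_int16_t	shiftUChar = uChar + kShiftUniCharOffset;
	int8_t		rangeIndex;

	*combClass = 0;				/* defaults: combining class 0, no decomposition update */
	*replIndex = 0;

	if (shiftUChar >= kShiftUniCharLimit)	/* outside both interesting ranges */
		return;
	rangeIndex = classAndReplIndex[shiftUChar >> kLoFieldBitSize];
	if (rangeIndex < 0)			/* no character with these high bits has class or repl data */
		return;
	*combClass = combClassRanges[rangeIndex][shiftUChar & kLoFieldMask];
	*replIndex = replaceRanges[rangeIndex][shiftUChar & kLoFieldMask];	/* index into replaceData[], 0 = none */
}

A nonzero *replIndex points at an entry in replaceData[] whose first value is the
action code (kReplaceCurWithTwo, kIfNextOneMatchesReplaceAllWithOne, and so on)
followed by the match and replacement characters; that is the information the fix
function uses to decide whether the current 1-3 character sequence must be rewritten.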
For this reason it has a label (CopyBaseChar) and is located +at the end of the character processing loop; various places in the code use goto +statements to jump to it (this is a situation where they are justified). + +The main function loop has 4 sections. + +First, it quickly determines if the high 12 bits of the character indicate that +it is in a range that has neither nonzero combining class nor any decomposition +sequences that need updating. If so, the code jumps straight to CopyBaseChar. + +Second, the code determines if the character is part of a sequence that needs +updating. It checks if the current character has a corresponding action in the +replaceData array. If so, depending on the action, it may need to check for +additional matching characters in inFilename. If the sequence of 1-3 characters +is successfully matched, then a replacement sequence of 1-3 characters is +inserted at the corresponding position in outFilename. While this replacement +sequence is being inserted, each character must be checked to see if it has +nonzero combining class and needs reordering (some replacement sequences consist +entirely of combining characters and may interact with combining characters in +the filename before the updated sequence). + +Third, the code handles characters whose high-order 12 bits indicated that some +action was required, but were not part of sequences that needed updating (these +may include characters that were examined in the second part but were part of +sequences that did not completely match, so there are also goto fallthroughs to +this code - labeled CheckCombClass - from the second part). These characters +have to be checked for nonzero combining class; if so, they are reordered as +necessary. Each time a new nonzero class character is encountered, it is added +to outFIlename at the correct point in any active combining character sequence +(with other characters in the sequence moved as necessary), so the sequence +pointed to by outNameCombSeqPtr is always in correct order up to the current +character. + +Finally, the fourth part has the default handlers to just copy characters to +outFilename. + diff --git a/fsck_hfs/dfalib/HardLinkCheck.c b/fsck_hfs/dfalib/HardLinkCheck.c new file mode 100755 index 0000000..bc1787d --- /dev/null +++ b/fsck_hfs/dfalib/HardLinkCheck.c @@ -0,0 +1,1322 @@ +/* + * Copyright (c) 2000-2002, 2004-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "Scavenger.h" +#include <sys/stat.h> + +#define DEBUG_HARDLINKCHECK 0 + +/* If set, the node in hash is initialized and has valid inodeID */ +#define LINKINFO_INIT 0x01 +/* If set, verification of corresponding inode is completed successfully */ +#define LINKINFO_CHECK 0x02 + +/* info saved for each indirect link encountered */ +struct IndirectLinkInfo { + /* linkID is link reference number for file hard links, and + * inodeID for directory hard links. + */ + UInt32 linkID; + UInt32 linkCount; + UInt32 flags; + struct HardLinkList *list; +}; + +#define VISITED_INODE_ID 1 + +struct HardLinkInfo { + UInt32 privDirID; + SGlobPtr globals; + uint32_t priv_dir_itime; /* Creation (initialization) time of metadata folder */ + uint32_t root_dir_itime; /* Creation (initialization) time of root folder */ + PrimeBuckets *fileBucket; +}; + +struct HardLinkList { + UInt32 prev; + UInt32 fileID; + UInt32 next; +}; + +HFSPlusCatalogKey gMetaDataDirKey = { + 48, /* key length */ + 2, /* parent directory ID */ + { + 21, /* number of unicode characters */ + { + '\0','\0','\0','\0', + 'H','F','S','+',' ', + 'P','r','i','v','a','t','e',' ', + 'D','a','t','a' + } + } +}; + +/* private local routines */ +static int GetPrivateDir(SGlobPtr gp, CatalogRecord *rec); +static int GetRootDir(SGlobPtr gp, CatalogRecord *rec); +static int RecordOrphanOpenUnlink(SGlobPtr gp, UInt32 parID, unsigned char * filename); +static int RecordBadHardLinkChainFirst(SGlobPtr, UInt32, UInt32, UInt32); +static int RecordBadHardLinkNext(SGlobPtr gp, UInt32 fileID, UInt32 is, UInt32 shouldbe); +static int RecordBadHardLinkPrev(SGlobPtr gp, UInt32 fileID, UInt32 is, UInt32 shouldbe); +static int RecordBadLinkCount(SGlobPtr gp, UInt32 inodeID, UInt32 is, UInt32 shouldbe) ; +static int RecordOrphanLink(SGlobPtr gp, Boolean isdir, UInt32 linkID); +static int RecordOrphanInode(SGlobPtr gp, Boolean isdir, UInt32 inodeID); +static void hash_insert(UInt32 linkID, int totalSlots, int slotsUsed, struct IndirectLinkInfo *linkInfo); +static struct IndirectLinkInfo * hash_search(UInt32 linkID, int totalSlots, int slotsUsed, struct IndirectLinkInfo *linkInfo); + +/* + * Some functions used when sorting the hard link chain. + * chain_compare() is used by qsort; find_id is just a linear + * search to find a specific fileID; and tsort does a + * topological sort on the linked list. + */ +static int +chain_compare(const void *a1, const void *a2) { + struct HardLinkList *left = (struct HardLinkList*)a1; + struct HardLinkList *right = (struct HardLinkList*)a2; + + return (left->prev - right->prev); +} + +static int +find_id(struct HardLinkList *list, int nel, int id) +{ + int i; + for (i = 0; i < nel; i++) { + if (list[i].fileID == id) + return i; + } + return 0; +} + +static int +tsort(struct HardLinkList *list, int nel) +{ + struct HardLinkList *tmp; + int cur_indx, tmp_indx = 0; + + int rv = 0; + + tmp = calloc(sizeof(struct HardLinkList), nel); + if (tmp == NULL) { + rv = ENOMEM; + goto done; + } + + /* + * Since we only check list.next when doing the sort, we want to + * start with nodes that have prev == 0 (ones at the top of the + * graph, in other words). If there aren't any with a prev of 0, + * then the chain is broken somehow, and we'll repair it later. 
+ */ + qsort(list, nel, sizeof(list[0]), chain_compare); + + for (cur_indx = 0; cur_indx < nel; cur_indx++) { + int i; + /* Skip nodes we've already come across */ + if (list[cur_indx].fileID == 0) + continue; + + /* Copy this node over to the new list */ + tmp[tmp_indx++] = list[cur_indx]; + list[cur_indx].fileID = 0; + + /* ... and then find all its children. */ + for (i = tmp[tmp_indx-1].next; i != 0; ) { + // look for the node in list with that fileID + int j = find_id(list, nel, i); + if (j == 0) { + // We couldn't find it + // So we're done + i = 0; + } else { + // we add this one to tmp + tmp[tmp_indx++] = list[j]; + list[j].fileID = 0; + i = tmp[tmp_indx-1].next; + } + } + } + + /* Copy the temporary list over, and we're done. */ + memcpy(list, tmp, nel * sizeof(struct HardLinkList)); +done: + if (tmp) { + free(tmp); + } + + return rv; +} + +/* + * CheckHardLinkList + * + * Verify that the linked list of hard link nodes (the catalog entries, not the indirect + * node in the private metadata directory) are correct and sane. If any discrepancies + * are detected, create repair order. + * + * To do this, we need to topologically sort the list, and then ensure that the prev/next + * chains are correct. + * + */ +static int +CheckHardLinkList(SGlobPtr gp, UInt32 inodeID, struct HardLinkList *list, int calc_link_count, UInt32 firstID) +{ + int retval; + int indx; + + if (list == NULL) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("\tCheckHardLinkList: list=NULL for inodeID = %u\n", inodeID); + } + return ENOMEM; + } + /* + * If we have a list, then we sort and verify it. It's pretty easy, once + * we're sorted, and tsort() above does the hard work for that. + */ + if (calc_link_count > 1) { + retval = tsort(list, calc_link_count); + if (retval) { + goto done; + } + } + + /* Previous link of first link should always be zero */ + if (list[0].prev != 0) { + RecordBadHardLinkPrev(gp, list[0].fileID, list[0].prev, 0); + } + + /* First ID in the inode should match the ID of the first hard link */ + if (list[0].fileID != firstID) { + RecordBadHardLinkChainFirst(gp, inodeID, firstID, list[0].fileID); + } + + /* Check if the previous/next IDs for all nodes except the first node are valid */ + for (indx = 1; indx < calc_link_count; indx++) { + if (list[indx-1].next != list[indx].fileID) { + RecordBadHardLinkNext(gp, list[indx-1].fileID, list[indx-1].next, list[indx].fileID); + } + + if (list[indx].prev != list[indx-1].fileID) { + RecordBadHardLinkPrev(gp, list[indx].fileID, list[indx].prev, list[indx-1].fileID); + } + } + + /* Next ID for the last link should always be zero */ + if (list[calc_link_count-1].next != 0) { + RecordBadHardLinkNext(gp, list[calc_link_count-1].fileID, list[calc_link_count-1].next, 0); + } + +done: +#if DEBUG_HARDLINKCHECK + /* This is just for debugging -- it's useful to know what the list looks like */ + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + for (indx = 0; indx < calc_link_count; indx++) { + fplog(stderr, "CNID %u: #%u: <%u, %u, %u>\n", inodeID, indx, list[indx].prev, list[indx].fileID, list[indx].next); + } + } +#endif + + return 0; +} + +/* + * HardLinkCheckBegin + * + * Get ready to capture indirect link info. + * Called before iterating over all the catalog leaf nodes. 
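/*
 * Editorial illustration, not part of the patch: what a healthy chain looks
 * like to CheckHardLinkList() above. For an inode whose recorded first link
 * is CNID 30 (the CNID values here are made up), three links sort into
 *
 *	{ prev = 0,  fileID = 30, next = 31 }
 *	{ prev = 30, fileID = 31, next = 32 }
 *	{ prev = 31, fileID = 32, next = 0  }
 *
 * i.e. prev of the first entry is 0, the first entry's fileID matches the
 * inode's first-link ID, each entry's next equals the following entry's
 * fileID (and each prev equals the preceding fileID), and next of the last
 * entry is 0. Any triple that breaks one of these rules produces a
 * RecordBadHardLinkChainFirst / RecordBadHardLinkPrev / RecordBadHardLinkNext
 * repair order.
 */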
+ */ +int +HardLinkCheckBegin(SGlobPtr gp, void** cookie) +{ + struct HardLinkInfo *info; + CatalogRecord rec; + CatalogRecord rootFolder; + UInt32 folderID; + + if (GetPrivateDir(gp, &rec) == 0) { + folderID = rec.hfsPlusFolder.folderID; + } else { + folderID = 0; + } + + info = (struct HardLinkInfo *) malloc(sizeof(struct HardLinkInfo)); + + if (info == NULL) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("hardLinkCheckBegin: malloc(%zu) failed\n", sizeof(struct HardLinkInfo)); + } + return 1; + } + + info->privDirID = folderID; + info->priv_dir_itime = folderID ? rec.hfsPlusFolder.createDate : 0; + if (GetRootDir(gp, &rootFolder) == 0) { + info->root_dir_itime = rootFolder.hfsPlusFolder.createDate; + } else { + info->root_dir_itime = 0; + } + + info->globals = gp; + + /* We will use the ID of private metadata directory for file hard + * links to skip over hard link inode for an alias from directory + * hard link checks. + */ + gp->filelink_priv_dir_id = folderID; + + + info->fileBucket = calloc(1, sizeof(PrimeBuckets)); + if (info->fileBucket == NULL) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("HardLinkCheckBegin: prime bucket allocation failed\n"); + } + } + + * cookie = info; + return (0); +} + +/* + * HardLinkCheckEnd + * + * Dispose of captured data. + * Called after calling CheckHardLinks. + */ +void +HardLinkCheckEnd(void * cookie) +{ + if (cookie) { + struct HardLinkInfo * infoPtr; + + infoPtr = (struct HardLinkInfo *) cookie; + if (infoPtr->fileBucket) { + free(infoPtr->fileBucket); + infoPtr->fileBucket = NULL; + } + DisposeMemory(cookie); + } + +} + +/* Structures for file hard link hash used when a + * file hard link created in pre-Leopard OS is detected + * i.e. the file inode and hard links do not have the + * kHFSHasLinkChainBit set, and the first link, the + * previous link and the next link IDs are zero. The + * link count for such hard links cannot be verified + * using CRT, therefore it is accounted in this hash. + */ +#define FILELINK_HASH_SIZE 257 + +struct filelink_hash { + UInt32 link_ref_num; + UInt32 found_link_count; + UInt32 calc_link_count; + struct filelink_hash *next; +}; + +struct filelink_hash **filelink_head = NULL; +UInt32 filelink_entry_count = 0; + +/* Search and return pointer to the entry for given inode ID. + * If no entry is found, return NULL. + */ +static struct filelink_hash *filelink_hash_search(UInt32 link_ref_num) +{ + struct filelink_hash *cur; + + if (filelink_head == NULL) { + return NULL; + } + + cur = filelink_head[link_ref_num % FILELINK_HASH_SIZE]; + while (cur) { + if (link_ref_num == cur->link_ref_num) { + break; + } + cur = cur->next; + } + + return cur; +} + +/* Allocate and insert entry for given inode ID in the + * hash. The caller function is responsible for searching + * for duplicates before calling this function. + * Returns the pointer to the new hash entry. + */ +static struct filelink_hash *filelink_hash_insert(UInt32 link_ref_num) +{ + struct filelink_hash *cur; + + cur = malloc(sizeof(struct filelink_hash)); + if (!cur) { + return cur; + } + cur->link_ref_num = link_ref_num; + cur->found_link_count = 0; + cur->calc_link_count = 0; + cur->next = filelink_head[link_ref_num % FILELINK_HASH_SIZE]; + filelink_head[link_ref_num % FILELINK_HASH_SIZE] = cur; + filelink_entry_count++; + return cur; +} + +/* Update the hash with information about a file hard link + * that points to given inode ID. The calculated link count + * for given inode is incremented. 
+ * Returns zero if the value was successfully updated to hash, + * and ENOMEM on error. + */ +static int filelink_hash_link(UInt32 link_ref_num) +{ + struct filelink_hash *cur; + + /* If no hash exists, allocate the hash */ + if (filelink_head == NULL) { + filelink_head = calloc(FILELINK_HASH_SIZE, sizeof(struct filelink_hash *)); + if (filelink_head == NULL) { + return ENOMEM; + } + } + + cur = filelink_hash_search(link_ref_num); + if (!cur) { + cur = filelink_hash_insert(link_ref_num); + } + if (cur) { + cur->calc_link_count++; + return 0; + } + + return ENOMEM; +} + +/* Update the hash with information about given file inode. + * The found link count in the hash is updated with the + * link count value provided. + * Returns zero if the value was successfully updated to hash, + * and ENOMEM on error. + */ +int filelink_hash_inode(UInt32 link_ref_num, UInt32 linkCount) +{ + struct filelink_hash *cur; + + /* If no hash exists, allocate the hash */ + if (filelink_head == NULL) { + filelink_head = calloc(FILELINK_HASH_SIZE, sizeof(struct filelink_hash *)); + if (filelink_head == NULL) { + return ENOMEM; + } + } + + cur = filelink_hash_search(link_ref_num); + if (!cur) { + cur = filelink_hash_insert(link_ref_num); + } + if (cur) { + cur->found_link_count = linkCount; + return 0; + } + return ENOMEM; +} + +/* If the file link hash was used to account for + * link count of file hard links created on pre-Leopard + * OS, destroy the hash by freeing all allocated + * memory. + */ +static void filelink_hash_destroy(void) +{ + int i; + struct filelink_hash *cur; + + for (i = 0; i < FILELINK_HASH_SIZE; i++) { + while (filelink_head[i]) { + cur = filelink_head[i]; + filelink_head[i] = cur->next; + free (cur); + } + } + free(filelink_head); + filelink_head = NULL; + filelink_entry_count = 0; +} + +/* + * CaptureHardLink + * + * Capture indirect link info. + * Called for every indirect link in the catalog. + */ +void +CaptureHardLink(void *cookie, const HFSPlusCatalogFile *file) +{ + struct HardLinkInfo * info = (struct HardLinkInfo *) cookie; + + /* A file hard link created on pre-Leopard OS does not + * have kHFSHasLinkChainBit set or prev/next link IDs. + * Ignore such file hard links from all check and CRT account + * and instead account the information in hash to verify the + * link counts later. + */ + if ((info->fileBucket == NULL) || + (((file->flags & kHFSHasLinkChainMask) == 0) && + (file->hl_prevLinkID == 0) && + (file->hl_nextLinkID == 0))) { + filelink_hash_link(file->hl_linkReference); + } else { + /* For file hard links, add link reference number + * and catalog link ID pair to the prime buckets. 
+ */ + hardlink_add_bucket(info->fileBucket, file->hl_linkReference, + file->fileID); + + if ((file->flags & kHFSHasLinkChainMask) == 0) { + record_link_badchain(info->globals, false); + } + } + if ((info->priv_dir_itime && file->createDate != info->priv_dir_itime) && + (info->root_dir_itime && file->createDate != info->root_dir_itime)) { + RepairOrderPtr p; + char str1[12]; + char str2[12]; + uint32_t correct; + + if (debug) + plog("Hard Link catalog entry %u has bad time %u (should be %u, or at least %u)\n", + file->fileID, file->createDate, info->priv_dir_itime, info->root_dir_itime); + correct = info->priv_dir_itime; + + p = AllocMinorRepairOrder(info->globals, 0); + if (p == NULL) { + if (debug) + plog("Unable to allocate hard link time repair order!"); + return; + } + + fsckPrint(info->globals->context, E_BadHardLinkDate); + snprintf(str1, sizeof(str1), "%u", correct); + snprintf(str2, sizeof(str2), "%u", file->createDate); + fsckPrint(info->globals->context, E_BadValue, str1, str2); + + p->type = E_BadHardLinkDate; + p->parid = file->fileID; + p->correct = info->priv_dir_itime; + p->incorrect = file->createDate; + } + + return; +} + +/* + * RepairHardLinkChains + * + * Cycle through the catalog tree, and generate repair orders for hard + * links that may be broken. + */ +int +RepairHardLinkChains(SGlobPtr gp, Boolean isdir) +{ + int result = 0; + struct IndirectLinkInfo *linkInfo = NULL; + CatalogRecord rec; + HFSPlusCatalogKey *keyp; + BTreeIterator iterator; + FSBufferDescriptor btrec; + UInt16 reclen; + UInt32 linkID, inodeID; + UInt32 metadirid; + SFCB *fcb; + size_t prefixlen; + int slotsUsed = 0, slots = 0; + char *prefixName; + UInt32 folderID; + UInt32 link_ref_num; + int entries; + UInt32 flags; + + if (isdir) { + metadirid = gp->dirlink_priv_dir_id; + prefixlen = strlen(HFS_DIRINODE_PREFIX); + prefixName = HFS_DIRINODE_PREFIX; + } else { + metadirid = gp->filelink_priv_dir_id; + prefixlen = strlen(HFS_INODE_PREFIX); + prefixName = HFS_INODE_PREFIX; + } + + if (metadirid == 0) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + if (isdir) { + plog ("\tPrivate directory for dirlinks not found. Stopping repairs.\n"); + } else { + plog ("\tPrivate directory for filelinks not found. Stopping repairs.\n"); + } + } + result = ENOENT; + goto done; + } + + // Initialize the hash + if (GetPrivateDir(gp, &rec) == 0 && rec.hfsPlusFolder.valence != 0) { + entries = rec.hfsPlusFolder.valence + 10; + folderID = rec.hfsPlusFolder.folderID; + } else { + entries = 1000; + folderID = 0; + } + + for (slots = 1; slots <= entries; slots <<= 1) + continue; + if (slots < (entries + (entries/3))) + slots <<= 1; + linkInfo = calloc(slots, sizeof(struct IndirectLinkInfo)); + if (linkInfo == NULL) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("RepairHardLinkChains: calloc(%d, %zu) failed\n", slots, sizeof(struct IndirectLinkInfo)); + } + result = ENOMEM; + goto done; + } + // Done initializing the hash + + // Set up the catalog BTree iterator + // (start from the root folder, and work our way down) + fcb = gp->calculatedCatalogFCB; + ClearMemory(&iterator, sizeof(iterator)); + keyp = (HFSPlusCatalogKey*)&iterator.key; + BuildCatalogKey(kHFSRootFolderID, NULL, true, (CatalogKey*)keyp); + btrec.bufferAddress = &rec; + btrec.itemCount = 1; + btrec.itemSize = sizeof(rec); + + /* Counter for number of inodes found and verified in the + * hash. When an inode is found when checking the hard links, + * the value is incremented. 
When an inode's linked list and + * link count are verified, the value is decremented. If + * this value remains non-zero at the end, there are + * orphan hard links. + */ + entries = 0; + + /* + * This chunk of code iterates through the entire catalog BTree. + * For each hard link node (that is, the "directory entry" that + * points to the actual node in the metadata directory), it may + * add it to the hash (if it doesn't exist yet; it also then increments + * the link count for that "inode"); it also creates an array + * of <previous, fileid, next> for the linked list. + */ + for (result = BTIterateRecord(fcb, kBTreeFirstRecord, &iterator, &btrec, &reclen); + result == 0; + result = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, &btrec, &reclen)) { + HFSPlusCatalogFile *file = &rec.hfsPlusFile; + Boolean islink = false; + + if (rec.recordType != kHFSPlusFileRecord) + continue; + + if (isdir) { + /* Assume that this is a directory hard link if + * the atleast one value in finder info corresponds to + * alias, and the alias is not a file inode, and + * either the inode number is greater than + * kHFSFirstUserCatalogNodeID or the flag has + * kHFSHasLinkChainBit set. + */ + if (((file->userInfo.fdType == kHFSAliasType) || + (file->userInfo.fdCreator == kHFSAliasCreator) || + (file->userInfo.fdFlags & kIsAlias)) && + (keyp->parentID != gp->filelink_priv_dir_id) && + ((file->hl_linkReference >= kHFSFirstUserCatalogNodeID) || + (file->flags & kHFSHasLinkChainMask))) { + flags = rec.hfsPlusFile.flags; + islink = true; + } + } else if (file->userInfo.fdType == kHardLinkFileType && + file->userInfo.fdCreator == kHFSPlusCreator) { + flags = rec.hfsPlusFile.flags; + islink = true; + } + if (islink) { + struct IndirectLinkInfo *li = NULL; + struct HardLinkList *tlist = NULL; + int i; + int count; + + linkID = file->fileID; + inodeID = file->bsdInfo.special.iNodeNum; + + /* Now that we are in repair, all hard links should + * have this bit set because we upgrade all pre-Leopard + * file hard links to Leopard hard links on any + * file hard link repairs. + */ + if ((flags & kHFSHasLinkChainMask) == 0) { + record_link_badflags(gp, linkID, isdir, flags, + flags | kHFSHasLinkChainMask); + } + + /* For directory hard links, check ownerFlags and + * finderInfo because we could have missed creating + * repair orders in verification. Verification could + * have stopped before we saw this record because it + * stops as soon as it determines that it needs full + * knowledge of hard links on the disk during repair. + */ + if (isdir) { + /* Check if the directory hard link has UF_IMMUTABLE bit set */ + if ((file->bsdInfo.ownerFlags & UF_IMMUTABLE) == 0) { + record_dirlink_badownerflags(gp, file->fileID, + file->bsdInfo.ownerFlags, + file->bsdInfo.ownerFlags | UF_IMMUTABLE, true); + } + + /* Check Finder Info */ + if ((file->userInfo.fdType != kHFSAliasType) || + (file->userInfo.fdCreator != kHFSAliasCreator) || + ((file->userInfo.fdFlags & kIsAlias) == 0)) { + record_link_badfinderinfo(gp, file->fileID, true); + } + } + + /* For directory hard links, hash using inodeID. For + * file hard links, hash using link reference number + * (which is same as inode ID for file hard links + * created post-Tiger). For each inodeID, add the + * <prev, id, next> triad. 
+ */ + li = hash_search(inodeID, slots, slotsUsed, linkInfo); + if (li) { + li->linkCount++; + } else { + entries++; + /* hash_insert() initializes linkCount to 1 */ + hash_insert(inodeID, slots, slotsUsed++, linkInfo); + li = hash_search(inodeID, slots, slotsUsed, linkInfo); + } + if (li == NULL) { + /* + * Either the hash passed in should have the entry, or + * the one we just created should (because we just added it); + * either way, if it's not here, we've got something weird + * going on, so let's just abort now. + */ + result = ENOENT; + goto done; + } + + count = li->linkCount - 1; + /* Reallocate memory to store information about file/directory hard links */ + if ((count % 10) == 0) { + tlist = realloc(li->list, (count + 10) * sizeof(struct HardLinkList)); + if (tlist == NULL) { + free(li->list); + li->list = NULL; + result = ENOMEM; + goto done; + } else { + li->list = tlist; // May be the same + for (i = count; i < (count + 10); i++) { + memset(&li->list[i], 0, sizeof(li->list[i])); + } + } + } + + /* Store information about this hard link */ + if (li->list) { + li->list[count].fileID = linkID; + li->list[count].prev = file->hl_prevLinkID; + li->list[count].next = file->hl_nextLinkID; + } + } + } + + if (result == btNotFound) + result = 0; // If we hit the end of the catalog tree, that's okay + + if (result) { + goto done; + } + + /* + * Next, we iterate through the metadata directory, and check the linked list. + */ + + ClearMemory(&iterator, sizeof(iterator)); + keyp = (HFSPlusCatalogKey*)&iterator.key; + BuildCatalogKey(metadirid, NULL, true, (CatalogKey*)keyp); + btrec.bufferAddress = &rec; + btrec.itemCount = 1; + btrec.itemSize = sizeof(rec); + + for (result = BTSearchRecord(fcb, &iterator, kInvalidMRUCacheKey, &btrec, &reclen, &iterator); + result == 0; + result = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, &btrec, &reclen)) { + unsigned char filename[64]; + size_t len; + struct IndirectLinkInfo *li = NULL; + + if (rec.recordType == kHFSPlusFolderThreadRecord || + rec.recordType == kHFSPlusFileThreadRecord) + continue; + if (keyp->parentID != metadirid) + break; + if ((isdir && rec.recordType != kHFSPlusFolderRecord) || + (!isdir && rec.recordType != kHFSPlusFileRecord)) + continue; + (void)utf_encodestr(keyp->nodeName.unicode, + keyp->nodeName.length * 2, + filename, &len, sizeof(filename)); + filename[len] = 0; + if (strstr((char*)filename, prefixName) != (char*)filename) + continue; + + if (isdir) { + inodeID = rec.hfsPlusFolder.folderID; + link_ref_num = 0; + flags = rec.hfsPlusFolder.flags; + li = hash_search(inodeID, slots, slotsUsed, linkInfo); + } else { + inodeID = rec.hfsPlusFile.fileID; + link_ref_num = atol((char*)&filename[prefixlen]); + flags = rec.hfsPlusFile.flags; + li = hash_search(link_ref_num, slots, slotsUsed, linkInfo); + } + + /* file/directory inode should always have kHFSHasLinkChainBit set */ + if ((flags & kHFSHasLinkChainMask) == 0) { + record_inode_badflags(gp, inodeID, isdir, flags, + flags | kHFSHasLinkChainMask, true); + } + + if (li) { + UInt32 first_link_id = 0; + uint32_t linkCount = 0; + + result = get_first_link_id(gp, &rec, inodeID, isdir, &first_link_id); + if (result != 0) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) + plog("\tError getting first link ID for inode = %u (result=%d)\n", inodeID, result); + } + + /* Check and create repairs for doubly linked list */ + result = CheckHardLinkList(gp, inodeID, li->list, li->linkCount, first_link_id); + + linkCount = isdir ? 
rec.hfsPlusFolder.bsdInfo.special.linkCount : rec.hfsPlusFile.bsdInfo.special.linkCount; + if (linkCount != li->linkCount) { + RecordBadLinkCount(gp, inodeID, linkCount, li->linkCount); + } + + li->flags |= LINKINFO_CHECK; + entries--; + } else { + /* Not found in hash, this is orphaned file/directory inode */ + RecordOrphanInode(gp, isdir, inodeID); + } + } + + if (result == btNotFound) { + result = 0; + } + if (result) { + goto done; + } + + /* Check for orphan hard links */ + if (entries) { + int i, j; + for (i = 0; i < slots; i++) { + /* If node is initialized but never checked, record orphan link */ + if ((linkInfo[i].flags & LINKINFO_INIT) && + ((linkInfo[i].flags & LINKINFO_CHECK) == 0)) { + for (j = 0; j < linkInfo[i].linkCount; j++) { + RecordOrphanLink(gp, isdir, linkInfo[i].list[j].fileID); + } + } + } + } + +done: + if (linkInfo) { + int i; + for (i = 0; i < slots; i++) { + if (linkInfo[i].list) + free(linkInfo[i].list); + } + free(linkInfo); + } + + return result; +} + +/* + * CheckHardLinks + * + * Check indirect node link counts against the indirect + * links that were found. There are 4 possible problems + * that can occur. + * 1. orphaned indirect node (i.e. no links found) + * 2. orphaned indirect link (i.e. indirect node missing) + * 3. incorrect link count + * 4. indirect link id was 0 (i.e. link id wasn't preserved) + */ +int +CheckHardLinks(void *cookie) +{ + struct HardLinkInfo *info = (struct HardLinkInfo *)cookie; + SGlobPtr gp; + UInt32 folderID; + SFCB * fcb; + CatalogRecord rec; + HFSPlusCatalogKey * keyp; + BTreeIterator iterator; + FSBufferDescriptor btrec; + UInt16 reclen; + size_t len; + size_t prefixlen; + int result; + unsigned char filename[64]; + PrimeBuckets *catBucket; + + /* All done if no hard links exist. */ + if (info == NULL) + return (0); + + gp = info->globals; + fsckPrint(gp->context, hfsHardLinkCheck); + + folderID = info->privDirID; + + catBucket = calloc(1, sizeof(PrimeBuckets)); + if (catBucket == NULL) { + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("CheckHardLinks: calloc(1, %zu) failed\n", sizeof(PrimeBuckets)); + } + result = ENOMEM; + goto exit; + } + + + fcb = gp->calculatedCatalogFCB; + prefixlen = strlen(HFS_INODE_PREFIX); + ClearMemory(&iterator, sizeof(iterator)); + keyp = (HFSPlusCatalogKey*)&iterator.key; + btrec.bufferAddress = &rec; + btrec.itemCount = 1; + btrec.itemSize = sizeof(rec); + /* + * position iterator at folder thread record + * (i.e. one record before first child) + */ + ClearMemory(&iterator, sizeof(iterator)); + BuildCatalogKey(folderID, NULL, true, (CatalogKey *)keyp); + result = BTSearchRecord(fcb, &iterator, kInvalidMRUCacheKey, &btrec, + &reclen, &iterator); + if ((result != 0) && (result != btNotFound)) { + goto exit; + } + + /* Visit all the children in private directory. 
*/ + for (;;) { + result = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, + &btrec, &reclen); + if (result || keyp->parentID != folderID) + break; + + if (rec.recordType != kHFSPlusFileRecord) + continue; + + (void) utf_encodestr(keyp->nodeName.unicode, + keyp->nodeName.length * 2, + filename, &len, sizeof(filename)); + filename[len] = '\0'; + + /* Report Orphaned nodes only in debug mode */ + if ((strstr((char *)filename, HFS_DELETE_PREFIX) == (char *)filename) && + (fsckGetVerbosity(gp->context) == kDebugLog)) { + RecordOrphanOpenUnlink(gp, folderID, filename); + continue; + } + + if (strstr((char *)filename, HFS_INODE_PREFIX) != (char *)filename) + continue; + + result = inode_check(gp, catBucket, (CatalogRecord*)&rec, (CatalogKey*)keyp, false); + if (result) { + break; + } + filename[0] = '\0'; + } + + if (result == btNotFound) { + result = 0; + } + + /* + * If we've reached this point, and result is clean, + * then we need to compare the two hard link + * buckets: if they don't match, then we have a hard link chain error, and + * need to either repair it, or just mark the error. + */ + if ((result == 0) && (info->fileBucket != NULL)) { + result = compare_prime_buckets(catBucket, info->fileBucket); + if (result) { + record_link_badchain(gp, false); + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("\tfilelink prime buckets do not match\n"); + } + goto exit; + } + } + + /* If hard links created in pre-Leopard OS were detected, they were + * added to the hash for checking link counts later. Check the + * link counts from the hash. Note that the hard links created in + * pre-Leopard OS do not have kHFSHasLinkChainBit set in the inode + * and the hard links, and the first/prev/next ID is zero --- and + * hence they were ignored from CRT check and added to hash. + */ + if (filelink_entry_count) { + int i; + struct filelink_hash *cur; + + /* Since pre-Leopard OS hard links were detected, they + * should be updated to new version. This is however + * an opportunistic repair and no corruption will be + * reported. This will be performed only if any other + * file hard link repairs are performed. + */ + if (fsckGetVerbosity(gp->context) >= kDebugLog) { + plog("\tCheckHardLinks: found %u pre-Leopard file inodes.\n", filelink_entry_count); + } + + for (i = 0; i < FILELINK_HASH_SIZE; i++) { + cur = filelink_head[i]; + while (cur) { + if ((cur->found_link_count == 0) || + (cur->calc_link_count == 0) || + (cur->found_link_count != cur->calc_link_count)) { + record_link_badchain(gp, false); + goto exit; + } + cur = cur->next; + } + } + } + +exit: + if (filelink_entry_count) { + filelink_hash_destroy(); + } + + if (catBucket) + free(catBucket); + + return (result); +} + +/* + * GetPrivateDir + * + * Get catalog entry for the "HFS+ Private Data" directory. + * The indirect nodes are stored in this directory. 
+ */ +static int +GetPrivateDir(SGlobPtr gp, CatalogRecord * rec) +{ + HFSPlusCatalogKey * keyp; + BTreeIterator iterator; + FSBufferDescriptor btrec; + UInt16 reclen; + int result; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (-1); + + ClearMemory(&iterator, sizeof(iterator)); + keyp = (HFSPlusCatalogKey*)&iterator.key; + + btrec.bufferAddress = rec; + btrec.itemCount = 1; + btrec.itemSize = sizeof(CatalogRecord); + + /* look up record for HFS+ private folder */ + ClearMemory(&iterator, sizeof(iterator)); + CopyMemory(&gMetaDataDirKey, keyp, sizeof(gMetaDataDirKey)); + result = BTSearchRecord(gp->calculatedCatalogFCB, &iterator, + kInvalidMRUCacheKey, &btrec, &reclen, &iterator); + + return (result); +} + +/* + * GetRootDir + * + * Get catalog entry for the Root Folder. + */ +static int +GetRootDir(SGlobPtr gp, CatalogRecord * rec) +{ + CatalogKey key; + uint16_t recSize; + int result; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (-1); + + result = GetCatalogRecordByID(gp, kHFSRootFolderID, isHFSPlus, &key, rec, &recSize); + + return (result); +} + +/* + * RecordOrphanLink + * + * Record a repair to delete an orphaned hard links, i.e. hard links + * that do not have any corresponding inode. + */ +static int +RecordOrphanLink(SGlobPtr gp, Boolean isdir, UInt32 linkID) +{ + RepairOrderPtr p; + + fsckPrint(gp->context, isdir ? E_OrphanDirLink : E_OrphanFileLink, linkID); + + p = AllocMinorRepairOrder(gp, 0); + if (p == NULL) + return ENOMEM; + + p->type = isdir ? E_OrphanDirLink : E_OrphanFileLink; + p->parid = linkID; + + gp->CatStat |= S_LinkErrRepair; + + return 0; +} + +/* + * RecordOrphanInode + * + * Record a repair for orphan inode i.e. inodes that do not have + * any corresponding hard links. + */ +static int +RecordOrphanInode(SGlobPtr gp, Boolean isdir, UInt32 inodeID) +{ + RepairOrderPtr p; + + fsckPrint(gp->context, isdir ? E_OrphanDirInode : E_OrphanFileInode, inodeID); + + p = AllocMinorRepairOrder(gp, 0); + if (p == NULL) + return ENOMEM; + + p->type = isdir ? E_OrphanDirInode : E_OrphanFileInode; + p->parid = inodeID; + + gp->CatStat |= S_LinkErrRepair; + + return 0; +} + +/* + * RecordOrphanOpenUnlink + * + * This is only called when debugging is turned on. Don't + * record an actual error, just print out a message. + */ +static int +RecordOrphanOpenUnlink(SGlobPtr gp, UInt32 parID, unsigned char* filename) +{ + fsckPrint(gp->context, E_UnlinkedFile, filename); + + return (noErr); +} + + +static int +RecordBadHardLinkChainFirst(SGlobPtr gp, UInt32 fileID, UInt32 is, UInt32 shouldbe) +{ + RepairOrderPtr p; + char goodstr[16], badstr[16]; + + fsckPrint(gp->context, E_InvalidLinkChainFirst, fileID); + sprintf(goodstr, "%u", shouldbe); + sprintf(badstr, "%u", is); + fsckPrint(gp->context, E_BadValue, goodstr, badstr); + + p = AllocMinorRepairOrder(gp, 0); + + if (p == NULL) { + return (ENOMEM); + } + + p->type = E_InvalidLinkChainFirst; + p->incorrect = is; + p->correct = shouldbe; + p->hint = 0; + p->parid = fileID; // *Not* the parent ID! 
+ gp->CatStat |= S_LinkErrRepair; + + return (0); +} + + +static int +RecordBadHardLinkPrev(SGlobPtr gp, UInt32 fileID, UInt32 is, UInt32 shouldbe) +{ + RepairOrderPtr p; + char goodstr[16], badstr[16]; + + fsckPrint(gp->context, E_InvalidLinkChainPrev, fileID); + sprintf(goodstr, "%u", shouldbe); + sprintf(badstr, "%u", is); + fsckPrint(gp->context, E_BadValue, goodstr, badstr); + + p = AllocMinorRepairOrder(gp, 0); + if (p == NULL) + return (R_NoMem); + + p->type = E_InvalidLinkChainPrev; + p->incorrect = is; + p->correct = shouldbe; + p->hint = 0; + p->parid = fileID; // *Not* the parent ID + gp->CatStat |= S_LinkCount; + return (0); +} + +static int +RecordBadHardLinkNext(SGlobPtr gp, UInt32 fileID, UInt32 is, UInt32 shouldbe) +{ + RepairOrderPtr p; + char goodstr[16], badstr[16]; + + fsckPrint(gp->context, E_InvalidLinkChainNext, fileID); + + sprintf(goodstr, "%u", shouldbe); + sprintf(badstr, "%u", is); + fsckPrint(gp->context, E_BadValue, goodstr, badstr); + + p = AllocMinorRepairOrder(gp, 0); + if (p == NULL) + return (R_NoMem); + + p->type = E_InvalidLinkChainNext; + p->incorrect = is; + p->correct = shouldbe; + p->hint = 0; + p->parid = fileID; // *Not* the parent ID + gp->CatStat |= S_LinkCount; + return (0); +} + +static int +RecordBadLinkCount(SGlobPtr gp, UInt32 inodeID, UInt32 is, UInt32 shouldbe) +{ + RepairOrderPtr p; + char goodstr[16], badstr[16]; + fsckPrint(gp->context, E_InvalidLinkCount, inodeID); + + sprintf(goodstr, "%u", shouldbe); + sprintf(badstr, "%u", is); + fsckPrint(gp->context, E_BadValue, goodstr, badstr); + + p = AllocMinorRepairOrder(gp, 0); + if (p == NULL) + return (R_NoMem); + + p->type = E_InvalidLinkCount; + p->incorrect = is; + p->correct = shouldbe; + p->hint = 0; + p->parid = inodeID; // *Not* the parent ID + return (0); +} + + +static void +hash_insert(UInt32 linkID, int totalSlots, int slotsUsed, struct IndirectLinkInfo *linkInfo) +{ + int i, last; + + i = linkID & (totalSlots - 1); + + last = (i + (totalSlots-1)) % totalSlots; + while ((i != last) && + (linkInfo[i].flags & LINKINFO_INIT) && + (linkInfo[i].linkID != linkID)) { + i = (i + 1) % totalSlots; + } + + if ((linkInfo[i].flags & LINKINFO_INIT) == 0) { + if (linkInfo[i].list) { + plog ("hash: overwriting data! (old:%u, new:%u)\n", linkInfo[i].linkID, linkID); + exit(13); + } + linkInfo[i].flags |= LINKINFO_INIT; + linkInfo[i].linkID = linkID; + linkInfo[i].linkCount = 1; + } else if (linkInfo[i].linkID == linkID) { + plog("hash: duplicate insert! (%d)\n", linkID); + exit(13); + } else { + plog("hash table full (%d entries) \n", slotsUsed); + exit(14); + } +} + + +static struct IndirectLinkInfo * +hash_search(UInt32 linkID, int totalSlots, int slotsUsed, struct IndirectLinkInfo *linkInfo) +{ + int i, last; + int p = 1; + + + i = linkID & (totalSlots - 1); + + last = (i + (slotsUsed-1)) % totalSlots; + while ((i != last) && + (linkInfo[i].flags & LINKINFO_INIT) && + (linkInfo[i].linkID != linkID)) { + i = (i + 1) % totalSlots; + ++p; + } + + if ((linkInfo[i].flags & LINKINFO_INIT) && + (linkInfo[i].linkID == linkID)) { + return (&linkInfo[i]); + } else { + return (NULL); + } +} diff --git a/fsck_hfs/dfalib/SAllocate.c b/fsck_hfs/dfalib/SAllocate.c new file mode 100644 index 0000000..5f3e8a7 --- /dev/null +++ b/fsck_hfs/dfalib/SAllocate.c @@ -0,0 +1,1561 @@ +/* + * Copyright (c) 1999-2008 Apple Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SAllocate.c + + Contains: Routines for accessing and modifying the volume bitmap. + + Version: HFS Plus 1.0 + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. + +*/ + +/* +Public routines: + BlockAllocate + Allocate space on a volume. Can allocate space contiguously. + If not contiguous, then allocation may be less than what was + asked for. Returns the starting block number, and number of + blocks. (Will only do a single extent???) + BlockDeallocate + Deallocate a contiguous run of allocation blocks. + +Internal routines: + BlockAllocateAny + Find and allocate a contiguous range of blocks up to a given size. The + first range of contiguous free blocks found are allocated, even if there + are fewer blocks than requested (and even if a contiguous range of blocks + of the given size exists elsewhere). + + BlockMarkFree + Mark a contiguous range of blocks as free. The corresponding + bits in the volume bitmap will be cleared. + BlockMarkAllocated + Mark a contiguous range of blocks as allocated. The cor- + responding bits in the volume bitmap are set. Also tests to see + if any of the blocks were previously unallocated. + FindContiguous + Find a contiguous range of blocks of a given size. The caller + specifies where to begin the search (by block number). The + block number of the first block in the range is returned. + BlockAllocateContig + Find and allocate a contiguous range of blocks of a given size. If + a contiguous range of free blocks of the given size isn't found, then + the allocation fails (i.e. it is "all or nothing"). + ReadBitmapBlock + Given an allocation block number, read the bitmap block that + contains that allocation block into a caller-supplied buffer. +*/ + +#include "Scavenger.h" + + +enum { + kBitsPerByte = 8, + kBitsPerWord = 32, + kBitsWithinWordMask = kBitsPerWord-1 +}; + +#define kBytesPerBlock ( (vcb->vcbSignature == kHFSSigWord) ? 
kHFSBlockSize : vcb->vcbAllocationFile->fcbBlockSize ) +#define kWordsPerBlock ( kBytesPerBlock / 4 ) +#define kBitsPerBlock ( kBytesPerBlock * kBitsPerByte ) +#define kBitsWithinBlockMask ( kBitsPerBlock - 1 ) +#define kWordsWithinBlockMask ( kWordsPerBlock - 1 ) + +#define kLowBitInWordMask 0x00000001u +#define kHighBitInWordMask 0x80000000u +#define kAllBitsSetInWord 0xFFFFFFFFu + + +static OSErr ReadBitmapBlock( + SVCB *vcb, + UInt32 bit, + BlockDescriptor *block); + +static OSErr ReleaseBitmapBlock( + SVCB *vcb, + OptionBits options, + BlockDescriptor *block); + +static OSErr BlockAllocateContig( + SVCB *vcb, + UInt32 startingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks); + +static OSErr BlockAllocateAny( + SVCB *vcb, + UInt32 startingBlock, + register UInt32 endingBlock, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks); + +static OSErr BlockFindContiguous( + SVCB *vcb, + UInt32 startingBlock, + UInt32 endingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks); + +static OSErr BlockMarkAllocated( + SVCB *vcb, + UInt32 startingBlock, + UInt32 numBlocks); + +static OSErr BlockMarkFree( + SVCB *vcb, + UInt32 startingBlock, + UInt32 numBlocks); + +/* +;________________________________________________________________________________ +; +; Routine: BlockAllocate +; +; Function: Allocate space on a volume. If contiguous allocation is requested, +; at least the requested number of bytes will be allocated or an +; error will be returned. If contiguous allocation is not forced, +; the space will be allocated at the first free fragment following +; the requested starting allocation block. If there is not enough +; room there, a block of less than the requested size will be +; allocated. +; +; If the requested starting block is 0 (for new file allocations), +; the volume's allocation block pointer will be used as a starting +; point. +; +; The function uses on-disk volume bitmap for allocation +; and updates it with newly allocated blocks. It also +; updates the in-memory volume bitmap. +; +; Input Arguments: +; vcb - Pointer to SVCB for the volume to allocate space on +; fcb - Pointer to FCB for the file for which storage is being allocated +; startingBlock - Preferred starting allocation block, 0 = no preference +; forceContiguous - Force contiguous flag - if bit 0 set, allocation is contiguous +; or an error is returned +; blocksRequested - Number of allocation blocks requested. If the allocation is +; non-contiguous, less than this may actually be allocated +; blocksMaximum - The maximum number of allocation blocks to allocate. If there +; is additional free space after blocksRequested, then up to +; blocksMaximum blocks should really be allocated. (Used by +; ExtendFileC to round up allocations to a multiple of the +; file's clump size.) +; +; Output: +; (result) - Error code, zero for successful allocation +; *startBlock - Actual starting allocation block +; *actualBlocks - Actual number of allocation blocks allocated +; +; Side effects: +; The volume bitmap is read and updated; the volume bitmap cache may be changed. 
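;
;	Illustrative call (an editorial sketch, not part of the original header;
;	the variable names are arbitrary): a non-contiguous request for 8
;	allocation blocks, allowing up to 16 if the free extent found happens to
;	be larger. Fewer than 8 blocks may come back, and dskFulErr means no free
;	space (or, with forceContiguous set, no single run long enough) was found:
;
;		UInt32	startBlock, numBlocks;
;		err = BlockAllocate(vcb, 0, 8, 16, false, &startBlock, &numBlocks);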
+; +; Modification history: +;________________________________________________________________________________ +*/ + +OSErr BlockAllocate ( + SVCB *vcb, /* which volume to allocate space on */ + UInt32 startingBlock, /* preferred starting block, or 0 for no preference */ + UInt32 blocksRequested, /* desired number of BYTES to allocate */ + UInt32 blocksMaximum, /* maximum number of bytes to allocate */ + Boolean forceContiguous, /* non-zero to force contiguous allocation and to force */ + /* bytesRequested bytes to actually be allocated */ + UInt32 *actualStartBlock, /* actual first block of allocation */ + UInt32 *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */ + /* was zero, then this may represent fewer than bytesRequested */ + /* bytes */ +{ + OSErr err; + Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated + + // + // Initialize outputs in case we get an error + // + *actualStartBlock = 0; + *actualNumBlocks = 0; + + // + // If the disk is already full, don't bother. + // + if (vcb->vcbFreeBlocks == 0) { + err = dskFulErr; + goto Exit; + } + if (forceContiguous && vcb->vcbFreeBlocks < blocksRequested) { + err = dskFulErr; + goto Exit; + } + + // + // If caller didn't specify a starting block number, then use the volume's + // next block to allocate from. + // + if (startingBlock == 0) { + startingBlock = vcb->vcbNextAllocation; + updateAllocPtr = true; + } + + // + // If the request must be contiguous, then find a sequence of free blocks + // that is long enough. Otherwise, find the first free block. + // + if (forceContiguous) + err = BlockAllocateContig(vcb, startingBlock, blocksRequested, blocksMaximum, actualStartBlock, actualNumBlocks); + else { + // We'll try to allocate contiguous space first. If that fails, we'll fall back to finding whatever tiny + // extents we can find. It would be nice if we kept track of the largest N free extents so that falling + // back grabbed a small number of large extents. + err = BlockAllocateContig(vcb, startingBlock, blocksRequested, blocksMaximum, actualStartBlock, actualNumBlocks); + if (err == dskFulErr) + err = BlockAllocateAny(vcb, startingBlock, vcb->vcbTotalBlocks, blocksMaximum, actualStartBlock, actualNumBlocks); + if (err == dskFulErr) + err = BlockAllocateAny(vcb, 0, startingBlock, blocksMaximum, actualStartBlock, actualNumBlocks); + } + + if (err == noErr) { + // + // If we used the volume's roving allocation pointer, then we need to update it. + // Adding in the length of the current allocation might reduce the next allocate + // call by avoiding a re-scan of the already allocated space. However, the clump + // just allocated can quite conceivably end up being truncated or released when + // the file is closed or its EOF changed. Leaving the allocation pointer at the + // start of the last allocation will avoid unnecessary fragmentation in this case. + // + if (updateAllocPtr) + vcb->vcbNextAllocation = *actualStartBlock; + + // + // Update the number of free blocks on the volume + // + vcb->vcbFreeBlocks -= *actualNumBlocks; + MarkVCBDirty(vcb); + } + +Exit: + + return err; +} + + + +/* +;________________________________________________________________________________ +; +; Routine: BlockDeallocate +; +; Function: Update the bitmap to deallocate a run of disk allocation blocks +; The on-disk volume bitmap is read and updated; the in-memory volume bitmap +; is also updated. 
+; +; Input Arguments: +; vcb - Pointer to SVCB for the volume to free space on +; firstBlock - First allocation block to be freed +; numBlocks - Number of allocation blocks to free up (must be > 0!) +; +; Output: +; (result) - Result code +; +; Side effects: +; The on-disk volume bitmap is read and updated; the in-memory volume bitmap +; is also changed. +; +; Modification history: +; +; <06Oct85> PWD Changed to check for error after calls to ReadBM and NextWord +; Now calls NextBit to read successive bits from the bitmap +;________________________________________________________________________________ +*/ + +OSErr BlockDeallocate ( + SVCB *vcb, // Which volume to deallocate space on + UInt32 firstBlock, // First block in range to deallocate + UInt32 numBlocks) // Number of contiguous blocks to deallocate +{ + OSErr err; + + + // + // If no blocks to deallocate, then exit early + // + if (numBlocks == 0) { + err = noErr; + goto Exit; + } + + // + // Call internal routine to free the sequence of blocks + // + err = BlockMarkFree(vcb, firstBlock, numBlocks); + if (err) + goto Exit; + + // + // Update the volume's free block count, and mark the VCB as dirty. + // + vcb->vcbFreeBlocks += numBlocks; + MarkVCBDirty(vcb); + +Exit: + + return err; +} + + +/* +;_______________________________________________________________________ +; +; Routine: DivideAndRoundUp +; +; Function: Divide numerator by denominator, rounding up the result if there +; was a remainder. This is frequently used for computing the number +; of whole and/or partial blocks used by some count of bytes. +;_______________________________________________________________________ +*/ +UInt32 DivideAndRoundUp( + UInt32 numerator, + UInt32 denominator) +{ + UInt32 quotient; + + quotient = numerator / denominator; + if (quotient * denominator != numerator) + quotient++; + + return quotient; +} + + + +/* +;_______________________________________________________________________ +; +; Routine: ReadBitmapBlock +; +; Function: Read in a bitmap block corresponding to a given allocation +; block. Return a pointer to the bitmap block. +; +; Inputs: +; vcb -- Pointer to SVCB +; block -- Allocation block whose bitmap block is desired +; +; Outputs: +; buffer -- Pointer to bitmap block corresonding to "block" +;_______________________________________________________________________ +*/ +static OSErr ReadBitmapBlock( + SVCB *vcb, + UInt32 bit, + BlockDescriptor *block) +{ + OSErr err = noErr; + UInt64 blockNum; + + if (vcb->vcbSignature == kHFSSigWord) { + // + // HFS: Turn block number into physical block offset within the + // bitmap, and then the physical block within the volume. + // + blockNum = bit / kBitsPerBlock; /* block offset within bitmap */ + blockNum += vcb->vcbVBMSt; /* block within whole volume */ + + err = GetVolumeBlock(vcb, blockNum, kGetBlock | kSkipEndianSwap, block); + + } else { + // HFS+: "bit" is the allocation block number that we are looking for + // in the allocation bit map. GetFileBlock wants a file block number + // so we calculate how many bits (kBitsPerBlock) fit in a file + // block then convert that to a file block number (bit / kBitsPerBlock) + // for our call. 
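+		//
+		// Worked example (illustrative; a 4096-byte allocation-file block
+		// size is an assumption here): kBitsPerBlock = 4096 * 8 = 32768,
+		// so allocation block 100000 lives in file block 100000 / 32768 = 3.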
+ err = GetFileBlock( vcb->vcbAllocationFile, (bit / kBitsPerBlock), kGetBlock, block ); + } + + return err; +} + + + +static OSErr ReleaseBitmapBlock( + SVCB *vcb, + OptionBits options, + BlockDescriptor *block) +{ + OSErr err; + + if (vcb->vcbSignature == kHFSSigWord) + err = ReleaseVolumeBlock (vcb, block, options | kSkipEndianSwap); + else + err = ReleaseFileBlock (vcb->vcbAllocationFile, block, options); + + return err; +} + + + +/* +_______________________________________________________________________ + +Routine: BlockAllocateContig + +Function: Allocate a contiguous group of allocation blocks. The + allocation is all-or-nothing. The caller guarantees that + there are enough free blocks (though they may not be + contiguous, in which case this call will fail). + + The function uses on-disk volume bitmap for allocation + and updates it with newly allocated blocks. It also + updates the in-memory volume bitmap. + +Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block for allocation + minBlocks Minimum number of contiguous blocks to allocate + maxBlocks Maximum number of contiguous blocks to allocate + +Outputs: + actualStartBlock First block of range allocated, or 0 if error + actualNumBlocks Number of blocks allocated, or 0 if error +_______________________________________________________________________ +*/ +static OSErr BlockAllocateContig( + SVCB *vcb, + UInt32 startingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks) +{ + OSErr err; + + // + // Find a contiguous group of blocks at least minBlocks long. + // Determine the number of contiguous blocks available (up + // to maxBlocks). + // + err = BlockFindContiguous(vcb, startingBlock, vcb->vcbTotalBlocks, minBlocks, maxBlocks, + actualStartBlock, actualNumBlocks); + if (err == dskFulErr) { + //¥¥ Should constrain the endingBlock here, so we don't bother looking for ranges + //¥¥ that start after startingBlock, since we already checked those. + err = BlockFindContiguous(vcb, 0, vcb->vcbTotalBlocks, minBlocks, maxBlocks, + actualStartBlock, actualNumBlocks); + } + if (err != noErr) goto Exit; + + // + // Now mark those blocks allocated. + // + err = BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks); + +Exit: + if (err != noErr) { + *actualStartBlock = 0; + *actualNumBlocks = 0; + } + + return err; +} + + + +/* +_______________________________________________________________________ + +Routine: BlockAllocateAny + +Function: Allocate one or more allocation blocks. If there are fewer + free blocks than requested, all free blocks will be + allocated. The caller guarantees that there is at least + one free block. + + The function uses on-disk volume bitmap for allocation + and updates it with newly allocated blocks. It also + updates the in-memory volume bitmap. 
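+
+Note on wrap-around (a recap of the caller's logic in this file, not new
+behaviour): this routine never wraps past endingBlock on its own.  When
+BlockAllocate falls back to it, it is called first over the range
+[startingBlock, vcbTotalBlocks) and, if that returns dskFulErr, again over
+[0, startingBlock).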
+ +Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block for allocation + endingBlock Last block to check + 1 + maxBlocks Maximum number of contiguous blocks to allocate + +Outputs: + actualStartBlock First block of range allocated, or 0 if error + actualNumBlocks Number of blocks allocated, or 0 if error +_______________________________________________________________________ +*/ +static OSErr BlockAllocateAny( + SVCB *vcb, + UInt32 startingBlock, + register UInt32 endingBlock, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks) +{ + OSErr err; + register UInt32 block = 0; // current block number + register UInt32 currentWord; // Pointer to current word within bitmap block + register UInt32 bitMask; // Word with given bits already set (ready to OR in) + register UInt32 wordsLeft; // Number of words left in this bitmap block + UInt32 *buffer; + BlockDescriptor bd = {0}; + OptionBits relOpt = kReleaseBlock; + + // Since this routine doesn't wrap around + if (maxBlocks > (endingBlock - startingBlock)) { + maxBlocks = endingBlock - startingBlock; + } + + // + // Pre-read the first bitmap block + // + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + relOpt = kMarkBlockDirty; + buffer = (UInt32 *) bd.buffer; + + // + // Set up the current position within the block + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord; + buffer += wordIndexInBlock; + wordsLeft = kWordsPerBlock - wordIndexInBlock; + currentWord = SWAP_BE32(*buffer); + bitMask = kHighBitInWordMask >> (startingBlock & kBitsWithinWordMask); + } + + // + // Find the first unallocated block + // + block = startingBlock; + while (block < endingBlock) { + if ((currentWord & bitMask) == 0) + break; + + // Next bit + ++block; + bitMask >>= 1; + if (bitMask == 0) { + // Next word + bitMask = kHighBitInWordMask; + ++buffer; + + if (--wordsLeft == 0) { + // Next block + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + bd.buffer = NULL; + if (err != noErr) goto Exit; + + err = ReadBitmapBlock(vcb, block, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + wordsLeft = kWordsPerBlock; + } + currentWord = SWAP_BE32(*buffer); + } + } + + // Did we get to the end of the bitmap before finding a free block? + // If so, then couldn't allocate anything. + if (block == endingBlock) { + err = dskFulErr; + goto Exit; + } + + // Return the first block in the allocated range + *actualStartBlock = block; + + // If we could get the desired number of blocks before hitting endingBlock, + // then adjust endingBlock so we won't keep looking. Ideally, the comparison + // would be (block + maxBlocks) < endingBlock, but that could overflow. The + // comparison below yields identical results, but without overflow. + if (block < (endingBlock-maxBlocks)) { + endingBlock = block + maxBlocks; // if we get this far, we've found enough + } + + // + // Allocate all of the consecutive blocks + // + while ((currentWord & bitMask) == 0) { + // Allocate this block + currentWord |= bitMask; + + // Move to the next block. If no more, then exit. 
+ ++block; + if (block == endingBlock) + break; + + // Next bit + bitMask >>= 1; + if (bitMask == 0) { + *buffer = SWAP_BE32(currentWord); // update value in bitmap + + // Next word + bitMask = kHighBitInWordMask; + ++buffer; + + if (--wordsLeft == 0) { + // Next block + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, block, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + wordsLeft = kWordsPerBlock; + } + currentWord = SWAP_BE32(*buffer); + } + } + *buffer = SWAP_BE32(currentWord); // update the last change + +Exit: + if (err == noErr) { + *actualNumBlocks = block - *actualStartBlock; + + /* Update the in-memory copy of bitmap */ + (void) CaptureBitmapBits (*actualStartBlock, *actualNumBlocks); + } + else { + *actualStartBlock = 0; + *actualNumBlocks = 0; + } + + if (bd.buffer != NULL) + (void) ReleaseBitmapBlock(vcb, relOpt, &bd); + + return err; +} + + + +/* +_______________________________________________________________________ + +Routine: BlockMarkAllocated + +Function: Mark a contiguous group of blocks as allocated (set in the + bitmap). The function sets the bit independent of the + previous state (set/clear) of the bit. + + The function uses on-disk volume bitmap for allocation + and updates it with newly allocated blocks. It also + updates the in-memory volume bitmap. + +Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock First block number to mark as allocated + numBlocks Number of blocks to mark as allocated +_______________________________________________________________________ +*/ +static OSErr BlockMarkAllocated( + SVCB *vcb, + UInt32 startingBlock, + register UInt32 numBlocks) +{ + OSErr err; + register UInt32 *currentWord; // Pointer to current word within bitmap block + register UInt32 wordsLeft; // Number of words left in this bitmap block + register UInt32 bitMask; // Word with given bits already set (ready to OR in) + UInt32 firstBit; // Bit index within word of first bit to allocate + UInt32 numBits; // Number of bits in word to allocate + UInt32 *buffer; + BlockDescriptor bd = {0}; + OptionBits relOpt = kReleaseBlock; + + UInt32 saveNumBlocks = numBlocks; + UInt32 saveStartingBlock = startingBlock; + + // + // Pre-read the bitmap block containing the first word of allocation + // + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // + // Initialize currentWord, and wordsLeft. + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + wordsLeft = kWordsPerBlock - wordIndexInBlock; + } + + // + // If the first block to allocate doesn't start on a word + // boundary in the bitmap, then treat that first word + // specially. 
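+	// Worked example (illustrative numbers): if startingBlock falls on word
+	// offset 5 and numBlocks is 10, bitMask starts as
+	// kAllBitsSetInWord >> 5 = 0x07FFFFFF and, after masking off the bits
+	// past the run, becomes 0x07FE0000 -- exactly bits 5 through 14.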
+ // + + firstBit = startingBlock % kBitsPerWord; + if (firstBit != 0) { + bitMask = kAllBitsSetInWord >> firstBit; // turn off all bits before firstBit + numBits = kBitsPerWord - firstBit; // number of remaining bits in this word + if (numBits > numBlocks) { + numBits = numBlocks; // entire allocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last + } +#if DEBUG_BUILD + if ((*currentWord & SWAP_BE32(bitMask)) != 0) { + DebugStr("\pFATAL: blocks already allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord |= SWAP_BE32(bitMask); // set the bits in the bitmap + numBlocks -= numBits; // adjust number of blocks left to allocate + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate whole words (32 blocks) at a time. + // + + bitMask = kAllBitsSetInWord; // put this in a register for 68K + while (numBlocks >= kBitsPerWord) { + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += kBitsPerBlock; // generate a block number in the next bitmap block + + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerBlock; + } +#if DEBUG_BUILD + if (*currentWord != 0) { + DebugStr("\pFATAL: blocks already allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord = SWAP_BE32(bitMask); + numBlocks -= kBitsPerWord; + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate any remaining blocks. + // + + if (numBlocks != 0) { + bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += kBitsPerBlock; // generate a block number in the next bitmap block + + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerBlock; + } +#if DEBUG_BUILD + if ((*currentWord & SWAP_BE32(bitMask)) != 0) { + DebugStr("\pFATAL: blocks already allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord |= SWAP_BE32(bitMask); // set the bits in the bitmap + + // No need to update currentWord or wordsLeft + } + + /* Update the in-memory copy of the volume bitmap */ + (void) CaptureBitmapBits(saveStartingBlock, saveNumBlocks); + +Exit: + if (bd.buffer != NULL) + (void) ReleaseBitmapBlock(vcb, relOpt, &bd); + + return err; +} + + + +/* +_______________________________________________________________________ + +Routine: BlockMarkFree + +Function: Mark a contiguous group of blocks as free (clear in the + bitmap). The function clears the bit independent of the + previous state (set/clear) of the bit. 
+ + This function uses the on-disk bitmap and also updates + the in-memory bitmap with the deallocated blocks + +Inputs: + vcb Pointer to volume where space is to be freed + startingBlock First block number to mark as freed + numBlocks Number of blocks to mark as freed +_______________________________________________________________________ +*/ +static OSErr BlockMarkFree( + SVCB *vcb, + UInt32 startingBlock, + register UInt32 numBlocks) +{ + OSErr err; + register UInt32 *currentWord; // Pointer to current word within bitmap block + register UInt32 wordsLeft; // Number of words left in this bitmap block + register UInt32 bitMask; // Word with given bits already set (ready to OR in) + UInt32 firstBit; // Bit index within word of first bit to allocate + UInt32 numBits; // Number of bits in word to allocate + UInt32 *buffer; + BlockDescriptor bd = {0}; + OptionBits relOpt = kReleaseBlock; + + UInt32 saveNumBlocks = numBlocks; + UInt32 saveStartingBlock = startingBlock; + + // + // Pre-read the bitmap block containing the first word of allocation + // + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // + // Initialize currentWord, and wordsLeft. + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + wordsLeft = kWordsPerBlock - wordIndexInBlock; + } + + // + // If the first block to free doesn't start on a word + // boundary in the bitmap, then treat that first word + // specially. + // + + firstBit = startingBlock % kBitsPerWord; + if (firstBit != 0) { + bitMask = kAllBitsSetInWord >> firstBit; // turn off all bits before firstBit + numBits = kBitsPerWord - firstBit; // number of remaining bits in this word + if (numBits > numBlocks) { + numBits = numBlocks; // entire allocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last + } +#if DEBUG_BUILD + if ((*currentWord & SWAP_BE32(bitMask)) != SWAP_BE32(bitMask)) { + DebugStr("\pFATAL: blocks not allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord &= SWAP_BE32(~bitMask); // clear the bits in the bitmap + numBlocks -= numBits; // adjust number of blocks left to free + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate whole words (32 blocks) at a time. + // + + while (numBlocks >= kBitsPerWord) { + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += kBitsPerBlock; // generate a block number in the next bitmap block + + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerBlock; + } +#if DEBUG_BUILD + if (*currentWord != kAllBitsSetInWord) { + DebugStr("\pFATAL: blocks not allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord = 0; // clear the entire word + numBlocks -= kBitsPerWord; + + ++currentWord; // move to next word + --wordsLeft; // one less word left in this block + } + + // + // Allocate any remaining blocks. 
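+	// Worked example (illustrative): with 10 blocks left over in the final
+	// word, bitMask = ~(kAllBitsSetInWord >> 10) = 0xFFC00000, i.e. only
+	// the first 10 bits of that word, which are then cleared below.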
+ // + + if (numBlocks != 0) { + bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits + if (wordsLeft == 0) { + // Read in the next bitmap block + startingBlock += kBitsPerBlock; // generate a block number in the next bitmap block + + err = ReleaseBitmapBlock(vcb, relOpt, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, startingBlock, &bd); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + relOpt = kMarkBlockDirty; + + // Readjust currentWord and wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerBlock; + } +#if DEBUG_BUILD + if ((*currentWord & SWAP_BE32(bitMask)) != SWAP_BE32(bitMask)) { + DebugStr("\pFATAL: blocks not allocated!"); + err = fsDSIntErr; + goto Exit; + } +#endif + *currentWord &= SWAP_BE32(~bitMask); // clear the bits in the bitmap + + // No need to update currentWord or wordsLeft + } + + /* Update the in-memory copy of the volume bitmap */ + (void) ReleaseBitmapBits(saveStartingBlock, saveNumBlocks); + +Exit: + if (bd.buffer != NULL) + (void) ReleaseBitmapBlock(vcb, relOpt, &bd); + + return err; +} + + +/* +_______________________________________________________________________ + +Routine: BlockFindContiguous + +Function: Find a contiguous range of blocks that are free (bits + clear in the bitmap). If a contiguous range of the + minimum size can't be found, an error will be returned. + + ¥¥ It would be nice if we could skip over whole words + ¥¥ with all bits set. + + ¥¥ When we find a bit set, and are about to set freeBlocks + ¥¥ to 0, we should check to see whether there are still + ¥¥ minBlocks bits left in the bitmap. + +Inputs: + vcb Pointer to volume where space is to be allocated + startingBlock Preferred first block of range + endingBlock Last possible block in range + 1 + minBlocks Minimum number of blocks needed. Must be > 0. + maxBlocks Maximum (ideal) number of blocks desired + +Outputs: + actualStartBlock First block of range found, or 0 if error + actualNumBlocks Number of blocks found, or 0 if error +_______________________________________________________________________ +*/ +/* +_________________________________________________________________________________________ + (DSH) 5/8/97 Description of BlockFindContiguous() algorithm + Finds a contiguous range of free blocks by searching back to front. This + allows us to skip ranges of bits knowing that they are not candidates for + a match because they are too small. The below ascii diagrams illustrate + the algorithm in action. + + Representation of a piece of a volume bitmap file + If BlockFindContiguous() is called with minBlocks == 10, maxBlocks == 20 + + +Fig. 1 initialization of variables, "<--" represents direction of travel + +startingBlock (passed in) + | + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + | <--| +stopBlock currentBlock freeBlocks == 0 + countedFreeBlocks == 0 + +Fig. 2 dirty bit found + + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + | | +stopBlock currentBlock freeBlocks == 3 + countedFreeBlocks == 0 + +Fig. 3 reset variables to search for remainder of minBlocks + + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + |_________________| | | + Unsearched stopBlock currentBlock freeBlocks == 0 + countedFreeBlocks == 3 + +Fig. 4 minBlocks contiguous blocks found, *actualStartBlock is set + + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + |_________________| | + Unsearched stopBlock freeBlocks == 7 + currentBlock countedFreeBlocks == 3 + +Fig. 
5 Now run it forwards trying to accumalate up to maxBlocks if possible + + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + |_________________| | --> + Unsearched currentBlock + *actualNumBlocks == 10 + +Fig. 6 Dirty bit is found, return actual number of contiguous blocks found + + 1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 + |_________________| | + Unsearched currentBlock + *actualNumBlocks == 16 +_________________________________________________________________________________________ +*/ +static OSErr BlockFindContiguous( + SVCB *vcb, + UInt32 startingBlock, + register UInt32 endingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks) +{ + OSErr err; + register UInt32 bitMask; // mask of bit within word for currentBlock + register UInt32 tempWord; // bitmap word currently being examined + register UInt32 freeBlocks; // number of contiguous free blocks so far + register UInt32 currentBlock; // block number we're currently examining + UInt32 wordsLeft; // words remaining in bitmap block + UInt32 *buffer = NULL; + register UInt32 *currentWord; + + UInt32 stopBlock; // when all blocks until stopBlock are free, we found enough + UInt32 countedFreeBlocks; // how many contiguous free block behind stopBlock + UInt32 currentSector; // which allocations file sector + BlockDescriptor bd = {0}; + + if ((endingBlock - startingBlock) < minBlocks) { + // The set of blocks we're checking is smaller than the minimum number + // of blocks, so we couldn't possibly find a good range. + err = dskFulErr; + goto Exit; + } + + // Search for min blocks from back to front. + // If min blocks is found, advance the allocation pointer up to max blocks + + // + // Pre-read the bitmap block containing currentBlock + // + stopBlock = startingBlock; + currentBlock = startingBlock + minBlocks - 1; // (-1) to include startingBlock + + err = ReadBitmapBlock(vcb, currentBlock, &bd); + if ( err != noErr ) goto Exit; + buffer = (UInt32 *) bd.buffer; + // + // Init buffer, currentWord, wordsLeft, and bitMask + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = ( currentBlock & kBitsWithinBlockMask ) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + + wordsLeft = wordIndexInBlock; + tempWord = SWAP_BE32(*currentWord); + bitMask = kHighBitInWordMask >> ( currentBlock & kBitsWithinWordMask ); + currentSector = currentBlock / kBitsPerBlock; + } + + // + // Look for maxBlocks free blocks. If we find an allocated block, + // see if we've found minBlocks. + // + freeBlocks = 0; + countedFreeBlocks = 0; + + while ( currentBlock >= stopBlock ) + { + // Check current bit + if ((tempWord & bitMask) == 0) + { + ++freeBlocks; + } + else // Used bitmap block found + { + if ( ( freeBlocks + countedFreeBlocks ) >= minBlocks ) + { + break; // Found enough + } + else + { + // We found a dirty bit, so we want to check if the next (minBlocks-freeBlocks) blocks + // are free beyond what we have already checked. 
At Fig.2 setting up for Fig.3 + + stopBlock = currentBlock + 1 + freeBlocks; // Advance stop condition + currentBlock += minBlocks; + if ( currentBlock >= endingBlock ) break; + countedFreeBlocks = freeBlocks; + freeBlocks = 0; // Not enough; look for another range + + if ( currentSector != currentBlock / kBitsPerBlock ) + { + err = ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock( vcb, currentBlock, &bd ); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + currentSector = currentBlock / kBitsPerBlock; + } + + wordsLeft = ( currentBlock & kBitsWithinBlockMask ) / kBitsPerWord; + currentWord = buffer + wordsLeft; + tempWord = SWAP_BE32(*currentWord); + bitMask = kHighBitInWordMask >> ( currentBlock & kBitsWithinWordMask ); + + continue; // Back to the while loop + } + } + + // Move to next bit + --currentBlock; + bitMask <<= 1; + if (bitMask == 0) // On a word boundry, start masking words + { + bitMask = kLowBitInWordMask; + + // Move to next word +NextWord: + if ( wordsLeft != 0 ) + { + --currentWord; + --wordsLeft; + } + else + { + // Read in the next bitmap block + err = ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock( vcb, currentBlock, &bd ); + if (err != noErr) goto Exit; + buffer = (UInt32 *) bd.buffer; + // Adjust currentWord, wordsLeft, currentSector + currentSector = currentBlock / kBitsPerBlock; + currentWord = buffer + kWordsPerBlock - 1; // Last word in buffer + wordsLeft = kWordsPerBlock - 1; + } + + tempWord = SWAP_BE32(*currentWord); // Grab the current word + + // + // If we found a whole word of free blocks, quickly skip over it. + // NOTE: we could actually go beyond the end of the bitmap if the + // number of allocation blocks on the volume is not a multiple of + // 32. If this happens, we'll adjust currentBlock and freeBlocks + // after the loop. + // + if ( tempWord == 0 ) + { + freeBlocks += kBitsPerWord; + currentBlock -= kBitsPerWord; + if ( freeBlocks + countedFreeBlocks >= minBlocks ) + break; // Found enough + goto NextWord; + } + } + } + + if ( freeBlocks + countedFreeBlocks < minBlocks ) + { + *actualStartBlock = 0; + *actualNumBlocks = 0; + err = dskFulErr; + goto Exit; + } + + // + // When we get here, we know we've found minBlocks continuous space. + // At Fig.4, setting up for Fig.5 + // From here we do a forward search accumalating additional free blocks. 
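+	// Recap of the figures above (illustrative numbers): with minBlocks == 10
+	// the backward scan ends with countedFreeBlocks == 3, so the returned
+	// range starts at stopBlock - 3; the forward pass below then tries to
+	// grow it toward maxBlocks == 20, stopping at 16 in Fig. 6 when a set
+	// bit is hit.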
+ // + + *actualNumBlocks = minBlocks; + *actualStartBlock = stopBlock - countedFreeBlocks; // ActualStartBlock is set to return to the user + currentBlock = *actualStartBlock + minBlocks; // Right after found free space + + // Now lets see if we can run the actualNumBlocks number all the way up to maxBlocks + if ( currentSector != currentBlock / kBitsPerBlock ) + { + err = ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock( vcb, currentBlock, &bd ); + if (err != noErr) + { + err = noErr; // We already found the space + goto Exit; + } + buffer = (UInt32 *) bd.buffer; + currentSector = currentBlock / kBitsPerBlock; + } + + // + // Init buffer, currentWord, wordsLeft, and bitMask + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = (currentBlock & kBitsWithinBlockMask) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + tempWord = SWAP_BE32(*currentWord); + wordsLeft = kWordsPerBlock - wordIndexInBlock; + bitMask = kHighBitInWordMask >> (currentBlock & kBitsWithinWordMask); + } + + if ( *actualNumBlocks < maxBlocks ) + { + while ( currentBlock < endingBlock ) + { + + if ( (tempWord & bitMask) == 0 ) + { + *actualNumBlocks += 1; + + if ( *actualNumBlocks == maxBlocks ) + break; + } + else + { + break; + } + + // Move to next bit + ++currentBlock; + bitMask >>= 1; + if (bitMask == 0) + { + bitMask = kHighBitInWordMask; + ++currentWord; + + if ( --wordsLeft == 0) + { + err = ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + if (err != noErr) goto Exit; + bd.buffer = NULL; + + err = ReadBitmapBlock(vcb, currentBlock, &bd); + if (err != noErr) break; + buffer = (UInt32 *) bd.buffer; + + // Adjust currentWord, wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerBlock; + } + tempWord = SWAP_BE32(*currentWord); // grab the current word + } + } + } + +Exit: + + if (bd.buffer != NULL) + (void) ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + + return err; +} + +/* + * Find the smallest extent in the array. + */ +static int +FindMinExt(HFSPlusExtentDescriptor *exts) +{ + int minIndx = -1; + UInt32 min = (UInt32)-1; + int i; + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (exts[i].blockCount < min) { + min = exts[i].blockCount; + minIndx = i; + } + } + return minIndx; +} + +/* + * Truncate any excess extents. There should be only one, + * but we'll go through them all to make sure. + */ +static void +PruneExtents(HFSPlusExtentDescriptor *exts, UInt32 needed) +{ + int i; + UInt32 total = 0; + UInt32 excess = 0; + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (excess) { + exts[i].startBlock = exts[i].blockCount = 0; + continue; + } + total += exts[i].blockCount; + if (total > needed) { + exts[i].blockCount -= total - needed; + excess = 1; + } + } + return; +} + +/* + * A much more specialized function: simply find the 8 largest extents + * to hold the needed size. It will either find enough blocks to + * fit the needed size, or it will fail. 
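+ *
+ * Worked example (illustrative numbers): if the runs recorded in the extent
+ * array hold 6, 5 and 4 blocks and only 12 are needed, PruneExtents() walks
+ * them in array order and trims the third run to 1 block (6 + 5 + 1 == 12),
+ * zeroing any extents after it.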
+ */ +OSErr +BlockFindAll( + SFCB *fcb, + UInt32 needed) +{ + OSErr err; + SVCB *vcb; + register UInt32 bitMask; // mask of bit within word for currentBlock + register UInt32 tempWord; // bitmap word currently being examined + HFSPlusExtentDescriptor *exts = fcb->fcbExtents32; + int minIndx; + UInt32 total = 0; + + UInt32 firstFreeBlock; + UInt32 freeBlocks = 0; + UInt32 currentBlock; + UInt32 endingBlock; + UInt32 wordsLeft; // words remaining in bitmap block + UInt32 *buffer = NULL; + UInt32 contigSize = 1; + register UInt32 *currentWord; + struct BlockDescriptor bd = { 0 }; + + vcb = fcb->fcbVolume; + + if (vcb->vcbFreeBlocks < needed) { + // Nothing to do + if (debug) + plog("%s: %u blocks free, but need %u; ignoring for now\n", __FUNCTION__, vcb->vcbFreeBlocks, needed); + } + + memset(exts, 0, sizeof(fcb->fcbExtents32)); // Zero out the extents. + minIndx = 0; + if (vcb->vcbBlockSize < fcb->fcbBlockSize) { + contigSize = fcb->fcbBlockSize / vcb->vcbBlockSize; // Number of volume blocks in a btree block + } + + currentBlock = 0; + endingBlock = vcb->vcbTotalBlocks; + + freeBlocks = 0; + + err = ReadBitmapBlock(vcb, currentBlock, &bd); + if ( err != noErr ) goto done; + buffer = (UInt32 *) bd.buffer; + // + // Init buffer, currentWord, wordsLeft, and bitMask + // + { + UInt32 wordIndexInBlock; + + wordIndexInBlock = ( currentBlock & kBitsWithinBlockMask ) / kBitsPerWord; + currentWord = buffer + wordIndexInBlock; + + wordsLeft = kWordsPerBlock - wordIndexInBlock - 1; + tempWord = SWAP_BE32(*currentWord); + bitMask = kHighBitInWordMask >> ( currentBlock & kBitsWithinWordMask ); + } + + /* + * This macro is used to cycle through the allocation bitmap. + * We examine one bit in a word at a time; when we're done with that word, + * we go to the next word in the block; when we're done with that block, + * we get the next one. Until we're out of blocks. + */ +#define nextblock() do { \ + currentBlock++; \ + if (currentBlock == endingBlock) goto done; \ + bitMask >>= 1; \ + if (bitMask == 0) { \ + bitMask = kHighBitInWordMask; \ + if (wordsLeft != 0) { \ + ++currentWord; \ + --wordsLeft; \ + } else { \ + err = ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); \ + if (err != noErr) goto done; \ + bd.buffer = NULL; \ + err = ReadBitmapBlock(vcb, currentBlock, &bd); \ + if (err != noErr) goto done; \ + buffer = (UInt32*)bd.buffer; \ + currentWord = buffer + ((currentBlock & kBitsWithinBlockMask) / kBitsPerWord); \ + wordsLeft = kWordsPerBlock - 1; \ + } \ + tempWord = SWAP_BE32(*currentWord); \ + } \ + } while (0) + +loop: + + /* + * We have two while loops here. The first one, at the top, looks for + * used blocks. We ignore those. The second while loop then looks + * for empty blocks, and keeps track of the length of the run. It creates + * an extent from these, and puts them into the exts array. We use + * the funciton FindMinExt() to find the smallest one in the array, and + * we only replace it if the new extent is larger. (When first starting, + * all of the extents will be 0 bytes long.) + * + * We stop when we've run out of blocks (the nextblock macro will jump + * to done at that point), or when we've got enough total blocks to + * fit our needs. 
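+	 *
+	 * Illustrative example (hypothetical run lengths): once all eight extent
+	 * slots are occupied, a newly found free run only displaces the current
+	 * minimum slot when it is longer -- an 8-block run would replace a
+	 * 5-block minimum, while a 3-block run would simply be skipped.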
+ */ + freeBlocks = 0; + while ((tempWord & bitMask) != 0) { + nextblock(); + } + firstFreeBlock = currentBlock; + while ((tempWord & bitMask) == 0) { + ++freeBlocks; + if (freeBlocks >= needed) + break; + nextblock(); + } + + /* + * We need to ensure that nodes are not split across + * volume blocks -- journaling will cause an error + * if this happens. + */ + freeBlocks -= freeBlocks % contigSize; + + if (freeBlocks > exts[minIndx].blockCount) { + total -= exts[minIndx].blockCount; + exts[minIndx].blockCount = freeBlocks; + exts[minIndx].startBlock = firstFreeBlock; + total += freeBlocks; + minIndx = FindMinExt(exts); + } + + if (total >= needed) { + goto done; + } + + goto loop; + +done: + if (bd.buffer) { + (void)ReleaseBitmapBlock(vcb, kReleaseBlock, &bd); + } + if (err == noErr) { + + if (total < needed) { + if (debug) + plog("%s: found %u blocks but needed %u\n", __FUNCTION__, total, needed); + err = dskFulErr; + } else { + /* + * If we've got enough blocks, we need to prune any extra. + * PruneExtents() will decrement the extents in the array to + * ensure we have only as much as we need. After that, we + * mark them as allocated, and return. + */ + int i; + if (total > needed) { + PruneExtents(exts, needed); + } + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (exts[i].blockCount) { + BlockMarkAllocated(vcb, exts[i].startBlock, exts[i].blockCount); + vcb->vcbFreeBlocks -= exts[i].blockCount; + } + } + MarkVCBDirty(vcb); + } + } + return err; +} diff --git a/fsck_hfs/dfalib/SBTree.c b/fsck_hfs/dfalib/SBTree.c new file mode 100644 index 0000000..517cddc --- /dev/null +++ b/fsck_hfs/dfalib/SBTree.c @@ -0,0 +1,621 @@ +/* + * Copyright (c) 1999, 2002, 2006 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SBTree.c + + Contains: Scavanger B-tree callback procs + + Version: HFS Plus 1.0 + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. + +*/ + + +#include "BTree.h" +#include "BTreePrivate.h" +#include "Scavenger.h" + + + +// local routines +static void InvalidateBTreeIterator ( SFCB *fcb ); +static OSErr CheckBTreeKey(const BTreeKey *key, const BTreeControlBlock *btcb); +static Boolean ValidHFSRecord(const void *record, const BTreeControlBlock *btcb, UInt16 recordSize); + + +// This function determines the size of the output buffer depending on +// the type of the BTree. It should really be taking the buffer size +// as input instead of assuming it (4425231). 
+// +// This function may also truncate inline attribute record because caller +// sends HFSPlusAttrRecord as output buffer and this function also assumes +// the output buffer of size HFSPlusAttrRecord. It may therefore not be +// enough to copy entire inline attribute record (4425232). + +OSErr SearchBTreeRecord(SFCB *fcb, const void* key, UInt32 hint, void* foundKey, void* data, UInt16 *dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator searchIterator; + BTreeIterator *resultIterator; + BTreeControlBlock *btcb; + OSStatus result; + + + btcb = (BTreeControlBlock*) fcb->fcbBtree; + + btRecord.bufferAddress = data; + btRecord.itemCount = 1; + if ( btcb->maxKeyLength == kHFSExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSExtentRecord); + else if ( btcb->maxKeyLength == kHFSPlusExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + else if ( btcb->maxKeyLength == kHFSPlusAttrKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSPlusAttrRecord); + else + btRecord.itemSize = sizeof(CatalogRecord); + + searchIterator.hint.writeCount = 0; // clear these out for debugging... + searchIterator.hint.reserved1 = 0; + searchIterator.hint.reserved2 = 0; + + searchIterator.hint.nodeNum = hint; + searchIterator.hint.index = 0; + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + CopyMemory(key, &searchIterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //¥¥ should we range check against maxkeylen? + + resultIterator = &btcb->lastIterator; + + result = BTSearchRecord( fcb, &searchIterator, kInvalidMRUCacheKey, &btRecord, dataSize, resultIterator ); + if (result == noErr) + { + if (newHint != NULL) + *newHint = resultIterator->hint.nodeNum; + + result = CheckBTreeKey(&resultIterator->key, btcb); + ExitOnError(result); + + if (foundKey != NULL) + CopyMemory(&resultIterator->key, foundKey, CalcKeySize(btcb, &resultIterator->key)); //¥¥ warning, this could overflow user's buffer!!! + + if ( DEBUG_BUILD && !ValidHFSRecord(data, btcb, *dataSize) ) + DebugStr("\pSearchBTreeRecord: bad record?"); + } + +ErrorExit: + + return result; +} + + + +// Note +// The new B-tree manager differs from the original b-tree in how it does iteration. We need +// to account for these differences here. We save an iterator in the BTree control block so +// that we have a context in which to perfrom the iteration. Also note that the old B-tree +// allowed you to specify any number relative to the last operation (including 0) whereas the +// new B-tree only allows next/previous. +// +// This function determines the size of the output buffer depending on +// the type of the BTree. It should really be taking the buffer size +// as input instead of assuming it (4425231). +// +// This function may also truncate inline attribute record because caller +// sends HFSPlusAttrRecord as output buffer and this function also assumes +// the output buffer of size HFSPlusAttrRecord. It may therefore not be +// enough to copy entire inline attribute record (4425232). + +OSErr GetBTreeRecord(SFCB *fcb, SInt16 selectionIndex, void* key, void* data, UInt16 *dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator *iterator; + BTreeControlBlock *btcb; + OSStatus result; + UInt16 operation; + + + // pick up our iterator in the BTCB for context... 
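+	// Summary of the mapping below: selectionIndex 1 / -1 / 0 become
+	// kBTreeNextRecord / kBTreePrevRecord / kBTreeCurrentRecord, 0x8001 and
+	// 0x7FFF select the first and last records, and any other value is
+	// expanded into that many next or previous iterations.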
+ + btcb = (BTreeControlBlock*) fcb->fcbBtree; + iterator = &btcb->lastIterator; + + btRecord.bufferAddress = data; + btRecord.itemCount = 1; + if ( btcb->maxKeyLength == kHFSExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSExtentRecord); + else if ( btcb->maxKeyLength == kHFSPlusExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + else if ( btcb->maxKeyLength == kHFSPlusAttrKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSPlusAttrRecord); + else + btRecord.itemSize = sizeof(CatalogRecord); + + // now we have to map index into next/prev operations... + + if (selectionIndex == 1) + { + operation = kBTreeNextRecord; + } + else if (selectionIndex == -1) + { + operation = kBTreePrevRecord; + } + else if (selectionIndex == 0) + { + operation = kBTreeCurrentRecord; + } + else if (selectionIndex == (SInt16) 0x8001) + { + operation = kBTreeFirstRecord; + } + else if (selectionIndex == (SInt16) 0x7FFF) + { + operation = kBTreeLastRecord; + } + else if (selectionIndex > 1) + { + UInt32 i; + + for (i = 1; i < selectionIndex; ++i) + { + result = BTIterateRecord( fcb, kBTreeNextRecord, iterator, &btRecord, dataSize ); + ExitOnError(result); + } + operation = kBTreeNextRecord; + } + else // (selectionIndex < -1) + { + SInt32 i; + + for (i = -1; i > selectionIndex; --i) + { + result = BTIterateRecord( fcb, kBTreePrevRecord, iterator, &btRecord, dataSize ); + ExitOnError(result); + } + operation = kBTreePrevRecord; + } + + result = BTIterateRecord( fcb, operation, iterator, &btRecord, dataSize ); + + if (result == noErr) + { + *newHint = iterator->hint.nodeNum; + + result = CheckBTreeKey(&iterator->key, btcb); + ExitOnError(result); + + CopyMemory(&iterator->key, key, CalcKeySize(btcb, &iterator->key)); //¥¥ warning, this could overflow user's buffer!!! + + if ( DEBUG_BUILD && !ValidHFSRecord(data, btcb, *dataSize) ) + DebugStr("\pGetBTreeRecord: bad record?"); + + } + +ErrorExit: + + return result; +} + + +OSErr InsertBTreeRecord(SFCB *fcb, const void* key, const void* data, UInt16 dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator iterator; + BTreeControlBlock *btcb; + OSStatus result; + + + btcb = (BTreeControlBlock*) fcb->fcbBtree; + + btRecord.bufferAddress = (void *)data; + btRecord.itemSize = dataSize; + btRecord.itemCount = 1; + + iterator.hint.nodeNum = 0; // no hint + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + CopyMemory(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //¥¥ should we range check against maxkeylen? + + if ( DEBUG_BUILD && !ValidHFSRecord(data, btcb, dataSize) ) + DebugStr("\pInsertBTreeRecord: bad record?"); + + result = BTInsertRecord( fcb, &iterator, &btRecord, dataSize ); + + *newHint = iterator.hint.nodeNum; + + InvalidateBTreeIterator(fcb); // invalidate current record markers + +ErrorExit: + + return result; +} + + +OSErr DeleteBTreeRecord(SFCB *fcb, const void* key) +{ + BTreeIterator iterator; + BTreeControlBlock *btcb; + OSStatus result; + + + btcb = (BTreeControlBlock*) fcb->fcbBtree; + + iterator.hint.nodeNum = 0; // no hint + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + CopyMemory(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //¥¥ should we range check against maxkeylen? 
+ + result = BTDeleteRecord( fcb, &iterator ); + + InvalidateBTreeIterator(fcb); // invalidate current record markers + +ErrorExit: + + return result; +} + + +OSErr ReplaceBTreeRecord(SFCB *fcb, const void* key, UInt32 hint, void *newData, UInt16 dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator iterator; + BTreeControlBlock *btcb; + OSStatus result; + + + btcb = (BTreeControlBlock*) fcb->fcbBtree; + + btRecord.bufferAddress = newData; + btRecord.itemSize = dataSize; + btRecord.itemCount = 1; + + iterator.hint.nodeNum = hint; + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + CopyMemory(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //¥¥ should we range check against maxkeylen? + + if ( DEBUG_BUILD && !ValidHFSRecord(newData, btcb, dataSize) ) + DebugStr("\pReplaceBTreeRecord: bad record?"); + + result = BTReplaceRecord( fcb, &iterator, &btRecord, dataSize ); + + *newHint = iterator.hint.nodeNum; + + //¥¥Êdo we need to invalidate the iterator? + +ErrorExit: + + return result; +} + + +OSStatus +SetEndOfForkProc ( SFCB *filePtr, FSSize minEOF, FSSize maxEOF ) +{ +#pragma unused (maxEOF) + + OSStatus result; + UInt32 actualSectorsAdded; + UInt64 bytesToAdd; + UInt64 fileSize; // in sectors + SVCB * vcb; + UInt32 flags; + + + if ( minEOF > filePtr->fcbLogicalSize ) + { + bytesToAdd = minEOF - filePtr->fcbLogicalSize; + + if (bytesToAdd < filePtr->fcbClumpSize) + bytesToAdd = filePtr->fcbClumpSize; //¥¥Êwhy not always be a mutiple of clump size ??? + } + else + { + if ( DEBUG_BUILD ) + DebugStr("\pSetEndOfForkProc: minEOF is smaller than current size!"); + return -1; + } + + vcb = filePtr->fcbVolume; + + flags = kEFNoClumpMask; + + // Due to time contraints we force the new rebuilt catalog file to be contiguous. + // It's hard to handle catalog file in extents because we have to do a swap + // of the old catalog file with the rebuilt catalog file at the end of + // the rebuild process. Extent records use the file ID as part of the key so + // it would be messy to fix them after the swap. + if ( filePtr->fcbFileID == kHFSRepairCatalogFileID) + flags |= kEFNoExtOvflwMask; + + result = ExtendFileC ( vcb, filePtr, (bytesToAdd+511)>>9, flags, &actualSectorsAdded ); + ReturnIfError(result); + + filePtr->fcbLogicalSize = filePtr->fcbPhysicalSize; // new B-tree looks at fcbEOF + fileSize = filePtr->fcbLogicalSize >> 9; // get size in sectors (for calls to ZeroFileBlocks) + + // + // Make sure we got at least as much space as we needed + // + if (filePtr->fcbLogicalSize < minEOF) { + Panic("\pSetEndOfForkProc: disk too full to extend B-tree file"); + return dskFulErr; + } + + // + // Update the Alternate MDB or Alternate HFSPlusVolumeHeader + // + if ( vcb->vcbSignature == kHFSPlusSigWord ) + { + // If any of the HFS+ private files change size, flush them back to the Alternate volume header + if ( (filePtr->fcbFileID == kHFSExtentsFileID) + || (filePtr->fcbFileID == kHFSCatalogFileID) + || (filePtr->fcbFileID == kHFSStartupFileID) + || (filePtr->fcbFileID == kHFSAttributesFileID) + || (filePtr->fcbFileID == kHFSRepairCatalogFileID) ) + { + MarkVCBDirty( vcb ); + result = FlushAlternateVolumeControlBlock( vcb, true ); + + // Zero newly allocated portion of HFS+ private file. 
+ if ( result == noErr ) + result = ZeroFileBlocks( vcb, filePtr, fileSize - actualSectorsAdded, actualSectorsAdded ); + } + } + else if ( vcb->vcbSignature == kHFSSigWord ) + { + if ( filePtr->fcbFileID == kHFSExtentsFileID ) + { + // vcb->vcbXTAlBlks = filePtr->fcbPhysicalSize / vcb->vcbBlockSize; + MarkVCBDirty( vcb ); + result = FlushAlternateVolumeControlBlock( vcb, false ); + if ( result == noErr ) + result = ZeroFileBlocks( vcb, filePtr, fileSize - actualSectorsAdded, actualSectorsAdded ); + } + else if ( filePtr->fcbFileID == kHFSCatalogFileID || filePtr->fcbFileID == kHFSRepairCatalogFileID ) + { + // vcb->vcbCTAlBlks = filePtr->fcbPhysicalSize / vcb->vcbBlockSize; + MarkVCBDirty( vcb ); + result = FlushAlternateVolumeControlBlock( vcb, false ); + if ( result == noErr ) + result = ZeroFileBlocks( vcb, filePtr, fileSize - actualSectorsAdded, actualSectorsAdded ); + } + } + + return result; + +} // end SetEndOfForkProc + + +static void +InvalidateBTreeIterator(SFCB *fcb) +{ + BTreeControlBlock *btcb; + + btcb = (BTreeControlBlock*) fcb->fcbBtree; + + (void) BTInvalidateHint ( &btcb->lastIterator ); +} + + +static OSErr CheckBTreeKey(const BTreeKey *key, const BTreeControlBlock *btcb) +{ + UInt16 keyLen; + + if ( btcb->attributes & kBTBigKeysMask ) + keyLen = key->length16; + else + keyLen = key->length8; + + if ( (keyLen < 6) || (keyLen > btcb->maxKeyLength) ) + { + if ( DEBUG_BUILD ) + DebugStr("\pCheckBTreeKey: bad key length!"); + return fsBTInvalidKeyLengthErr; + } + + return noErr; +} + + +static Boolean ValidHFSRecord(const void *record, const BTreeControlBlock *btcb, UInt16 recordSize) +{ + UInt32 cNodeID; + + if ( btcb->maxKeyLength == kHFSExtentKeyMaximumLength ) + { + return ( recordSize == sizeof(HFSExtentRecord) ); + } + else if (btcb->maxKeyLength == kHFSPlusExtentKeyMaximumLength ) + { + return ( recordSize == sizeof(HFSPlusExtentRecord) ); + } + else if (btcb->maxKeyLength == kAttributeKeyMaximumLength ) + { + HFSPlusAttrRecord *attributeRecord = (HFSPlusAttrRecord *) record; + + switch (attributeRecord->recordType) { + case kHFSPlusAttrInlineData: + break; + + case kHFSPlusAttrForkData: + break; + + case kHFSPlusAttrExtents: + break; + } + } + else // Catalog record + { + CatalogRecord *catalogRecord = (CatalogRecord*) record; + + switch(catalogRecord->recordType) + { + case kHFSFolderRecord: + { + if ( recordSize != sizeof(HFSCatalogFolder) ) + return false; + if ( catalogRecord->hfsFolder.flags != 0 ) + return false; + if ( catalogRecord->hfsFolder.valence > 0x7FFF ) + return false; + + cNodeID = catalogRecord->hfsFolder.folderID; + + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + } + break; + + case kHFSPlusFolderRecord: + { + if ( recordSize != sizeof(HFSPlusCatalogFolder) ) + return false; + if ( (catalogRecord->hfsPlusFolder.flags & (kHFSFileLockedMask | kHFSThreadExistsMask)) != 0 ) + return false; + + cNodeID = catalogRecord->hfsPlusFolder.folderID; + + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + } + break; + + case kHFSFileRecord: + { + UInt16 i; + HFSExtentDescriptor *dataExtent; + HFSExtentDescriptor *rsrcExtent; + + if ( recordSize != sizeof(HFSCatalogFile) ) + return false; + if ( (catalogRecord->hfsFile.flags & ~(0x83)) != 0 ) + return false; + + cNodeID = catalogRecord->hfsFile.fileID; + + if ( cNodeID < 16 ) + return false; + + // make sure 0 ² LEOF ² PEOF for both forks + + if ( catalogRecord->hfsFile.dataLogicalSize < 0 ) + return false; + if ( catalogRecord->hfsFile.dataPhysicalSize < 
catalogRecord->hfsFile.dataLogicalSize ) + return false; + if ( catalogRecord->hfsFile.rsrcLogicalSize < 0 ) + return false; + if ( catalogRecord->hfsFile.rsrcPhysicalSize < catalogRecord->hfsFile.rsrcLogicalSize ) + return false; + + dataExtent = (HFSExtentDescriptor*) &catalogRecord->hfsFile.dataExtents; + rsrcExtent = (HFSExtentDescriptor*) &catalogRecord->hfsFile.rsrcExtents; + + for (i = 0; i < kHFSExtentDensity; ++i) + { + if ( (dataExtent[i].blockCount > 0) && (dataExtent[i].startBlock == 0) ) + return false; + if ( (rsrcExtent[i].blockCount > 0) && (rsrcExtent[i].startBlock == 0) ) + return false; + } + } + break; + + case kHFSPlusFileRecord: + { + UInt16 i; + HFSPlusExtentDescriptor *dataExtent; + HFSPlusExtentDescriptor *rsrcExtent; + + if ( recordSize != sizeof(HFSPlusCatalogFile) ) + return false; + + cNodeID = catalogRecord->hfsPlusFile.fileID; + + if ( cNodeID < 16 ) + return false; + + //¥¥ make sure 0 ² LEOF ² PEOF for both forks + + dataExtent = (HFSPlusExtentDescriptor*) &catalogRecord->hfsPlusFile.dataFork.extents; + rsrcExtent = (HFSPlusExtentDescriptor*) &catalogRecord->hfsPlusFile.resourceFork.extents; + + for (i = 0; i < kHFSPlusExtentDensity; ++i) + { + if ( (dataExtent[i].blockCount > 0) && (dataExtent[i].startBlock == 0) ) + return false; + if ( (rsrcExtent[i].blockCount > 0) && (rsrcExtent[i].startBlock == 0) ) + return false; + } + } + break; + + case kHFSFolderThreadRecord: + case kHFSFileThreadRecord: + { + if ( recordSize != sizeof(HFSCatalogThread) ) + return false; + + cNodeID = catalogRecord->hfsThread.parentID; + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + + if ( (catalogRecord->hfsThread.nodeName[0] == 0) || + (catalogRecord->hfsThread.nodeName[0] > 31) ) + return false; + } + break; + + case kHFSPlusFolderThreadRecord: + case kHFSPlusFileThreadRecord: + { + if ( recordSize > sizeof(HFSPlusCatalogThread) || recordSize < (sizeof(HFSPlusCatalogThread) - sizeof(HFSUniStr255))) + return false; + + cNodeID = catalogRecord->hfsPlusThread.parentID; + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + + if ( (catalogRecord->hfsPlusThread.nodeName.length == 0) || + (catalogRecord->hfsPlusThread.nodeName.length > 255) ) + return false; + } + break; + + default: + return false; + } + } + + return true; // record appears to be OK +} + diff --git a/fsck_hfs/dfalib/SCatalog.c b/fsck_hfs/dfalib/SCatalog.c new file mode 100644 index 0000000..80aabdd --- /dev/null +++ b/fsck_hfs/dfalib/SCatalog.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 1999-2000, 2002, 2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "Scavenger.h" + + +OSErr FlushCatalogFile( SVCB *vcb ) +{ + OSErr err; + + err = BTFlushPath(vcb->vcbCatalogFile); + if ( err == noErr ) + { + if( ( vcb->vcbCatalogFile->fcbFlags & fcbModifiedMask ) != 0 ) + { + (void) MarkVCBDirty( vcb ); + err = FlushVolumeControlBlock( vcb ); + } + } + + return( err ); +} + +OSErr LocateCatalogNode(SFCB *fcb, BTreeIterator *iterator, FSBufferDescriptor *btRecord, UInt16 *reclen) +{ + CatalogRecord * recp; + CatalogKey * keyp; + CatalogName * namep = NULL; + UInt32 threadpid = 0; + OSErr result; + Boolean isHFSPlus = false; + + result = BTSearchRecord(fcb, iterator, kInvalidMRUCacheKey, btRecord, reclen, iterator); + if (result == btNotFound) + result = cmNotFound; + ReturnIfError(result); + + recp = (CatalogRecord *)btRecord->bufferAddress; + keyp = (CatalogKey*)&iterator->key; + + /* if we got a thread record, then go look up real record */ + switch (recp->recordType) { + case kHFSFileThreadRecord: + case kHFSFolderThreadRecord: + threadpid = recp->hfsThread.parentID; + namep = (CatalogName *) &recp->hfsThread.nodeName; + isHFSPlus = false; + break; + + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + threadpid = recp->hfsPlusThread.parentID; + namep = (CatalogName *) &recp->hfsPlusThread.nodeName; + isHFSPlus = true; + break; + + default: + threadpid = 0; + break; + } + + if (threadpid) { + (void) BTInvalidateHint(iterator); + BuildCatalogKey(threadpid, namep, isHFSPlus, keyp); + result = BTSearchRecord(fcb, iterator, kInvalidMRUCacheKey, btRecord, reclen, iterator); + } + + return result; +} + + +OSErr +UpdateFolderCount(SVCB *vcb, HFSCatalogNodeID pid, const CatalogName *name, SInt16 newType, + UInt32 hint, SInt16 valenceDelta) +{ + CatalogRecord tempData; // 520 bytes + HFSCatalogNodeID folderID; + UInt16 reclen; + OSErr result; + BTreeIterator btIterator; + FSBufferDescriptor btRecord; + + btRecord.bufferAddress = &tempData; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(tempData); + + ClearMemory(&btIterator, sizeof(btIterator)); + btIterator.hint.nodeNum = hint; + BuildCatalogKey(pid, name, vcb->vcbSignature == kHFSPlusSigWord, (CatalogKey*)&btIterator.key); + result = LocateCatalogNode(vcb->vcbCatalogFile, &btIterator, &btRecord, &reclen); + ReturnIfError(result); + + if (vcb->vcbSignature == kHFSPlusSigWord) { + UInt32 timeStamp; + + timeStamp = GetTimeUTC(); + tempData.hfsPlusFolder.valence += valenceDelta; // adjust valence + tempData.hfsPlusFolder.contentModDate = timeStamp; // set date/time last modified + folderID = tempData.hfsPlusFolder.folderID; + } else /* kHFSSigWord */ { + tempData.hfsFolder.valence += valenceDelta; // adjust valence + tempData.hfsFolder.modifyDate = GetTimeLocal(true); // set date/time last modified + folderID = tempData.hfsFolder.folderID; + } + + result = BTReplaceRecord(vcb->vcbCatalogFile, &btIterator, &btRecord, reclen); + ReturnIfError(result); + + if (folderID == kHFSRootFolderID) { + if (newType == kHFSFolderRecord || newType == kHFSPlusFolderRecord) + vcb->vcbNmRtDirs += valenceDelta; // adjust root folder count (undefined for HFS Plus) + else + vcb->vcbNmFls += valenceDelta; // adjust root file count (used by GetVolInfo) + } + + if (newType == kHFSFolderRecord || newType == kHFSPlusFolderRecord) + vcb->vcbFolderCount += valenceDelta; // adjust volume folder count, €€ worry about overflow? 
+ else + vcb->vcbFileCount += valenceDelta; // adjust volume file count + + vcb->vcbModifyDate = GetTimeUTC(); // update last modified date + MarkVCBDirty( vcb ); + + return result; +} + + +/* Delete the catalog node with given name from given parent directory. + * The boolean value for_rename indicates that the caller is interested + * in deleting this record as part of rename operation and hence when set + * to true, the function does not return error if the directory record + * being deleted has non-zero valence and does not deallocate blocks for given + * file. + */ +OSErr +DeleteCatalogNode(SVCB *vcb, UInt32 pid, const CatalogName * name, UInt32 hint, Boolean for_rename) +{ + CatalogKey * keyp; + CatalogRecord rec; + BTreeIterator btIterator; + FSBufferDescriptor btRecord; + + HFSCatalogNodeID nodeID; + HFSCatalogNodeID nodeParentID; + UInt16 nodeType; + UInt16 reclen; + OSErr result; + Boolean isHFSPlus = (vcb->vcbSignature == kHFSPlusSigWord); + + btRecord.bufferAddress = &rec; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(rec); + + ClearMemory(&btIterator, sizeof(btIterator)); + btIterator.hint.nodeNum = hint; + keyp = (CatalogKey*)&btIterator.key; + BuildCatalogKey(pid, name, isHFSPlus, keyp); + + result = LocateCatalogNode(vcb->vcbCatalogFile, &btIterator, &btRecord, &reclen); + ReturnIfError(result); + + /* establish real parent cnid and cnode type */ + nodeParentID = isHFSPlus ? keyp->hfsPlus.parentID : keyp->hfs.parentID; + nodeType = rec.recordType; + nodeID = 0; + + switch (nodeType) { + case kHFSFolderRecord: + if ((for_rename == false) && (rec.hfsFolder.valence != 0)) + return cmNotEmpty; + + nodeID = rec.hfsFolder.folderID; + break; + + case kHFSPlusFolderRecord: + if ((for_rename == false) && (rec.hfsPlusFolder.valence != 0)) + return cmNotEmpty; + + nodeID = rec.hfsPlusFolder.folderID; + break; + + case kHFSFileRecord: + if (rec.hfsFile.flags & kHFSThreadExistsMask) + nodeID = rec.hfsFile.fileID; + break; + + case kHFSPlusFileRecord: + nodeID = rec.hfsPlusFile.fileID; + break; + + default: + return cmNotFound; + } + + if (nodeID == kHFSRootFolderID) + return cmRootCN; /* sorry, you can't delete the root! 
*/ + + /* delete catalog records for CNode and thread */ + result = BTDeleteRecord(vcb->vcbCatalogFile, &btIterator); + ReturnIfError(result); + + (void) BTInvalidateHint(&btIterator); + + if ( nodeID ) { + BuildCatalogKey(nodeID, NULL, isHFSPlus, keyp); + (void) BTDeleteRecord(vcb->vcbCatalogFile, &btIterator); + } + + /* update directory and volume stats */ + + result = UpdateFolderCount(vcb, nodeParentID, NULL, nodeType, kNoHint, -1); + ReturnIfError(result); + + (void) FlushCatalogFile(vcb); + + if (((nodeType == kHFSPlusFileRecord) || (nodeType == kHFSFileRecord)) && + (for_rename == false)) + result = DeallocateFile(vcb, &rec); + + return result; +} + + +OSErr +GetCatalogNode(SVCB *vcb, UInt32 pid, const CatalogName * name, UInt32 hint, CatalogRecord *data) +{ + CatalogKey * keyp; + BTreeIterator btIterator; + FSBufferDescriptor btRecord; + + UInt16 reclen; + OSErr result; + Boolean isHFSPlus = (vcb->vcbSignature == kHFSPlusSigWord); + + btRecord.bufferAddress = data; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(CatalogRecord); + + ClearMemory(&btIterator, sizeof(btIterator)); + btIterator.hint.nodeNum = hint; + keyp = (CatalogKey*)&btIterator.key; + BuildCatalogKey(pid, name, isHFSPlus, keyp); + + result = LocateCatalogNode(vcb->vcbCatalogFile, &btIterator, &btRecord, &reclen); + + return result; +} + diff --git a/fsck_hfs/dfalib/SControl.c b/fsck_hfs/dfalib/SControl.c new file mode 100644 index 0000000..5c654b9 --- /dev/null +++ b/fsck_hfs/dfalib/SControl.c @@ -0,0 +1,1585 @@ +/* + * Copyright (c) 1999-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SControl.c + + Contains: This file contains the routines which control the scavenging operations. + + Version: xxx put version here xxx + + Written by: Bill Bruffey + + Copyright: © 1985, 1986, 1992-1999 by Apple Computer, Inc., all rights reserved. +*/ + +#define SHOW_ELAPSED_TIMES 0 + + +#if SHOW_ELAPSED_TIMES +#include <sys/time.h> +#endif + +#include "Scavenger.h" +#include "fsck_journal.h" +#include <setjmp.h> +#include <unistd.h> + +#ifndef CONFIG_HFS_TRIM +#define CONFIG_HFS_TRIM 1 +#endif + +#define DisplayTimeRemaining 0 + +/* Variable containing diskdev_cmds tag number and date/time when the binary was built. + * This variable is populated automatically using version.pl by B&I when building the + * project. For development purposes, if the current directory name looks something + * like a tag name or a version number is provided to buildit, buildit populates it + * correctly. 
For all other ways to build the code, like 'make', the tag number will + * be left empty and only project name and build date/time will be shown. + * + * TODO: Get this building properly within Xcode, without need for the version.pl script! + */ +extern const unsigned char fsck_hfsVersionString[]; + +int gGUIControl; +extern char lflag; + + +// Static function prototypes + +static void printVerifyStatus( SGlobPtr GPtr ); +static Boolean IsBlueBoxSharedDrive ( DrvQElPtr dqPtr ); +static int ScavSetUp( SGlobPtr GPtr ); +static int ScavTerm( SGlobPtr GPtr ); + +/* this procedure receives progress calls and will allow canceling of procedures - we are using it here to print out the progress of the current operation for DFA and DiskUtility - ESP 1/10/00 */ + +int cancelProc(UInt16 progress, UInt16 secondsRemaining, Boolean progressChanged, UInt16 stage, void *context, int passno) +{ + if (progressChanged) { + int base; + int pct; + int scale; + static int lastPct = -1; + if (passno < 0) { + base = 0; + scale = 100; + } else { + base = (passno * 100) / kMaxReScan; // Multiply by 100 because we're doing ints + scale = 100 / kMaxReScan; + } + pct = ((progress * scale) / 100) + base; + if (pct != lastPct && pct != 100) { + fsckPrint((fsck_ctx_t)context, fsckProgress, pct); + lastPct = pct; + draw_progress(pct); + } + } + return 0; +} + +static const int kMaxMediumErrors = 25; + +/* + * Determine whether an error is major or minor. The main critera we chose for + * this is whether you can continue to use -- reading, creating, and deleting -- + * in a volume with the error present. This should at some point go into the + * message structure itself. + */ +static int +isMinorError(int msg, int *counts) +{ + switch (msg) { + case hfsExtBTCheck: + case hfsCatBTCheck: + case hfsCatHierCheck: + case hfsExtAttrBTCheck: + case hfsVolBitmapCheck: + case hfsVolInfoCheck: + case hfsHardLinkCheck: + case hfsRebuildExtentBTree: + case hfsRebuildCatalogBTree: + case hfsRebuildAttrBTree: + case hfsCaseSensitive: + case hfsMultiLinkDirCheck: + case hfsJournalVolCheck: + case hfsLiveVerifyCheck: + case hfsVerifyVolWithWrite: + case hfsCheckHFS: + case hfsCheckNoJnl: + case E_DirVal: + case E_CName: + case E_NoFile: + case E_NoRtThd: + case E_NoThd: + case E_NoDir: + case E_RtDirCnt: + case E_RtFilCnt: + case E_DirCnt: + case E_FilCnt: + case E_CatDepth: + case E_NoFThdFlg: + case E_CatalogFlagsNotZero: + case E_BadFileName: + case E_InvalidClumpSize: + case E_LockedDirName: + case E_FreeBlocks: + case E_LeafCnt: + case E_BadValue: + case E_InvalidID: + case E_DiskFull: + case E_InvalidLinkCount: + case E_UnlinkedFile: + case E_InvalidPermissions: + case E_InvalidUID_Unused: + case E_IllegalName: + case E_IncorrectNumThdRcd: + case E_SymlinkCreate: + case E_IncorrectAttrCount: + case E_IncorrectSecurityCount: + case E_PEOAttr: + case E_LEOAttr: + case E_FldCount: + case E_HsFldCount: + case E_BadPermPrivDir: + case E_DirInodeBadFlags: + case E_DirInodeBadParent: + case E_DirInodeBadName: + case E_DirHardLinkChain: + case E_DirHardLinkOwnerFlags: + case E_DirHardLinkFinderInfo: + case E_DirLinkAncestorFlags: + case E_DirHardLinkNesting: + case E_InvalidLinkChainPrev: + case E_InvalidLinkChainNext: + case E_FileInodeBadFlags: + case E_FileInodeBadName: + case E_FileHardLinkChain: + case E_FileHardLinkFinderInfo: + case E_InvalidLinkChainFirst: + case E_FileLinkBadFlags: + case E_DirLinkBadFlags: + case E_OrphanFileLink: + case E_OrphanDirLink: + case E_OrphanFileInode: + case E_OrphanDirInode: + case 
E_UnusedNodeNotZeroed: + case E_VBMDamagedOverAlloc: + case E_BadSymLink: + case E_BadSymLinkLength: + case E_BadSymLinkName: + return 1; + /* + * A lot of EOF errors may indicate that there were some more significant + * problems with the volume; just one by itself, with no other volume layout + * problems, won't affect the volume usage. So we keep track of them. + */ + case E_PEOF: + case E_LEOF: + if (++counts[abs(msg)] > kMaxMediumErrors) + return 0; + return 1; + default: + return 0; + } +} + +/*------------------------------------------------------------------------------ + +External + Routines: CheckHFS - Controls the scavenging process. + +------------------------------------------------------------------------------*/ + +static jmp_buf envBuf; +int +CheckHFS( const char *rdevnode, int fsReadRef, int fsWriteRef, int checkLevel, + int repairLevel, fsck_ctx_t fsckContext, int lostAndFoundMode, + int canWrite, int *modified, int liveMode, int rebuildOptions ) +{ + SGlob dataArea; // Allocate the scav globals + short temp; + FileIdentifierTable *fileIdentifierTable = nil; + OSErr err = noErr; + OSErr scavError = 0; + int scanCount = 0; + int isJournaled = 0; + Boolean autoRepair; + Boolean exitEarly = 0; + __block int *msgCounts = NULL; + Boolean majorErrors = 0; + + if (checkLevel == kMajorCheck) { + checkLevel = kForceCheck; + exitEarly = 1; + msgCounts = malloc(sizeof(int) * E_LastError); + } + + autoRepair = (fsWriteRef != -1 && repairLevel != kNeverRepair); + + /* Initialize the messages only once before the verify stage */ + if (fsckContext) { + extern fsck_message_t hfs_messages[]; + extern fsck_message_t hfs_errors[]; + + if (fsckAddMessages(fsckContext, hfs_messages) == -1 || + fsckAddMessages(fsckContext, hfs_errors) == -1) { + // XXX + return -1; + } + } + + /* + * Get the project name and version that is being built. + * + * The __fsck_hfsVersionString contents are of the form: + * "@(#)PROGRAM:fsck_hfs PROJECT:hfs-557~332\n" + */ + if (1) { + const char project[] = " PROJECT:"; + char *vstr, *tmp; + + tmp = strstr((const char *)fsck_hfsVersionString, project); + if (tmp) { + vstr = strdup(tmp + strlen(project)); + tmp = strstr(vstr, "\n"); + if (tmp) + *tmp = 0; + } else { + vstr = strdup((const char *)fsck_hfsVersionString); + } + + fsckPrint(fsckContext, fsckInformation, "fsck_hfs", vstr); + free(vstr); + } + + if (setjmp(envBuf) == 1) { + /* + * setjmp() returns the second argument to longjmp(), so if it returns 1, then + * we've hit a major error. + */ + dataArea.RepLevel = repairLevelVeryMinorErrors; + majorErrors = 1; + goto EarlyExitLabel; + } else { + if (exitEarly && fsckContext) { + /* + * Set the after-printing block to a small bit of code that checks to see if + * the message in question corresponds to a major or a minor error. If it's + * major, we longjmp just above, which causes us to exit out early. 
+ */ + fsckSetBlock(fsckContext, fsckPhaseAfterMessage, (fsckBlock_t) ^(fsck_ctx_t c, int msgNum, va_list args) { + if (abs(msgNum) > E_FirstError && abs(msgNum) < E_LastError) { + if (isMinorError(abs(msgNum), msgCounts) == 1) + return fsckBlockContinue; + longjmp(envBuf, 1); + return fsckBlockAbort; + } else { + return fsckBlockContinue; + } + }); + } + } +DoAgain: + ClearMemory( &dataArea, sizeof(SGlob) ); + if (msgCounts) + memset(msgCounts, 0, sizeof(int) * E_LastError); + + // Initialize some scavenger globals + dataArea.itemsProcessed = 0; // Initialize to 0% complete + dataArea.itemsToProcess = 1; + dataArea.chkLevel = checkLevel; + dataArea.repairLevel = repairLevel; + dataArea.rebuildOptions = rebuildOptions; + dataArea.canWrite = canWrite; + dataArea.writeRef = fsWriteRef; + dataArea.lostAndFoundMode = lostAndFoundMode; + dataArea.DrvNum = fsReadRef; + dataArea.liveVerifyState = liveMode; + dataArea.scanCount = scanCount; + if (strlcpy(dataArea.deviceNode, rdevnode, sizeof(dataArea.deviceNode)) != strlen(rdevnode)) { + dataArea.deviceNode[0] = '\0'; + } + + /* there are cases where we cannot get the name of the volume so we */ + /* set our default name to one blank */ + dataArea.volumeName[ 0 ] = ' '; + dataArea.volumeName[ 1 ] = '\0'; + + if (fsckContext) { + dataArea.context = fsckContext; + dataArea.guiControl = true; + dataArea.userCancelProc = cancelProc; + } + // + // Initialize the scavenger + // + ScavCtrl( &dataArea, scavInitialize, &scavError ); + if ( checkLevel == kNeverCheck || (checkLevel == kDirtyCheck && dataArea.cleanUnmount) || + scavError == R_NoMem || scavError == R_BadSig) { + // also need to bail when allocate fails in ScavSetUp or we bus error! + goto termScav; + } + + isJournaled = CheckIfJournaled( &dataArea, false ); + if (isJournaled != 0 && + scanCount == 0 && + checkLevel != kForceCheck && + !(checkLevel == kPartialCheck && repairLevel == kForceRepairs)) { + if (fsckGetOutputStyle(dataArea.context) == fsckOutputTraditional) { + plog("fsck_hfs: Volume is journaled. No checking performed.\n"); + plog("fsck_hfs: Use the -f option to force checking.\n"); + } + scavError = 0; + goto termScav; + } + dataArea.calculatedVCB->vcbDriveNumber = fsReadRef; + dataArea.calculatedVCB->vcbDriverWriteRef = fsWriteRef; + + // Only show the progress bar if we're doing a real check. + if (fsckContext) { + start_progress(); + } + + // + // Now verify the volume + // + if ( scavError == noErr ) + ScavCtrl( &dataArea, scavVerify, &scavError ); + +EarlyExitLabel: + if (scavError == noErr && fsckGetVerbosity(dataArea.context) >= kDebugLog) + printVerifyStatus(&dataArea); + + // Looped for maximum times for verify and repair. 
This was the last verify and + // we bail out if problems were found + if (scanCount >= kMaxReScan && (dataArea.RepLevel != repairLevelNoProblemsFound)) { + fsckPrint(dataArea.context, fsckVolumeNotRepairedTries, dataArea.volumeName, scanCount); + scavError = R_RFail; + goto termScav; + } + + if ( dataArea.RepLevel == repairLevelUnrepairable ) + err = cdUnrepairableErr; + + if ( !autoRepair && + (dataArea.RepLevel == repairLevelVolumeRecoverable || + dataArea.RepLevel == repairLevelCatalogBtreeRebuild || + dataArea.RepLevel == repairLevelVeryMinorErrors) ) { + fsckPrint(dataArea.context, fsckVolumeCorruptNeedsRepair, dataArea.volumeName); + scavError = R_VFail; + goto termScav; + } + + if ( scavError == noErr && dataArea.RepLevel == repairLevelNoProblemsFound ) { + if (CONFIG_HFS_TRIM && + (dataArea.canWrite != 0) && (dataArea.writeRef != -1) && + IsTrimSupported()) + { + fsckPrint(dataArea.context, fsckTrimming); + TrimFreeBlocks(&dataArea); + } + + if (scanCount == 0) { + fsckPrint(dataArea.context, fsckVolumeOK, dataArea.volumeName); + } else { + fsckPrint(dataArea.context, fsckRepairSuccessful, dataArea.volumeName); + } + } + + // + // Repair the volume if it needs repairs, its repairable and we were able to unmount it + // + if ( dataArea.RepLevel == repairLevelNoProblemsFound && repairLevel == kForceRepairs ) + { + if (rebuildOptions & REBUILD_CATALOG) { + dataArea.CBTStat |= S_RebuildBTree; + } + if (rebuildOptions & REBUILD_EXTENTS) { + dataArea.EBTStat |= S_RebuildBTree; + } + if (rebuildOptions & REBUILD_ATTRIBUTE) { + dataArea.ABTStat |= S_RebuildBTree; + } + dataArea.RepLevel = repairLevelCatalogBtreeRebuild; + } + + if ( ((scavError == noErr) || (scavError == errRebuildBtree)) && + (autoRepair == true) && + (dataArea.RepLevel != repairLevelUnrepairable) && + (dataArea.RepLevel != repairLevelNoProblemsFound) ) + { + // we cannot repair a volume when others have write access to the block device + // for the volume + + if ( dataArea.canWrite == 0 ) { + scavError = R_WrErr; + fsckPrint(dataArea.context, fsckVolumeNotRepairedInUse, dataArea.volumeName); + } + else + ScavCtrl( &dataArea, scavRepair, &scavError ); + + if ( scavError == noErr ) + { + *modified = 1; /* Report back that we made repairs */ + + /* we just repaired a volume, so scan it again to check if it corrected everything properly */ + ScavCtrl( &dataArea, scavTerminate, &temp ); + repairLevel = kMajorRepairs; + checkLevel = kAlwaysCheck; + fsckPrint(dataArea.context, fsckRecheckingVolume); + scanCount++; + goto DoAgain; + } + else { + fsckPrint(dataArea.context, fsckVolumeNotRepaired, dataArea.volumeName); + } + } + else if ( scavError != noErr ) { + // Is this correct? 
+ fsckPrint(dataArea.context, fsckVolumeVerifyIncomplete, dataArea.volumeName); + if ( fsckGetVerbosity(dataArea.context) >= kDebugLog ) + plog("\tvolume check failed with error %d \n", scavError); + } + + // Set up structures for post processing + if ( (autoRepair == true) && (dataArea.fileIdentifierTable != nil) ) + { + // *repairInfo = *repairInfo | kVolumeHadOverlappingExtents; // Report back that volume has overlapping extents + fileIdentifierTable = (FileIdentifierTable *) AllocateMemory( GetHandleSize( (Handle) dataArea.fileIdentifierTable ) ); + CopyMemory( *(dataArea.fileIdentifierTable), fileIdentifierTable, GetHandleSize( (Handle) dataArea.fileIdentifierTable ) ); + } + + + // + // Post processing + // + if ( fileIdentifierTable != nil ) + { + DisposeMemory( fileIdentifierTable ); + } + +termScav: + if (gBlkListEntries != 0) + dumpblocklist(&dataArea); + + if (err == noErr) { + err = scavError; + } + + // + // Terminate the scavenger + // + + if ( fsckGetVerbosity(dataArea.context) >= kDebugLog && + (err != noErr || dataArea.RepLevel != repairLevelNoProblemsFound) ) + PrintVolumeObject(); + + // If we have write access on volume and we are allowed to write, + // mark the volume clean/dirty + if ((fsWriteRef != -1) && (dataArea.canWrite != 0)) { + Boolean update; + if (scavError) { + // Mark volume dirty + CheckForClean(&dataArea, kMarkVolumeDirty, &update); + } else { + // Mark volume clean + CheckForClean(&dataArea, kMarkVolumeClean, &update); + } + if (update) { + /* Report back that volume was modified */ + *modified = 1; + } + } + ScavCtrl( &dataArea, scavTerminate, &temp ); // Note: use a temp var so that real scav error can be returned + + if (fsckContext) { + fsckPrint( fsckContext, fsckProgress, 100); // End each run with 100% message, if desired + draw_progress(100); + end_progress(); + } + if (exitEarly && majorErrors) + err = MAJOREXIT; + + if (msgCounts) { + free(msgCounts); + } + + return( err ); +} + + +/*------------------------------------------------------------------------------ + +Function: ScavCtrl - (Scavenger Control) + +Function: Controls the scavenging process. Interfaces with the User Interface + Layer (written in PASCAL). 
+ +Input: ScavOp - scavenging operation to be performed: + + scavInitialize = start initial volume check + scavVerify = start verify + scavRepair = start repair + scavTerminate = finished scavenge + + GPtr - pointer to scavenger global area + + +Output: ScavRes - scavenge result code (R_xxx, or 0 if no error) + +------------------------------------------------------------------------------*/ + +void ScavCtrl( SGlobPtr GPtr, UInt32 ScavOp, short *ScavRes ) +{ + OSErr result; + unsigned int stat; +#if SHOW_ELAPSED_TIMES + struct timeval myStartTime; + struct timeval myEndTime; + struct timeval myElapsedTime; + struct timezone zone; +#endif + + // + // initialize some stuff + // + result = noErr; // assume good status + *ScavRes = 0; + GPtr->ScavRes = 0; + + // + // dispatch next scavenge operation + // + switch ( ScavOp ) + { + case scavInitialize: // INITIAL VOLUME CHECK + { + Boolean modified; + int clean; + + if ( ( result = ScavSetUp( GPtr ) ) ) // set up BEFORE CheckForStop + break; + if ( IsBlueBoxSharedDrive( GPtr->DrvPtr ) ) + break; + if ( ( result = CheckForStop( GPtr ) ) ) // in order to initialize wrCnt + break; + + /* Call for all chkLevel options and check return value only + * for kDirtyCheck for preen option and kNeverCheck for quick option + */ + clean = CheckForClean(GPtr, kCheckVolume, &modified); + if ((GPtr->chkLevel == kDirtyCheck) || (GPtr->chkLevel == kNeverCheck)) { + if (clean == 1) { + /* volume was unmounted cleanly */ + GPtr->cleanUnmount = true; + break; + } + + if (GPtr->chkLevel == kNeverCheck) { + if (clean == -1) + result = R_BadSig; + else if (clean == 0) { + /* + * We lie for journaled file systems since + * they get cleaned up in mount by replaying + * the journal. + * Note: CheckIfJournaled will return negative + * if it finds lastMountedVersion = FSK!. + */ + if (CheckIfJournaled(GPtr, false)) + GPtr->cleanUnmount = true; + else + result = R_Dirty; + } + break; + } + } + + if (CheckIfJournaled(GPtr, false) + && GPtr->chkLevel != kForceCheck + && !(GPtr->chkLevel == kPartialCheck && GPtr->repairLevel == kForceRepairs) + && !(GPtr->chkLevel == kAlwaysCheck && GPtr->repairLevel == kMajorRepairs)) { + break; + } + + if (GPtr->liveVerifyState) { + fsckPrint(GPtr->context, hfsLiveVerifyCheck); + } else if (GPtr->canWrite == 0 && nflag == 0) { + fsckPrint(GPtr->context, hfsVerifyVolWithWrite); + } + + /* + * In the first pass, if fsck_hfs is verifying a + * journaled volume, and it's not a live verification, + * check to see if the journal is empty. If it is not, + * flag it as a journal error, and print a message. + * (A live verify will almost certainly have a non-empty + * journal, but that should be safe in this case due + * to the freeze command flushing everything.) + */ + if ((GPtr->scanCount == 0) && + (CheckIfJournaled(GPtr, true) == 1) && + (GPtr->canWrite == 0 || GPtr->writeRef == -1) && + (lflag == 0)) { + fsckJournalInfo_t jnlInfo = { 0 }; + UInt64 numBlocks; + UInt32 blockSize; + jnlInfo.jnlfd = -1; + + if (IsJournalEmpty(GPtr, &jnlInfo) == 0) { + // disable_journal can currently only be set with debug enabled + if (disable_journal) { + fsckPrint(GPtr->context, E_DirtyJournal); + GPtr->JStat |= S_DirtyJournal; + } else { + (void)GetDeviceSize(GPtr->calculatedVCB->vcbDriveNumber, &numBlocks, &blockSize); +#if 0 + // For debugging the cache. 
WAY to verbose to run with even normal debug + if (debug) { + printf("Before journal replay\n"); + dumpCache(&fscache); + } +#endif + if (journal_open(jnlInfo.jnlfd, + jnlInfo.jnlOffset, + jnlInfo.jnlSize, + blockSize, + 0, + jnlInfo.name, + ^(off_t start, void *data, size_t len) { + Buf_t *buf; + int rv; + rv = CacheRead(&fscache, start, (int)len, &buf); + if (rv != 0) + abort(); + memcpy(buf->Buffer, data, len); + rv = CacheWrite(&fscache, buf, 0, kLockWrite); + if (rv != 0) + abort(); + return 0;} + ) == -1) { + fsckPrint(GPtr->context, E_DirtyJournal); + GPtr->JStat |= S_DirtyJournal; + } else if (debug) { + plog("Journal replay simulation succeeded\n"); +#if 0 + // Still way too verbose to run + dumpCache(&fscache); +#endif + } + } + } else { + if (debug) + plog("Journal is empty\n"); + } + if (jnlInfo.jnlfd != -1) + close(jnlInfo.jnlfd); + if (jnlInfo.name != NULL) + free(jnlInfo.name); + } + + result = IVChk( GPtr ); + + break; + } + + case scavVerify: // VERIFY + { + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Initialize volume bitmap structure */ + if ( BitMapCheckBegin(GPtr) != 0) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - BitMapCheck elapsed time \n", __FUNCTION__ ); + plog( "########## secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ( IsBlueBoxSharedDrive( GPtr->DrvPtr ) ) + break; + if ( ( result = CheckForStop( GPtr ) ) ) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Create calculated BTree structures */ + if ( ( result = CreateExtentsBTreeControlBlock( GPtr ) ) ) + break; + if ( ( result = CreateCatalogBTreeControlBlock( GPtr ) ) ) + break; + if ( ( result = CreateAttributesBTreeControlBlock( GPtr ) ) ) + break; + if ( ( result = CreateExtendedAllocationsFCB( GPtr ) ) ) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - create control blocks elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + // Now that preflight of the BTree structures is calculated, compute the CheckDisk items + CalculateItemCount( GPtr, &GPtr->itemsToProcess, &GPtr->onePercent ); + GPtr->itemsProcessed += GPtr->onePercent; // We do this 4 times as set up in CalculateItemCount() to smooth the scroll + + if ( ( result = VLockedChk( GPtr ) ) ) + break; + + GPtr->itemsProcessed += GPtr->onePercent; // We do this 4 times as set up in CalculateItemCount() to smooth the scroll + fsckPrint(GPtr->context, hfsExtBTCheck); + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Verify extent btree structure */ + if ((result = ExtBTChk(GPtr))) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - ExtBTChk elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ((result = CheckForStop(GPtr))) + break; + + GPtr->itemsProcessed += GPtr->onePercent; // We do this 4 times as set up in CalculateItemCount() to smooth the scroll + + /* Check extents of bad block file */ + if ((result = BadBlockFileExtentCheck(GPtr))) + break; + if ((result = CheckForStop(GPtr))) + break; + + GPtr->itemsProcessed += GPtr->onePercent; // We do this 4 
times as set up in CalculateItemCount() to smooth the scroll + GPtr->itemsProcessed += GPtr->onePercent; + fsckPrint(GPtr->context, hfsCatBTCheck); + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + if ( GPtr->chkLevel == kPartialCheck ) + { + /* skip the rest of the verify code path the first time */ + /* through when we are rebuilding the catalog B-Tree file. */ + /* we will be back here after the rebuild. */ + if (GPtr->rebuildOptions & REBUILD_CATALOG) { + GPtr->CBTStat |= S_RebuildBTree; + } + if (GPtr->rebuildOptions & REBUILD_EXTENTS) { + GPtr->EBTStat |= S_RebuildBTree; + } + if (GPtr->rebuildOptions & REBUILD_ATTRIBUTE) { + GPtr->ABTStat |= S_RebuildBTree; + } + result = errRebuildBtree; + break; + } + + /* Check catalog btree. For given fileID, the function accounts + * for all extents existing in catalog record as well as in + * overflow extent btree + */ + if ((result = CheckCatalogBTree(GPtr))) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - CheckCatalogBTree elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ((result = CheckForStop(GPtr))) + break; + + if (scanflag == 0) { + fsckPrint(GPtr->context, hfsCatHierCheck); + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Check catalog hierarchy */ + if ((result = CatHChk(GPtr))) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - CatHChk elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ((result = CheckForStop(GPtr))) + break; + + if (VolumeObjectIsHFSX(GPtr)) { + result = CheckFolderCount(GPtr); + if (result) + break; + + if ((result=CheckForStop(GPtr))) + break; + } + } + /* Check attribute btree. The function accounts for all extents + * for extended attributes whose values are stored in + * allocation blocks + */ + if ((result = AttrBTChk(GPtr))) + break; + + if ((result = CheckForStop(GPtr))) + break; + + /* + * fsck_hfs has accounted for all valid allocation blocks by + * traversing all catalog records and attribute records. + * These traversals may have found overlapping extents. Note + * that the overlapping extents are detected in CaptureBitmapBits + * when it tries to set a bit corresponding to allocation block + * and finds that it is already set. Therefore fsck_hfs does not + * know the orignal file involved overlapped extents. + */ + if (GPtr->VIStat & S_OverlappingExtents) { + /* Find original files involved in overlapped extents */ + result = FindOrigOverlapFiles(GPtr); + if (result) { + break; + } + + /* Print all unique overlapping file IDs and paths */ + (void) PrintOverlapFiles(GPtr); + } + + if (scanflag == 0) { + /* Directory inodes store first link information in + * an extended attribute. Therefore start directory + * hard link check after extended attribute checks. 
+ */ + result = dirhardlink_check(GPtr); + /* On error or unrepairable corruption, stop the verification */ + if ((result != 0) || (GPtr->CatStat & S_LinkErrNoRepair)) { + if (result == 0) { + result = -1; + } + + break; + } + } + + fsckPrint(GPtr->context, hfsVolBitmapCheck); + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Compare in-memory volume bitmap with on-disk bitmap */ + if ((result = CheckVolumeBitMap(GPtr, false))) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - CheckVolumeBitMap elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ((result = CheckForStop(GPtr))) + break; + + fsckPrint(GPtr->context, hfsVolInfoCheck); + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + + /* Verify volume level information */ + if ((result = VInfoChk(GPtr))) + break; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - VInfoChk elapsed time \n", __FUNCTION__ ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", + myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + stat = GPtr->VIStat | GPtr->ABTStat | GPtr->EBTStat | GPtr->CBTStat | + GPtr->CatStat | GPtr->JStat; + + if ( stat != 0 ) + { + if ( (GPtr->RepLevel == repairLevelNoProblemsFound) || (GPtr->RepLevel == repairLevelVolumeRecoverable) ) + { + // 2200106, We isolate very minor errors so that if the volume cannot be unmounted + // CheckDisk will just return noErr + unsigned int minorErrors = (GPtr->CatStat & ~S_LockedDirName) | + GPtr->VIStat | GPtr->ABTStat | GPtr->EBTStat | GPtr->CBTStat | GPtr->JStat; + if ( minorErrors == 0 ) + GPtr->RepLevel = repairLevelVeryMinorErrors; + else + GPtr->RepLevel = repairLevelVolumeRecoverable; + } + } + else if ( GPtr->RepLevel == repairLevelNoProblemsFound ) + { + } + + GPtr->itemsProcessed = GPtr->itemsToProcess; + result = CheckForStop(GPtr); // one last check for modified volume + break; + } + + case scavRepair: // REPAIR + { + if ( IsBlueBoxSharedDrive( GPtr->DrvPtr ) ) + break; + if ( ( result = CheckForStop(GPtr) ) ) + break; + if ( GPtr->CBTStat & S_RebuildBTree + || GPtr->EBTStat & S_RebuildBTree + || GPtr->ABTStat & S_RebuildBTree) { +// fsckPrint(GPtr->context, hfsRebuildCatalogBTree); +// fsckPrint(GPtr->context, hfsRebuildAttrBTree); +// actually print nothing yet -- we print out when we are rebuilding the trees + } else { + fsckPrint(GPtr->context, fsckRepairingVolume); + if (embedded == 1 && debug == 0) + fsckPrint(GPtr->context, fsckLimitedRepairs); + } + result = RepairVolume( GPtr ); + break; + } + + case scavTerminate: // CLEANUP AFTER SCAVENGE + { + result = ScavTerm(GPtr); + break; + } + } // end ScavOp switch + + + // + // Map internal error codes to scavenger result codes + // + if ( (result < 0) || (result > Max_RCode) ) + { + switch ( ScavOp ) + { + case scavInitialize: + case scavVerify: + if ( result == ioErr ) + result = R_RdErr; + else if ( result == errRebuildBtree ) + { + GPtr->RepLevel = repairLevelCatalogBtreeRebuild; + break; + } + else + result = R_VFail; + GPtr->RepLevel = repairLevelUnrepairable; + break; + case scavRepair: + result = R_RFail; + break; + default: + result = R_IntErr; + } + } + + GPtr->ScavRes = result; + + *ScavRes = result; + +} // end of ScavCtrl + + + 
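+
+/*
+ * Illustrative sketch (editorial addition, not part of the original fsck_hfs
+ * sources): the minimal way a caller drives ScavCtrl through one
+ * verify/repair cycle, as CheckHFS does above. Journal handling, rescan
+ * limits and progress reporting are omitted; 'dataArea' is assumed to be a
+ * zeroed SGlob with chkLevel, repairLevel and the drive references already
+ * filled in.
+ *
+ *     short scavError = 0, temp = 0;
+ *
+ *     ScavCtrl( &dataArea, scavInitialize, &scavError );   // open volume, preliminary checks
+ *     if ( scavError == noErr )
+ *         ScavCtrl( &dataArea, scavVerify, &scavError );   // full verification pass
+ *     if ( scavError == noErr && dataArea.RepLevel != repairLevelNoProblemsFound )
+ *         ScavCtrl( &dataArea, scavRepair, &scavError );   // repair what verify found
+ *     ScavCtrl( &dataArea, scavTerminate, &temp );         // always release scavenger state
+ */
+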
+/*------------------------------------------------------------------------------ + +Function: CheckForStop + +Function: Checks for the user hitting the "STOP" button during a scavenge, + which interrupts the operation. Additionally, we monitor the write + count of a mounted volume, to be sure that the volume is not + modified by another app while we scavenge. + +Input: GPtr - pointer to scavenger global area + +Output: Function result: + 0 - ok to continue + R_UInt - STOP button hit + R_Modified - another app has touched the volume +-------------------------------------------------------------------------------*/ + +short CheckForStop( SGlob *GPtr ) +{ + OSErr err = noErr; // Initialize err to noErr + long ticks = TickCount(); + UInt16 dfaStage = (UInt16) GetDFAStage(); + + //plog("%d, %d", dfaStage, kAboutToRepairStage); + + //if ( ((ticks - 10) > GPtr->lastTickCount) || (dfaStage == kAboutToRepairStage) ) // To reduce cursor flicker on fast machines, call through on a timed interval + //{ + if ( GPtr->userCancelProc != nil ) + { + UInt64 progress = 0; + Boolean progressChanged; + // UInt16 elapsedTicks; + + if ( dfaStage != kRepairStage ) + { + progress = GPtr->itemsProcessed * 100; + progress /= GPtr->itemsToProcess; + progressChanged = ( progress != GPtr->lastProgress ); + GPtr->lastProgress = progress; + + #if( DisplayTimeRemaining ) + if ( (progressChanged) && (progress > 5) ) + { + elapsedTicks = TickCount() - GPtr->startTicks; + GPtr->secondsRemaining = ( ( ( 100 * elapsedTicks ) / progress ) - elapsedTicks ) / 60; + } + #endif + err = CallUserCancelProc( GPtr->userCancelProc, (UInt16)progress, (UInt16)GPtr->secondsRemaining, progressChanged, dfaStage, GPtr->context, GPtr->scanCount ); + } + else + { + (void) CallUserCancelProc( GPtr->userCancelProc, (UInt16)progress, 0, false, dfaStage, GPtr->context, GPtr->scanCount ); + } + + } + + if ( err != noErr ) + err = R_UInt; + #if 0 + if ( GPtr->realVCB ) // If the volume is mounted + if ( GPtr->realVCB->vcbWrCnt != GPtr->wrCnt ) + err = R_Modified; // Its been modified behind our back + #endif + GPtr->lastTickCount = ticks; + //} + + return ( err ); +} + + + +/*------------------------------------------------------------------------------ + +Function: ScavSetUp - (Scavenger Set Up) + +Function: Sets up scavenger globals for a new scavenge operation. Memory is + allocated for the Scavenger's static data structures (VCB, FCBs, + BTCBs, and TPTs). The contents of the data structures are + initialized to zero. 
+ +Input: GPtr - pointer to scavenger global area + +Output: ScavSetUp - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +struct ScavStaticStructures { + SVCB vcb; + SFCB fcbList[6]; + BTreeControlBlock btcb[4]; // 4 btcb's + SBTPT btreePath; // scavenger BTree path table +}; +typedef struct ScavStaticStructures ScavStaticStructures; + + +static int ScavSetUp( SGlob *GPtr) +{ + OSErr err; + SVCB * vcb; +#if !BSD + DrvQEl *drvP; + short ioRefNum; +#endif + + GPtr->MinorRepairsP = nil; + + GPtr->itemsProcessed = 0; + GPtr->lastProgress = 0; + GPtr->startTicks = TickCount(); + + // + // allocate the static data structures (VCB, FCB's, BTCB'S, DPT and BTPT) + // + { + ScavStaticStructures *pointer; + + pointer = (ScavStaticStructures *) AllocateClearMemory( sizeof(ScavStaticStructures) ); + if ( pointer == nil ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\t error %d - could not allocate %ld bytes of memory \n", + R_NoMem, sizeof(ScavStaticStructures) ); + } + return( R_NoMem ); + } + GPtr->scavStaticPtr = pointer; + + GPtr->DirPTPtr = AllocateClearMemory(sizeof(SDPR) * CMMaxDepth); + if ( GPtr->DirPTPtr == nil ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\t error %d - could not allocate %ld bytes of memory \n", + R_NoMem, sizeof(SDPR) * CMMaxDepth ); + } + return( R_NoMem ); + } + GPtr->dirPathCount = CMMaxDepth; + + GPtr->calculatedVCB = vcb = &pointer->vcb; + vcb->vcbGPtr = GPtr; + + GPtr->FCBAPtr = (Ptr) &pointer->fcbList; + GPtr->calculatedExtentsFCB = &pointer->fcbList[0]; + GPtr->calculatedCatalogFCB = &pointer->fcbList[1]; + GPtr->calculatedAllocationsFCB = &pointer->fcbList[2]; + GPtr->calculatedAttributesFCB = &pointer->fcbList[3]; + GPtr->calculatedStartupFCB = &pointer->fcbList[4]; + GPtr->calculatedRepairFCB = &pointer->fcbList[5]; + + GPtr->calculatedExtentsBTCB = &pointer->btcb[0]; + GPtr->calculatedCatalogBTCB = &pointer->btcb[1]; + GPtr->calculatedRepairBTCB = &pointer->btcb[2]; + GPtr->calculatedAttributesBTCB = &pointer->btcb[3]; + + GPtr->BTPTPtr = (SBTPT*) &pointer->btreePath; + } + + + SetDFAStage( kVerifyStage ); + SetFCBSPtr( GPtr->FCBAPtr ); + + // + // locate the driveQ element for drive being scavenged + // + GPtr->DrvPtr = 0; // <8> initialize so we can know if drive disappears + + // + // Set up Real structures + // +#if !BSD + err = FindDrive( &ioRefNum, &(GPtr->DrvPtr), GPtr->DrvNum ); +#endif + if ( IsBlueBoxSharedDrive( GPtr->DrvPtr ) ) + return noErr; + + err = GetVolumeFeatures( GPtr ); // Sets up GPtr->volumeFeatures and GPtr->realVCB + +#if !BSD + if ( GPtr->DrvPtr == NULL ) // <8> drive is no longer there! 
+ return ( R_NoVol ); + else + drvP = GPtr->DrvPtr; + + // Save current value of vcbWrCnt, to detect modifications to volume by other apps etc + if ( GPtr->volumeFeatures & volumeIsMountedMask ) + { + FlushVol( nil, GPtr->realVCB->vcbVRefNum ); // Ask HFS to update all changes to disk + GPtr->wrCnt = GPtr->realVCB->vcbWrCnt; // Remember write count after writing changes + } +#endif + + // Finish initializing the VCB + + // The calculated structures +#if BSD + InitBlockCache(vcb); + vcb->vcbDriveNumber = GPtr->DrvNum; + vcb->vcbDriverReadRef = GPtr->DrvNum; + vcb->vcbDriverWriteRef = -1; /* XXX need to get real fd here */ +#else + vcb->vcbDriveNumber = drvP->dQDrive; + vcb->vcbDriverReadRef = drvP->dQRefNum; + vcb->vcbDriverWriteRef = drvP->dQRefNum; + vcb->vcbFSID = drvP->dQFSID; +#endif +// vcb->vcbVRefNum = Vol_RefN; + + // + // finish initializing the FCB's + // + { + SFCB *fcb; + + // Create Calculated Extents FCB + fcb = GPtr->calculatedExtentsFCB; + fcb->fcbFileID = kHFSExtentsFileID; + fcb->fcbVolume = vcb; + fcb->fcbBtree = GPtr->calculatedExtentsBTCB; + vcb->vcbExtentsFile = fcb; + + // Create Calculated Catalog FCB + fcb = GPtr->calculatedCatalogFCB; + fcb->fcbFileID = kHFSCatalogFileID; + fcb->fcbVolume = vcb; + fcb->fcbBtree = GPtr->calculatedCatalogBTCB; + vcb->vcbCatalogFile = fcb; + + // Create Calculated Allocations FCB + fcb = GPtr->calculatedAllocationsFCB; + fcb->fcbFileID = kHFSAllocationFileID; + fcb->fcbVolume = vcb; + fcb->fcbBtree = NULL; // no BitMap B-Tree + vcb->vcbAllocationFile = fcb; + + // Create Calculated Attributes FCB + fcb = GPtr->calculatedAttributesFCB; + fcb->fcbFileID = kHFSAttributesFileID; + fcb->fcbVolume = vcb; + fcb->fcbBtree = GPtr->calculatedAttributesBTCB; + vcb->vcbAttributesFile = fcb; + + /* Create Calculated Startup FCB */ + fcb = GPtr->calculatedStartupFCB; + fcb->fcbFileID = kHFSStartupFileID; + fcb->fcbVolume = vcb; + fcb->fcbBtree = NULL; + vcb->vcbStartupFile = fcb; + } + + // finish initializing the BTCB's + { + BTreeControlBlock *btcb; + + btcb = GPtr->calculatedExtentsBTCB; // calculatedExtentsBTCB + btcb->fcbPtr = GPtr->calculatedExtentsFCB; + btcb->getBlockProc = GetFileBlock; + btcb->releaseBlockProc = ReleaseFileBlock; + btcb->setEndOfForkProc = SetEndOfForkProc; + + btcb = GPtr->calculatedCatalogBTCB; // calculatedCatalogBTCB + btcb->fcbPtr = GPtr->calculatedCatalogFCB; + btcb->getBlockProc = GetFileBlock; + btcb->releaseBlockProc = ReleaseFileBlock; + btcb->setEndOfForkProc = SetEndOfForkProc; + + btcb = GPtr->calculatedAttributesBTCB; // calculatedAttributesBTCB + btcb->fcbPtr = GPtr->calculatedAttributesFCB; + btcb->getBlockProc = GetFileBlock; + btcb->releaseBlockProc = ReleaseFileBlock; + btcb->setEndOfForkProc = SetEndOfForkProc; + } + + + // + // Initialize some global stuff + // + + GPtr->RepLevel = repairLevelNoProblemsFound; + GPtr->ErrCode = 0; + GPtr->IntErr = noErr; + GPtr->VIStat = 0; + GPtr->ABTStat = 0; + GPtr->EBTStat = 0; + GPtr->CBTStat = 0; + GPtr->CatStat = 0; + GPtr->VeryMinorErrorsStat = 0; + GPtr->JStat = 0; + + /* Assume that the volume is dirty unmounted */ + GPtr->cleanUnmount = false; + + // + // Initialize VolumeObject + // + + InitializeVolumeObject( GPtr ); + + /* Check if the volume type of initialized object is valid. 
If not, return error */ + if (VolumeObjectIsValid() == false) { + return (R_BadSig); + } + + // Keep a valid file id list for HFS volumes + GPtr->validFilesList = (UInt32**)NewHandle( 0 ); + if ( GPtr->validFilesList == nil ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\t error %d - could not allocate file ID list \n", R_NoMem ); + } + return( R_NoMem ); + } + + // Convert the security attribute name from utf8 to utf16. This will + // avoid repeated conversion of all extended attributes to compare with + // security attribute name + (void) utf_decodestr((unsigned char *)KAUTH_FILESEC_XATTR, strlen(KAUTH_FILESEC_XATTR), GPtr->securityAttrName, &GPtr->securityAttrLen, sizeof(GPtr->securityAttrName)); + + return( noErr ); + +} /* end of ScavSetUp */ + + + + +/*------------------------------------------------------------------------------ + +Function: ScavTerm - (Scavenge Termination)) + +Function: Terminates the current scavenging operation. Memory for the + VCB, FCBs, BTCBs, volume bit map, and BTree bit maps is + released. + +Input: GPtr - pointer to scavenger global area + +Output: ScavTerm - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +static int ScavTerm( SGlobPtr GPtr ) +{ + SFCB *fcbP; + BTreeControlBlock *btcbP; + RepairOrderPtr rP; + OSErr err; + ExtentsTable **extentsTableH; + ExtentInfo *curExtentInfo; + int i; + + (void) BitMapCheckEnd(); + + while( (rP = GPtr->MinorRepairsP) != nil ) // loop freeing leftover (undone) repair orders + { + GPtr->MinorRepairsP = rP->link; // (in case repairs were not made) + DisposeMemory(rP); + err = MemError(); + } + + if( GPtr->validFilesList != nil ) + DisposeHandle( (Handle) GPtr->validFilesList ); + + if( GPtr->overlappedExtents != nil ) { + extentsTableH = GPtr->overlappedExtents; + + /* Overlapped extents list also allocated memory for attribute name */ + for (i=0; i<(**extentsTableH).count; i++) { + curExtentInfo = &((**extentsTableH).extentInfo[i]); + + /* Deallocate memory for attribute name, if any */ + if (curExtentInfo->attrname) { + free(curExtentInfo->attrname); + } + } + + DisposeHandle( (Handle) GPtr->overlappedExtents ); + } + + if( GPtr->fileIdentifierTable != nil ) + DisposeHandle( (Handle) GPtr->fileIdentifierTable ); + + if( GPtr->calculatedVCB == nil ) // already freed? 
+        return( noErr );
+
+    // If the FCB's and BTCB's have been set up, dispose of them
+    fcbP = GPtr->calculatedExtentsFCB;  // release extent file BTree bit map
+    if ( fcbP != nil )
+    {
+        btcbP = (BTreeControlBlock*)fcbP->fcbBtree;
+        if ( btcbP != nil)
+        {
+            if( btcbP->refCon != nil )
+            {
+                if(((BTreeExtensionsRec*)btcbP->refCon)->BTCBMPtr != nil)
+                {
+                    DisposeMemory(((BTreeExtensionsRec*)btcbP->refCon)->BTCBMPtr);
+                    err = MemError();
+                }
+                DisposeMemory( (Ptr)btcbP->refCon );
+                err = MemError();
+                btcbP->refCon = nil;
+            }
+
+            fcbP = GPtr->calculatedCatalogFCB;  // release catalog BTree bit map
+            btcbP = (BTreeControlBlock*)fcbP->fcbBtree;
+
+            if( btcbP->refCon != nil )
+            {
+                if(((BTreeExtensionsRec*)btcbP->refCon)->BTCBMPtr != nil)
+                {
+                    DisposeMemory(((BTreeExtensionsRec*)btcbP->refCon)->BTCBMPtr);
+                    err = MemError();
+                }
+                DisposeMemory( (Ptr)btcbP->refCon );
+                err = MemError();
+                btcbP->refCon = nil;
+            }
+        }
+    }
+
+    DisposeMemory(GPtr->DirPTPtr);
+    DisposeMemory((ScavStaticStructures *)GPtr->scavStaticPtr);
+    GPtr->scavStaticPtr = nil;
+    GPtr->calculatedVCB = nil;
+
+    return( noErr );
+}
+
+#define BLUE_BOX_SHARED_DRVR_NAME "\p.BlueBoxShared"
+#define BLUE_BOX_FLOPPY_WHERE_STRING "\pdisk%d (Shared)"
+#define SONY_DRVR_NAME "\p.Sony"
+
+/*------------------------------------------------------------------------------
+
+Routine:    IsBlueBoxSharedDrive
+
+Function:   Given a DQE address, return a boolean that determines whether
+            or not a drive is a Blue Box disk being accessed via Shared mode.
+            Such drives do not support i/o and cannot be scavenged.
+
+Input:      Arg 1   - DQE pointer
+
+Output:     D0.L -  0 if drive not to be used
+                    1 otherwise
+------------------------------------------------------------------------------*/
+
+struct IconAndStringRec {
+    char    icon[ 256 ];
+    Str255  string;
+};
+typedef struct IconAndStringRec IconAndStringRec, * IconAndStringRecPtr;
+
+
+Boolean IsBlueBoxSharedDrive ( DrvQElPtr dqPtr )
+{
+#if 0
+    Str255          blueBoxSharedDriverName = BLUE_BOX_SHARED_DRVR_NAME;
+    Str255          blueBoxFloppyWhereString = BLUE_BOX_FLOPPY_WHERE_STRING;
+    Str255          sonyDriverName = SONY_DRVR_NAME;
+    DCtlHandle      driverDCtlHandle;
+    DCtlPtr         driverDCtlPtr;
+    DRVRHeaderPtr   drvrHeaderPtr;
+    StringPtr       driverName;
+
+    if ( dqPtr == NULL )
+        return false;
+
+    // Now look at the name of the Driver name. If it is .BlueBoxShared keep it out of the list of available disks.
+    driverDCtlHandle = GetDCtlEntry(dqPtr->dQRefNum);
+    driverDCtlPtr = *driverDCtlHandle;
+    if((((driverDCtlPtr->dCtlFlags) & Is_Native_Mask) == 0) && (driverDCtlPtr->dCtlDriver != nil))
+    {
+        if (((driverDCtlPtr->dCtlFlags) & Is_Ram_Based_Mask) == 0)
+        {
+            drvrHeaderPtr = (DRVRHeaderPtr)driverDCtlPtr->dCtlDriver;
+        }
+        else
+        {
+            // bek - lock w/o unlock/restore? should be getstate/setstate?
+            HLock((Handle)(driverDCtlPtr)->dCtlDriver);
+            drvrHeaderPtr = (DRVRHeaderPtr)*((Handle)(driverDCtlPtr->dCtlDriver));
+
+        }
+        driverName = (StringPtr)&(drvrHeaderPtr->drvrName);
+        if (!(IdenticalString(driverName,blueBoxSharedDriverName,nil)))
+        {
+            return( true );
+        }
+
+        // Special case for the ".Sony" floppy driver which might be accessed in Shared mode inside the Blue Box
+        // Test its "where" string instead of the driver name.
+        if (!(IdenticalString(driverName,sonyDriverName,nil)))
+        {
+            CntrlParam  paramBlock;
+
+            paramBlock.ioCompletion = nil;
+            paramBlock.ioNamePtr    = nil;
+            paramBlock.ioVRefNum    = dqPtr->dQDrive;
+            paramBlock.ioCRefNum    = dqPtr->dQRefNum;
+            paramBlock.csCode       = kDriveIcon;       // return physical icon
+
+            // If PBControl(kDriveIcon) returns an error then the driver is not the Blue Box driver.
+            if ( noErr == PBControlSync( (ParmBlkPtr) &paramBlock ) )
+            {
+                IconAndStringRecPtr iconAndStringRecPtr;
+                StringPtr           whereStringPtr;
+
+                iconAndStringRecPtr = * (IconAndStringRecPtr*) & paramBlock.csParam;
+                whereStringPtr = (StringPtr) & iconAndStringRecPtr->string;
+                if (!(IdenticalString(whereStringPtr,blueBoxFloppyWhereString,nil)))
+                {
+                    return( true );
+                }
+            }
+        }
+    }
+#endif
+
+    return false;
+}
+
+
+
+
+/*------------------------------------------------------------------------------
+
+Function:   printVerifyStatus - (Print Verify Status)
+
+Function:   Prints out the Verify Status words.
+
+Input:      GPtr    -   pointer to scavenger global area
+
+Output:     None.
+------------------------------------------------------------------------------*/
+static
+void printVerifyStatus(SGlobPtr GPtr)
+{
+    UInt32 stat;
+
+    stat = GPtr->VIStat | GPtr->ABTStat | GPtr->EBTStat | GPtr->CBTStat | GPtr->CatStat;
+
+    if ( stat != 0 ) {
+        plog("   Verify Status: VIStat = 0x%04x, ABTStat = 0x%04x EBTStat = 0x%04x\n",
+            GPtr->VIStat, GPtr->ABTStat, GPtr->EBTStat);
+        plog("                  CBTStat = 0x%04x CatStat = 0x%08x\n",
+            GPtr->CBTStat, GPtr->CatStat);
+    }
+}
diff --git a/fsck_hfs/dfalib/SDevice.c b/fsck_hfs/dfalib/SDevice.c
new file mode 100644
index 0000000..d1c7d1f
--- /dev/null
+++ b/fsck_hfs/dfalib/SDevice.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 1999-2000, 2002, 2005, 2007-2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include "SRuntime.h" +#include "../fsck_hfs.h" + +#if BSD + +#include <unistd.h> +#include <errno.h> +#include <sys/ioctl.h> + +#include <IOKit/storage/IOMediaBSDClient.h> + +#else + +#include <Files.h> +#include <Device.h> +#include <Disks.h> + +#endif + + +OSErr GetDeviceSize(int driveRefNum, UInt64 *numBlocks, UInt32 *blockSize) +{ +#if BSD + UInt64 devBlockCount = 0; + int devBlockSize = 0; + + if (ioctl(driveRefNum, DKIOCGETBLOCKCOUNT, &devBlockCount) < 0) { + plog("ioctl(DKIOCGETBLOCKCOUNT) for fd %d: %s\n", driveRefNum, strerror(errno)); + return (-1); + } + + if (ioctl(driveRefNum, DKIOCGETBLOCKSIZE, &devBlockSize) < 0) { + plog("ioctl(DKIOCGETBLOCKSIZE) for fd %d: %s\n", driveRefNum, strerror(errno)); + return (-1); + } + + if (devBlockSize != 512) { + *numBlocks = (devBlockCount * (UInt64)devBlockSize) / 512; + *blockSize = 512; + } else { + *numBlocks = devBlockCount; + *blockSize = devBlockSize; + } + return (0); +#else + /* Various Mac OS device constants */ + enum + { + /* return format list status code */ + kFmtLstCode = 6, + + /* reference number of .SONY driver */ + kSonyRefNum = 0xfffb, + + /* values returned by DriveStatus in DrvSts.twoSideFmt */ + kSingleSided = 0, + kDoubleSided = -1, + kSingleSidedSize = 800, /* 400K */ + kDoubleSidedSize = 1600, /* 800K */ + + /* values in DrvQEl.qType */ + kWordDrvSiz = 0, + kLongDrvSiz = 1, + + /* more than enough formatListRecords */ + kMaxFormatListRecs = 16 + }; + + ParamBlockRec pb; + FormatListRec formatListRecords[kMaxFormatListRecs]; + DrvSts status; + short formatListRecIndex; + OSErr result; + unsigned long blocks = 0; + + + /* Attempt to get the drive's format list. */ + /* (see the Technical Note "What Your Sony Drives For You") */ + + pb.cntrlParam.ioVRefNum = driveQElementPtr->dQDrive; + pb.cntrlParam.ioCRefNum = driveQElementPtr->dQRefNum; + pb.cntrlParam.csCode = kFmtLstCode; + pb.cntrlParam.csParam[0] = kMaxFormatListRecs; + *(long *)&pb.cntrlParam.csParam[1] = (long)&formatListRecords[0]; + + result = PBStatusSync(&pb); + + if ( result == noErr ) + { + /* The drive supports ReturnFormatList status call. */ + + /* Get the current disk's size. 
*/ + for( formatListRecIndex = 0; + formatListRecIndex < pb.cntrlParam.csParam[0]; + ++formatListRecIndex ) + { + if ( (formatListRecords[formatListRecIndex].formatFlags & + diCIFmtFlagsCurrentMask) != 0 ) + { + blocks = formatListRecords[formatListRecIndex].volSize; + } + } + if ( blocks == 0 ) + { + /* This should never happen */ + result = paramErr; + } + } + else if ( driveQElementPtr->dQRefNum == (short)kSonyRefNum ) + { + /* The drive is a non-SuperDrive floppy which only supports 400K and 800K disks */ + + result = DriveStatus(driveQElementPtr->dQDrive, &status); + if ( result == noErr ) + { + switch ( status.twoSideFmt ) + { + case kSingleSided: + blocks = kSingleSidedSize; + break; + + case kDoubleSided: + blocks = kDoubleSidedSize; + break; + + default: // This should never happen + result = paramErr; + break; + } + } + } + else + { + /* The drive is not a floppy and it doesn't support ReturnFormatList */ + /* so use the dQDrvSz field(s) */ + + result = noErr; /* reset result */ + + switch ( driveQElementPtr->qType ) + { + case kWordDrvSiz: + blocks = driveQElementPtr->dQDrvSz; + break; + + case kLongDrvSiz: + blocks = ((unsigned long)driveQElementPtr->dQDrvSz2 << 16) + + driveQElementPtr->dQDrvSz; + break; + + default: // This should never happen + result = paramErr; + break; + } + } + + *numBlocks = blocks; + *blockSize = 512; + + return( result ); +#endif +} + + +OSErr DeviceRead(int device, int drive, void* buffer, SInt64 offset, UInt32 reqBytes, UInt32 *actBytes) +{ +#if BSD + off_t seek_off; + ssize_t nbytes; + + *actBytes = 0; + + seek_off = lseek(device, offset, SEEK_SET); + if (seek_off == -1) { + plog("# DeviceRead: lseek(%qd) failed with %d\n", offset, errno); + return (errno); + } + + nbytes = read(device, buffer, reqBytes); + if (nbytes == -1) + return (errno); + if (nbytes == 0) { + plog("CANNOT READ: BLK %ld\n", (long)offset/512); + return (5); + } + + *actBytes = nbytes; + return (0); + +#else + OSErr err; + XIOParam pb; + + pb.ioVRefNum = drive; + pb.ioRefNum = device; + pb.ioPosMode = fsFromStart; + pb.ioReqCount = reqBytes; + pb.ioBuffer = buffer; + + if ( (offset & 0xFFFFFFFF00000000) != 0 ) + { + *(SInt64*)&pb.ioWPosOffset = offset; + pb.ioPosMode |= (1 << kWidePosOffsetBit); + } + else + { + ((IOParam*)&pb)->ioPosOffset = offset; + } + + err = PBReadSync( (ParamBlockRec *)&pb ); + + return (err); +#endif +} + + +OSErr DeviceWrite(int device, int drive, void* buffer, SInt64 offset, UInt32 reqBytes, UInt32 *actBytes) +{ +#if BSD + off_t seek_off; + ssize_t nbytes; + + *actBytes = 0; + + seek_off = lseek(device, offset, SEEK_SET); + if (seek_off == -1) { + plog("# DeviceRead: lseek(%qd) failed with %d\n", offset, errno); + return (errno); + } + + nbytes = write(device, buffer, reqBytes); + if (nbytes == -1) { + return (errno); + } + if (nbytes == 0) { + plog("CANNOT WRITE: BLK %ld\n", (long)offset/512); + return (5); + } + + *actBytes = nbytes; + return (0); +#else + OSErr err; + XIOParam pb; + + pb.ioVRefNum = drive; + pb.ioRefNum = device; + pb.ioPosMode = fsFromStart; + pb.ioReqCount = reqBytes; + pb.ioBuffer = buffer; + + if ( (offset & 0xFFFFFFFF00000000) != 0 ) + { + *(SInt64*)&pb.ioWPosOffset = offset; + pb.ioPosMode |= (1 << kWidePosOffsetBit); + } + else + { + ((IOParam*)&pb)->ioPosOffset = offset; + } + + err = PBWriteSync( (ParamBlockRec *)&pb ); + + return (err); +#endif +} diff --git a/fsck_hfs/dfalib/SExtents.c b/fsck_hfs/dfalib/SExtents.c new file mode 100644 index 0000000..8619b28 --- /dev/null +++ b/fsck_hfs/dfalib/SExtents.c @@ -0,0 +1,1835 @@ 
+/* + * Copyright (c) 1999-2002, 2005, 2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SExtents.c + + Contains: Routines to map file positions to volume positions, and manipulate the extents B-Tree. + + Version: HFS Plus 1.0 + + Written by: Dave Heller, Mark Day + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. +*/ + + +#include "BTree.h" +#include "Scavenger.h" + +/* +============================================================ +Public (Exported) Routines: +============================================================ + DeallocateFile Deallocate all disk space allocated to a specified file. + Both forks are deallocated. + + ExtendFileC Allocate more space to a given file. + + + MapFileBlockC Convert (map) an offset within a given file into a + physical disk address. + + TruncateFileC Truncates the disk space allocated to a file. The file + space is truncated to a specified new physical EOF, rounded + up to the next allocation block boundry. There is an option + to truncate to the end of the extent containing the new EOF. + + FlushExtentFile + Flush the extents file for a given volume. + + AdjustEOF + Copy EOF, physical length, and extent records from one FCB + to all other FCBs for that fork. This is used when a file is + grown or shrunk as the result of a Write, SetEOF, or Allocate. + + MapLogicalToPhysical + Map some position in a file to a volume block number. Also + returns the number of contiguous bytes that are mapped there. + This is a queued HFSDispatch call that does the equivalent of + MapFileBlockC, using a parameter block. + + UpdateExtentRecord + If the extent record came from the extents file, write out + the updated record; otherwise, copy the updated record into + the FCB resident extent record. If the record has no extents, + and was in the extents file, then delete the record instead. + + ReleaseExtents + Deallocate all allocation blocks in all extents of an extent + data record. +============================================================ +Internal Routines: +============================================================ + FindExtentRecord + Search the extents BTree for a particular extent record. + SearchExtentFile + Search the FCB and extents file for an extent record that + contains a given file position (in bytes). + SearchExtentRecord + Search a given extent record to see if it contains a given + file position (in bytes). Used by SearchExtentFile. + TruncateExtents + Deallocate blocks and delete extent records for all allocation + blocks beyond a certain point in a file. 
The starting point + must be the first file allocation block for some extent record + for the file. + DeallocateFork + Deallocate all allocation blocks belonging to a given fork. +*/ + +enum +{ + kTwoGigSectors = 0x00400000, + + kDataForkType = 0, + kResourceForkType = 0xFF, + + kPreviousRecord = -1, + + kSectorSize = 512 // Size of a physical sector +}; + +static OSErr ExtentsToExtDataRec( + HFSPlusExtentRecord oldExtents, + HFSExtentRecord newExtents); + +OSErr FindExtentRecord( + const SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean allowPrevious, + HFSPlusExtentKey *foundKey, + HFSPlusExtentRecord foundData, + UInt32 *foundHint); + +OSErr DeleteExtentRecord( + const SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock); + +static OSErr CreateExtentRecord( + const SVCB *vcb, + HFSPlusExtentKey *key, + HFSPlusExtentRecord extents, + UInt32 *hint); + +OSErr GetFCBExtentRecord( + const SVCB *vcb, + const SFCB *fcb, + HFSPlusExtentRecord extents); + +static OSErr SetFCBExtentRecord( + const SVCB *vcb, + SFCB *fcb, + HFSPlusExtentRecord extents); + +static OSErr SearchExtentFile( + const SVCB *vcb, + const SFCB *fcb, + UInt64 filePosition, + HFSPlusExtentKey *foundExtentKey, + HFSPlusExtentRecord foundExtentData, + UInt32 *foundExtentDataIndex, + UInt32 *extentBTreeHint, + UInt32 *endingFABNPlusOne ); + +static OSErr SearchExtentRecord( + const SVCB *vcb, + UInt32 searchFABN, + const HFSPlusExtentRecord extentData, + UInt32 extentDataStartFABN, + UInt32 *foundExtentDataOffset, + UInt32 *endingFABNPlusOne, + Boolean *noMoreExtents); + +#if 0 +static OSErr DeallocateFork( + SVCB *vcb, + HFSCatalogNodeID fileID, + UInt8 forkType, + HFSPlusExtentRecord catalogExtents, + Boolean * recordDeleted); + +static OSErr TruncateExtents( + SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean * recordDeleted); +#endif + +static OSErr MapFileBlockFromFCB( + const SVCB *vcb, + const SFCB *fcb, + UInt64 offset, // Desired offset in bytes from start of file + UInt32 *firstFABN, // FABN of first block of found extent + UInt32 *firstBlock, // Corresponding allocation block number + UInt32 *nextFABN); // FABN of block after end of extent + +static Boolean ExtentsAreIntegral( + const HFSPlusExtentRecord extentRecord, + UInt32 mask, + UInt32 *blocksChecked, + Boolean *checkedLastExtent); + +//_________________________________________________________________________________ +// +// Routine: FindExtentRecord +// +// Purpose: Search the extents BTree for an extent record matching the given +// FileID, fork, and starting file allocation block number. +// +// Inputs: +// vcb Volume to search +// forkType 0 = data fork, -1 = resource fork +// fileID File's FileID (HFSCatalogNodeID) +// startBlock Starting file allocation block number +// allowPrevious If the desired record isn't found and this flag is set, +// then see if the previous record belongs to the same fork. +// If so, then return it. +// +// Outputs: +// foundKey The key data for the record actually found +// foundData The extent record actually found (NOTE: on an HFS volume, the +// fourth entry will be zeroes. 
+// foundHint The BTree hint to find the node again +//_________________________________________________________________________________ +OSErr FindExtentRecord( + const SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean allowPrevious, + HFSPlusExtentKey *foundKey, + HFSPlusExtentRecord foundData, + UInt32 *foundHint) +{ + OSErr err; + UInt16 foundSize; + + err = noErr; + + if (vcb->vcbSignature == kHFSSigWord) { + HFSExtentKey key; + HFSExtentKey extentKey; + HFSExtentRecord extentData; + + key.keyLength = kHFSExtentKeyMaximumLength; + key.forkType = forkType; + key.fileID = fileID; + key.startBlock = startBlock; + + err = SearchBTreeRecord(vcb->vcbExtentsFile, &key, kNoHint, &extentKey, &extentData, + &foundSize, foundHint); + + if (err == btNotFound && allowPrevious) { + err = GetBTreeRecord(vcb->vcbExtentsFile, kPreviousRecord, &extentKey, &extentData, + &foundSize, foundHint); + + // A previous record may not exist, so just return btNotFound (like we would if + // it was for the wrong file/fork). + if (err == (OSErr) fsBTStartOfIterationErr) //¥¥ fsBTStartOfIterationErr is type unsigned long + err = btNotFound; + + if (err == noErr) { + // Found a previous record. Does it belong to the same fork of the same file? + if (extentKey.fileID != fileID || extentKey.forkType != forkType) + err = btNotFound; + } + } + + if (err == noErr) { + UInt16 i; + + // Copy the found key back for the caller + foundKey->keyLength = kHFSPlusExtentKeyMaximumLength; + foundKey->forkType = extentKey.forkType; + foundKey->pad = 0; + foundKey->fileID = extentKey.fileID; + foundKey->startBlock = extentKey.startBlock; + + // Copy the found data back for the caller + foundData[0].startBlock = extentData[0].startBlock; + foundData[0].blockCount = extentData[0].blockCount; + foundData[1].startBlock = extentData[1].startBlock; + foundData[1].blockCount = extentData[1].blockCount; + foundData[2].startBlock = extentData[2].startBlock; + foundData[2].blockCount = extentData[2].blockCount; + + for (i = 3; i < kHFSPlusExtentDensity; ++i) + { + foundData[i].startBlock = 0; + foundData[i].blockCount = 0; + } + } + } + else { // HFS Plus volume + HFSPlusExtentKey key; + HFSPlusExtentKey extentKey; + HFSPlusExtentRecord extentData; + + key.keyLength = kHFSPlusExtentKeyMaximumLength; + key.forkType = forkType; + key.pad = 0; + key.fileID = fileID; + key.startBlock = startBlock; + + err = SearchBTreeRecord(vcb->vcbExtentsFile, &key, kNoHint, &extentKey, &extentData, + &foundSize, foundHint); + + if (err == btNotFound && allowPrevious) { + err = GetBTreeRecord(vcb->vcbExtentsFile, kPreviousRecord, &extentKey, &extentData, + &foundSize, foundHint); + + // A previous record may not exist, so just return btNotFound (like we would if + // it was for the wrong file/fork). + if (err == (OSErr) fsBTStartOfIterationErr) //¥¥ fsBTStartOfIterationErr is type unsigned long + err = btNotFound; + + if (err == noErr) { + // Found a previous record. Does it belong to the same fork of the same file? 
+ if (extentKey.fileID != fileID || extentKey.forkType != forkType) + err = btNotFound; + } + } + + if (err == noErr) { + // Copy the found key back for the caller + CopyMemory(&extentKey, foundKey, sizeof(HFSPlusExtentKey)); + // Copy the found data back for the caller + CopyMemory(&extentData, foundData, sizeof(HFSPlusExtentRecord)); + } + } + + return err; +} + + + +static OSErr CreateExtentRecord( + const SVCB *vcb, + HFSPlusExtentKey *key, + HFSPlusExtentRecord extents, + UInt32 *hint) +{ + OSErr err; + + err = noErr; + + if (vcb->vcbSignature == kHFSSigWord) { + HFSExtentKey hfsKey; + HFSExtentRecord data; + + hfsKey.keyLength = kHFSExtentKeyMaximumLength; + hfsKey.forkType = key->forkType; + hfsKey.fileID = key->fileID; + hfsKey.startBlock = key->startBlock; + + err = ExtentsToExtDataRec(extents, data); + if (err == noErr) + err = InsertBTreeRecord(vcb->vcbExtentsFile, &hfsKey, data, sizeof(HFSExtentRecord), hint); + } + else { // HFS Plus volume + err = InsertBTreeRecord(vcb->vcbExtentsFile, key, extents, sizeof(HFSPlusExtentRecord), hint); + } + + return err; +} + + +OSErr DeleteExtentRecord( + const SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock) +{ + OSErr err; + + err = noErr; + + if (vcb->vcbSignature == kHFSSigWord) { + HFSExtentKey key; + + key.keyLength = kHFSExtentKeyMaximumLength; + key.forkType = forkType; + key.fileID = fileID; + key.startBlock = startBlock; + + err = DeleteBTreeRecord( vcb->vcbExtentsFile, &key ); + } + else { // HFS Plus volume + HFSPlusExtentKey key; + + key.keyLength = kHFSPlusExtentKeyMaximumLength; + key.forkType = forkType; + key.pad = 0; + key.fileID = fileID; + key.startBlock = startBlock; + + err = DeleteBTreeRecord( vcb->vcbExtentsFile, &key ); + } + + return err; +} + + + +//_________________________________________________________________________________ +// +// Routine: MapFileBlock +// +// Function: Maps a file position into a physical disk address. +// +// Input: A2.L - VCB pointer +// (A1,D1.W) - FCB pointer +// D4.L - number of bytes desired +// D5.L - file position (byte address) +// +// Output: D3.L - physical start block +// D6.L - number of contiguous bytes available (up to D4 bytes) +// D0.L - result code <01Oct85> +// 0 = ok +// FXRangeErr = file position beyond mapped range <17Oct85> +// FXOvFlErr = extents file overflow <17Oct85> +// other = error <17Oct85> +// +// Called By: Log2Phys (read/write in place), Cache (map a file block). 
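FindExtentRecord hides the HFS/HFS Plus key differences behind a single HFS Plus style key, so callers only ever build one kind of lookup. The fragment below is a hypothetical helper (the name GetDataForkOverflow, and the assumption that the declarations above are visible through Scavenger.h and BTree.h, are illustrative) showing a typical exact-match lookup of a data-fork overflow record.

    #include "Scavenger.h"      /* SVCB, HFSPlusExtentKey/Record, OSErr (assumed) */
    #include "BTree.h"          /* btNotFound (assumed) */

    /* Hypothetical helper: fetch the overflow extent record whose extents
     * begin at file allocation block startFABN of a file's data fork. */
    static OSErr GetDataForkOverflow(const SVCB *vcb, UInt32 fileID,
                                     UInt32 startFABN,
                                     HFSPlusExtentRecord extents)
    {
        HFSPlusExtentKey key;
        UInt32           hint;
        OSErr            err;

        err = FindExtentRecord(vcb,
                               0,           /* kDataForkType, per the enum above  */
                               fileID,
                               startFABN,   /* blocks mapped by catalog extents   */
                               false,       /* exact start block only             */
                               &key, extents, &hint);
        if (err == btNotFound) {
            /* Nothing in the extents B-tree: every extent is catalog resident. */
        }
        return err;
    }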
+//_________________________________________________________________________________ + +OSErr MapFileBlockC ( + SVCB *vcb, // volume that file resides on + SFCB *fcb, // FCB of file + UInt32 numberOfBytes, // number of contiguous bytes desired + UInt64 sectorOffset, // starting offset within file (in 512-byte sectors) + UInt64 *startSector, // first 512-byte volume sector (NOT an allocation block) + UInt32 *availableBytes) // number of contiguous bytes (up to numberOfBytes) +{ + OSErr err; + UInt32 allocBlockSize; // Size of the volume's allocation block, in sectors + HFSPlusExtentKey foundKey; + HFSPlusExtentRecord foundData; + UInt32 foundIndex; + UInt32 hint; + UInt32 firstFABN = 0; // file allocation block of first block in found extent + UInt32 nextFABN; // file allocation block of block after end of found extent + UInt64 dataEnd; // (offset) end of range that is contiguous (in sectors) + UInt32 startBlock = 0; // volume allocation block corresponding to firstFABN + UInt64 temp; + + +// LogStartTime(kTraceMapFileBlock); + + allocBlockSize = vcb->vcbBlockSize >> kSectorShift; + + err = MapFileBlockFromFCB(vcb, fcb, sectorOffset, &firstFABN, &startBlock, &nextFABN); + if (err != noErr) { + err = SearchExtentFile(vcb, fcb, sectorOffset, &foundKey, foundData, &foundIndex, &hint, &nextFABN); + if (err == noErr) { + startBlock = foundData[foundIndex].startBlock; + firstFABN = nextFABN - foundData[foundIndex].blockCount; + } + } + + if (err != noErr) + { + // LogEndTime(kTraceMapFileBlock, err); + + return err; + } + + // + // Determine the end of the available space. It will either be the end of the extent, + // or the file's PEOF, whichever is smaller. + // + + // Get fork's physical size, in sectors + temp = fcb->fcbPhysicalSize >> kSectorShift; + dataEnd = (UInt64) nextFABN * allocBlockSize; // Assume valid data through end of this extent + if (temp < dataEnd) // Is PEOF shorter? + dataEnd = temp; // Yes, so only map up to PEOF + + // + // Compute the absolute sector number that contains the offset of the given file + // + temp = sectorOffset - ((UInt64) firstFABN * allocBlockSize); // offset in sectors from start of this extent + temp += (UInt64)startBlock * (UInt64)allocBlockSize; // offset in sectors from start of allocation block space + if (vcb->vcbSignature == kHFSPlusSigWord) + temp += vcb->vcbEmbeddedOffset/512; // offset into the wrapper + else + temp += vcb->vcbAlBlSt; // offset in sectors from start of volume + + // Return the desired sector for file position "offset" + *startSector = temp; + + // + // Determine the number of contiguous sectors until the end of the extent + // (or the amount they asked for, whichever comes first). In any case, + // we never map more than 2GB per call. + // + temp = dataEnd - sectorOffset; + if (temp >= kTwoGigSectors) + temp = kTwoGigSectors-1; // never map more than 2GB per call + temp <<= kSectorShift; // convert sectors to bytes + if (temp > numberOfBytes) + *availableBytes = numberOfBytes; // more there than they asked for, so pin the output + else + *availableBytes = temp; + +// LogEndTime(kTraceMapFileBlock, noErr); + + return noErr; +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: ReleaseExtents +// +// Function: Release the extents of a single extent data record. 
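Once MapFileBlockC has the covering extent, the mapping itself is plain arithmetic in 512-byte sectors: take the offset into the extent, add the extent's starting allocation block converted to sectors, and add the volume's own sector offset (vcbEmbeddedOffset/512 for an embedded HFS Plus volume, vcbAlBlSt for plain HFS). A standalone toy with invented numbers, just to make that calculation concrete:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy version of the offset math in MapFileBlockC, all in 512-byte sectors.
     * Every input value here is invented for illustration. */
    int main(void)
    {
        uint64_t allocBlockSize = 8;      /* 4 KiB allocation blocks = 8 sectors */
        uint64_t firstFABN      = 16;     /* extent covers file blocks 16..      */
        uint64_t startBlock     = 1000;   /* ...stored at volume block 1000      */
        uint64_t volumeStart    = 2;      /* vcbEmbeddedOffset/512 or vcbAlBlSt  */
        uint64_t sectorOffset   = 150;    /* file position being mapped          */

        uint64_t sector = sectorOffset - firstFABN * allocBlockSize   /* 150 - 128 = 22 */
                        + startBlock * allocBlockSize                 /* + 8000         */
                        + volumeStart;                                /* + 2            */

        printf("file sector %llu -> volume sector %llu\n",
               (unsigned long long)sectorOffset, (unsigned long long)sector);
        return 0;   /* prints: file sector 150 -> volume sector 8024 */
    }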
+//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +#if 1 +OSErr ReleaseExtents( + SVCB *vcb, + const HFSPlusExtentRecord extentRecord, + UInt32 *numReleasedAllocationBlocks, + Boolean *releasedLastExtent) +{ + UInt32 extentIndex; + UInt32 numberOfExtents; + OSErr err = noErr; + + *numReleasedAllocationBlocks = 0; + *releasedLastExtent = false; + + if (vcb->vcbSignature == kHFSPlusSigWord) + numberOfExtents = kHFSPlusExtentDensity; + else + numberOfExtents = kHFSExtentDensity; + + for( extentIndex = 0; extentIndex < numberOfExtents; extentIndex++) + { + UInt32 numAllocationBlocks; + + // Loop over the extent record and release the blocks associated with each extent. + + numAllocationBlocks = extentRecord[extentIndex].blockCount; + if ( numAllocationBlocks == 0 ) + { + *releasedLastExtent = true; + break; + } + + err = ReleaseBitmapBits( extentRecord[extentIndex].startBlock, numAllocationBlocks ); + if ( err != noErr ) + break; + + *numReleasedAllocationBlocks += numAllocationBlocks; // bump FABN to beg of next extent + } + + return( err ); +} +#endif + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: TruncateExtents +// +// Purpose: Delete extent records whose starting file allocation block number +// is greater than or equal to a given starting block number. The +// allocation blocks represented by the extents are deallocated. +// +// Inputs: +// vcb Volume to operate on +// fileID Which file to operate on +// startBlock Starting file allocation block number for first extent +// record to delete. +// +// Outputs: +// recordDeleted Set to true if any extents B-tree record was deleted. +// Unchanged otherwise. +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +static OSErr TruncateExtents( + SVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean * recordDeleted) +{ + OSErr err; + Boolean releasedLastExtent; + UInt32 numberExtentsReleased; + UInt32 hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents; + + while (true) { + err = FindExtentRecord(vcb, forkType, fileID, startBlock, false, &key, extents, &hint); + if (err != noErr) { + if (err == btNotFound) + err = noErr; + break; + } + + err = ReleaseExtents( vcb, extents, &numberExtentsReleased, &releasedLastExtent ); + if (err != noErr) break; + + err = DeleteExtentRecord(vcb, forkType, fileID, startBlock); + if (err != noErr) break; + + *recordDeleted = true; // We did delete a record + startBlock += numberExtentsReleased; + } + + return err; +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: DeallocateFork +// +// Function: De-allocates all disk space allocated to a specified fork. 
+//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +static OSErr DeallocateFork( + SVCB *vcb, + HFSCatalogNodeID fileID, + UInt8 forkType, + HFSPlusExtentRecord catalogExtents, + Boolean * recordDeleted) // set to true if any record was deleted +{ + OSErr err; + UInt32 numReleasedAllocationBlocks; + Boolean releasedLastExtent; + + // Release the catalog extents + err = ReleaseExtents( vcb, catalogExtents, &numReleasedAllocationBlocks, &releasedLastExtent ); + + // Release the extra extents, if present + if (err == noErr && !releasedLastExtent) + err = TruncateExtents(vcb, forkType, fileID, numReleasedAllocationBlocks, recordDeleted); + + return( err ); +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: FlushExtentFile +// +// Function: Flushes the extent file for a specified volume +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +OSErr FlushExtentFile( SVCB *vcb ) +{ + OSErr err; + + err = BTFlushPath(vcb->vcbExtentsFile); + if ( err == noErr ) + { + // If the FCB for the extent "file" is dirty, mark the VCB as dirty. + + if( ( vcb->vcbExtentsFile->fcbFlags & fcbModifiedMask ) != 0 ) + { + (void) MarkVCBDirty( vcb ); + err = FlushVolumeControlBlock( vcb ); + } + } + + return( err ); +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: DeallocateFile +// +// Function: De-allocates all disk space allocated to a specified file. +// The space occupied by both forks is deallocated. +// +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +OSErr DeallocateFile(SVCB *vcb, CatalogRecord * fileRec) +{ + int i; + OSErr errDF, errRF; + Boolean recordDeleted = false; + + errDF = errRF = 0; + + if (fileRec->recordType == kHFSFileRecord) { + HFSPlusExtentRecord dataForkExtents; + HFSPlusExtentRecord rsrcForkExtents; + + for (i = 0; i < kHFSExtentDensity; ++i) { + dataForkExtents[i].startBlock = + (UInt32) (fileRec->hfsFile.dataExtents[i].startBlock); + dataForkExtents[i].blockCount = + (UInt32) (fileRec->hfsFile.dataExtents[i].blockCount); + + rsrcForkExtents[i].startBlock = + (UInt32) (fileRec->hfsFile.rsrcExtents[i].startBlock); + rsrcForkExtents[i].blockCount = + (UInt32) (fileRec->hfsFile.rsrcExtents[i].blockCount); + } + ClearMemory(&dataForkExtents[i].startBlock, + sizeof(HFSPlusExtentRecord) - sizeof(HFSExtentRecord)); + + ClearMemory(&rsrcForkExtents[i].startBlock, + sizeof(HFSPlusExtentRecord) - sizeof(HFSExtentRecord)); + + errDF = DeallocateFork(vcb, fileRec->hfsFile.fileID, kDataForkType, + dataForkExtents, &recordDeleted ); + + errRF = DeallocateFork(vcb, fileRec->hfsFile.fileID, kResourceForkType, + rsrcForkExtents, &recordDeleted ); + } + else if (fileRec->recordType == kHFSPlusFileRecord) { + errDF = DeallocateFork(vcb, fileRec->hfsPlusFile.fileID, kDataForkType, + fileRec->hfsPlusFile.dataFork.extents, &recordDeleted ); + + errRF = DeallocateFork(vcb, fileRec->hfsPlusFile.fileID, kResourceForkType, + fileRec->hfsPlusFile.resourceFork.extents, &recordDeleted ); + } + + if (recordDeleted) + (void) FlushExtentFile(vcb); + + MarkVCBDirty(vcb); + + return (errDF ? errDF : errRF); +} + + +//_________________________________________________________________________________ +// +// Routine: Extendfile +// +// Function: Extends the disk space allocated to a file. 
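DeallocateFile and DeallocateFork lean on a simple contract: ReleaseExtents frees whatever one record maps, and the number of blocks it freed is exactly the file allocation block number at which the next (overflow) record would start, so that count can be handed straight to TruncateExtents. The toy below replays that walk on a bare array with invented values, to show where the running FABN in the "bump FABN" comment above comes from.

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for one extent descriptor. */
    struct extent { uint32_t startBlock, blockCount; };

    int main(void)
    {
        /* A catalog-resident record: three used extents, the rest empty. */
        struct extent rec[8] = { {100, 4}, {200, 8}, {300, 4}, {0, 0} };
        uint32_t fabn = 0;        /* running file allocation block number */
        int      sawEmpty = 0;    /* plays the role of *releasedLastExtent */
        int      i;

        for (i = 0; i < 8; i++) {
            if (rec[i].blockCount == 0) { sawEmpty = 1; break; }
            /* DeallocateFork would call ReleaseBitmapBits() here. */
            fabn += rec[i].blockCount;
        }

        if (sawEmpty)
            printf("no overflow records possible; freed %u blocks\n", fabn);
        else
            printf("overflow records start at FABN %u\n", fabn); /* -> TruncateExtents */
        return 0;
    }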
+// +// Input: A2.L - VCB pointer +// A1.L - pointer to FCB array +// D1.W - file refnum +// D3.B - option flags +// kEFContigMask - force contiguous allocation +// kEFAllMask - allocate all requested bytes or none +// NOTE: You may not set both options. +// D4.L - number of additional bytes to allocate +// +// Output: D0.W - result code +// 0 = ok +// -n = IO error +// D6.L - number of bytes allocated +// +// Called by: FileAloc,FileWrite,SetEof +// +// Note: ExtendFile updates the PEOF in the FCB. +//_________________________________________________________________________________ + +OSErr ExtendFileC ( + SVCB *vcb, // volume that file resides on + SFCB *fcb, // FCB of file to truncate + UInt32 sectorsToAdd, // number of sectors to allocate + UInt32 flags, // EFContig and/or EFAll + UInt32 *actualSectorsAdded)// number of bytes actually allocated +{ + OSErr err; + Boolean wantContig; + Boolean needsFlush; + UInt32 sectorsPerBlock; + UInt32 blocksToAdd; // number of blocks we'd like to add + UInt32 blocksPerClump; // number of blocks in clump size + UInt32 maxBlocksToAdd; // max blocks we want to add + UInt32 eofBlocks; // current EOF in blocks + HFSPlusExtentKey foundKey; // from SearchExtentFile + HFSPlusExtentRecord foundData; + UInt32 foundIndex; // from SearchExtentFile + UInt32 hint; // from SearchExtentFile + UInt32 nextBlock; // from SearchExtentFile + UInt32 startBlock; + UInt32 actualStartBlock; + UInt32 actualNumBlocks; + UInt32 numExtentsPerRecord; + UInt32 blocksAdded; + + needsFlush = false; // Assume the B-tree header doesn't need to be updated + blocksAdded = 0; + *actualSectorsAdded = 0; + + if (vcb->vcbSignature == kHFSPlusSigWord) + numExtentsPerRecord = kHFSPlusExtentDensity; + else + numExtentsPerRecord = kHFSExtentDensity; + + // + // Round up the request to whole allocation blocks + // + sectorsPerBlock = vcb->vcbBlockSize >> kSectorShift; + blocksToAdd = DivideAndRoundUp(sectorsToAdd, sectorsPerBlock); + + // + // Determine the physical EOF in allocation blocks + // + eofBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + + // + // Make sure the request won't make the file too big (>=2GB). + // [2350148] Always limit HFS files. + // ¥¥ Shouldn't really fail if allOrNothing is false + // ¥¥ Adjust for clump size here? + // + if ( vcb->vcbSignature == kHFSPlusSigWord ) + { + // Allow it to grow beyond 2GB. + } + else + { + UInt32 maxFileBlocks; // max legal EOF, in blocks + maxFileBlocks = (kTwoGigSectors-1) / sectorsPerBlock; + if (blocksToAdd > maxFileBlocks || (blocksToAdd + eofBlocks) > maxFileBlocks) { + err = fileBoundsErr; + goto ErrorExit; + } + } + + // + // If allocation is all-or-nothing, then make sure there + // are enough free blocks. (A quick test) + // + if ((flags & kEFAllMask) && blocksToAdd > vcb->vcbFreeBlocks) { + err = dskFulErr; + goto ErrorExit; + } + + // + // There may be blocks allocated beyond the physical EOF + // (because we allocated the rest of the clump size, or + // because of a PBAllocate or PBAllocContig call). + // If these extra blocks exist, then use them to satisfy + // part or all of the request. + // + // ¥¥ What, if anything, would break if the physical EOF always + // ¥¥ represented ALL extents allocated to the file (including + // ¥¥ the clump size roundup)? + // + // Note: (blocks * sectorsPerBlock - 1) is the sector offset + // of the last sector in the last block. 
+ // + err = SearchExtentFile(vcb, fcb, (eofBlocks+blocksToAdd) * sectorsPerBlock - 1, &foundKey, foundData, &foundIndex, &hint, &nextBlock); + if (err == noErr) { + // Enough blocks are already allocated. Just update the FCB to reflect the new length. + eofBlocks += blocksToAdd; // new EOF, in blocks + blocksAdded += blocksToAdd; + goto Exit; + } + if (err != fxRangeErr) // Any real error? + goto ErrorExit; // Yes, so exit immediately + + // + // There wasn't enough already allocated. But there might have been + // a few allocated blocks beyond the physical EOF. So, set the physical + // EOF to match the end of the last extent. + // + if (nextBlock > eofBlocks) { + // There were (nextBlock - eofBlocks) extra blocks past physical EOF + blocksAdded += nextBlock - eofBlocks; + blocksToAdd -= nextBlock - eofBlocks; + eofBlocks = nextBlock; + } + + // + // We still need to allocate more blocks. + // + // First try a contiguous allocation (of the whole amount). + // If that fails, get whatever we can. + // If forceContig, then take whatever we got + // else, keep getting bits and pieces (non-contig) + // + // ¥¥ Need to do clump size calculations + // + blocksPerClump = fcb->fcbClumpSize / vcb->vcbBlockSize; + if (blocksPerClump == 0) + blocksPerClump = 1; + + err = noErr; + wantContig = true; + do { + // Make maxBlocksToAdd equal to blocksToAdd rounded up to a multiple + // of the file's clump size. This gives the file room to grow some + // more without fragmenting. + if (flags & kEFNoClumpMask) { + // Caller said not to round up, so only allocate what was asked for. + maxBlocksToAdd = blocksToAdd; + } + else { + // Round up to multiple of clump size + maxBlocksToAdd = DivideAndRoundUp(blocksToAdd, blocksPerClump); + maxBlocksToAdd *= blocksPerClump; + } + + // Try to allocate the new space contiguous with the end of the previous + // extent. If this succeeds, the last extent grows and the file does not + // become any more fragmented. + startBlock = foundData[foundIndex].startBlock + foundData[foundIndex].blockCount; + err = BlockAllocate(vcb, startBlock, blocksToAdd, maxBlocksToAdd, wantContig, &actualStartBlock, &actualNumBlocks); + if (err == dskFulErr) { + if (flags & kEFContigMask) + break; // AllocContig failed because not enough contiguous space + if (wantContig) { + // Couldn't get one big chunk, so get whatever we can. + err = noErr; + wantContig = false; + continue; + } + if (actualNumBlocks != 0) + err = noErr; + } + if (err == noErr) { + // Add the new extent to the existing extent record, or create a new one. + if (actualStartBlock == startBlock) { + // We grew the file's last extent, so just adjust the number of blocks. + foundData[foundIndex].blockCount += actualNumBlocks; + err = UpdateExtentRecord(vcb, fcb, &foundKey, foundData, hint); + if (err != noErr) break; + } + else { + UInt16 i; + + // Need to add a new extent. See if there is room in the current record. + if (foundData[foundIndex].blockCount != 0) // Is current extent free to use? + ++foundIndex; // No, so use the next one. + if (foundIndex == numExtentsPerRecord) { + // This record is full. Need to create a new one. + if (fcb->fcbFileID == kHFSExtentsFileID || (flags & kEFNoExtOvflwMask)) { + (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks); + err = fxOvFlErr; // Oops. Can't extend extents file past first record. 
+ break; + } + + foundKey.keyLength = kHFSPlusExtentKeyMaximumLength; + if (fcb->fcbFlags & fcbResourceMask) + foundKey.forkType = kResourceForkType; + else + foundKey.forkType = kDataForkType; + foundKey.pad = 0; + foundKey.fileID = fcb->fcbFileID; + foundKey.startBlock = nextBlock; + + foundData[0].startBlock = actualStartBlock; + foundData[0].blockCount = actualNumBlocks; + + // zero out remaining extents... + for (i = 1; i < kHFSPlusExtentDensity; ++i) + { + foundData[i].startBlock = 0; + foundData[i].blockCount = 0; + } + + foundIndex = 0; + + err = CreateExtentRecord(vcb, &foundKey, foundData, &hint); + if (err == fxOvFlErr) { + // We couldn't create an extent record because extents B-tree + // couldn't grow. Dellocate the extent just allocated and + // return a disk full error. + (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks); + err = dskFulErr; + } + if (err != noErr) break; + + needsFlush = true; // We need to update the B-tree header + } + else { + // Add a new extent into this record and update. + foundData[foundIndex].startBlock = actualStartBlock; + foundData[foundIndex].blockCount = actualNumBlocks; + err = UpdateExtentRecord(vcb, fcb, &foundKey, foundData, hint); + if (err != noErr) break; + } + } + + // Figure out how many bytes were actually allocated. + // NOTE: BlockAllocate could have allocated more than the minimum + // we asked for (up to our requested maximum). + // Don't set the PEOF beyond what our client asked for. + nextBlock += actualNumBlocks; + if (actualNumBlocks > blocksToAdd) { + blocksAdded += blocksToAdd; + eofBlocks += blocksToAdd; + blocksToAdd = 0; + } + else { + blocksAdded += actualNumBlocks; + blocksToAdd -= actualNumBlocks; + eofBlocks += actualNumBlocks; + } + + // If contiguous allocation was requested, then we've already got one contiguous + // chunk. If we didn't get all we wanted, then adjust the error to disk full. + if (flags & kEFContigMask) { + if (blocksToAdd != 0) + err = dskFulErr; + break; // We've already got everything that's contiguous + } + } + } while (err == noErr && blocksToAdd); + +ErrorExit: +Exit: + *actualSectorsAdded = blocksAdded * sectorsPerBlock; + if (blocksAdded) { + fcb->fcbPhysicalSize = (UInt64)eofBlocks * (UInt64)vcb->vcbBlockSize; + fcb->fcbFlags |= fcbModifiedMask; + } + + // [2355121] If we created a new extent record, then update the B-tree header + if (needsFlush) + (void) FlushExtentFile(vcb); + + return err; +} + + + +//_________________________________________________________________________________ +// +// Routine: TruncateFileC +// +// Function: Truncates the disk space allocated to a file. The file space is +// truncated to a specified new PEOF rounded up to the next allocation +// block boundry. If the 'TFTrunExt' option is specified, the file is +// truncated to the end of the extent containing the new PEOF. +// +// Input: A2.L - VCB pointer +// A1.L - pointer to FCB array +// D1.W - file refnum +// D2.B - option flags +// TFTrunExt - truncate to the extent containing new PEOF +// D3.L - new PEOF +// +// Output: D0.W - result code +// 0 = ok +// -n = IO error +// +// Note: TruncateFile updates the PEOF in the FCB. 
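The allocation loop above pads each request up to the file's clump size so that repeated small extensions do not fragment the file, and BlockAllocate is then allowed to return anywhere between blocksToAdd and maxBlocksToAdd blocks; only the amount the caller asked for moves the PEOF. A quick numeric sketch (DivideAndRoundUp is assumed to be ordinary ceiling division, which matches how it is used here):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed semantics of the project's DivideAndRoundUp(): ceiling division. */
    static uint32_t divide_and_round_up(uint32_t n, uint32_t d) { return (n + d - 1) / d; }

    int main(void)
    {
        uint32_t blocksToAdd    = 5;   /* caller needs 5 more allocation blocks */
        uint32_t blocksPerClump = 8;   /* fcbClumpSize / vcbBlockSize           */

        uint32_t maxBlocksToAdd = divide_and_round_up(blocksToAdd, blocksPerClump)
                                * blocksPerClump;

        /* BlockAllocate(vcb, start, 5, 8, ...) may return 5..8 blocks; only the
         * first 5 move the PEOF, the rest remain as clump-size slack past EOF. */
        printf("ask for %u, allow up to %u\n", blocksToAdd, maxBlocksToAdd); /* 5, 8 */
        return 0;
    }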
+//_________________________________________________________________________________ + +#if 0 +OSErr TruncateFileC ( + SVCB *vcb, // volume that file resides on + SFCB *fcb, // FCB of file to truncate + UInt32 eofSectors, // new physical size for file + Boolean truncateToExtent) // if true, truncate to end of extent containing newPEOF +{ + OSErr err; + UInt32 nextBlock; // next file allocation block to consider + UInt32 startBlock; // Physical (volume) allocation block number of start of a range + UInt32 physNumBlocks; // Number of allocation blocks in file (according to PEOF) + UInt32 numBlocks; + HFSPlusExtentKey key; // key for current extent record; key->keyLength == 0 if FCB's extent record + UInt32 hint; // BTree hint corresponding to key + HFSPlusExtentRecord extentRecord; + UInt32 extentIndex; + UInt32 extentNextBlock; + UInt32 numExtentsPerRecord; + UInt32 sectorsPerBlock; + UInt8 forkType; + Boolean extentChanged; // true if we actually changed an extent + Boolean recordDeleted; // true if an extent record got deleted + + recordDeleted = false; + sectorsPerBlock = vcb->vcbBlockSize >> kSectorShift; + + if (vcb->vcbSignature == kHFSPlusSigWord) + numExtentsPerRecord = kHFSPlusExtentDensity; + else + numExtentsPerRecord = kHFSExtentDensity; + + if (fcb->fcbFlags & fcbResourceMask) + forkType = kResourceForkType; + else + forkType = kDataForkType; + + // Compute number of allocation blocks currently in file + physNumBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + + // + // Round newPEOF up to a multiple of the allocation block size. If new size is + // two gigabytes or more, then round down by one allocation block (??? really? + // shouldn't that be an error?). + // + nextBlock = DivideAndRoundUp(eofSectors, sectorsPerBlock); // number of allocation blocks to remain in file + eofSectors = nextBlock * sectorsPerBlock; // rounded up to multiple of block size + if ((fcb->fcbFlags & fcbLargeFileMask) == 0 && eofSectors >= kTwoGigSectors) { + #if DEBUG_BUILD + DebugStr("\pHFS: Trying to truncate a file to 2GB or more"); + #endif + err = fileBoundsErr; + goto ErrorExit; + } + + // + // Update FCB's length + // + fcb->fcbPhysicalSize = (UInt64)nextBlock * (UInt64)vcb->vcbBlockSize; + fcb->fcbFlags |= fcbModifiedMask; + + // + // If the new PEOF is 0, then truncateToExtent has no meaning (we should always deallocate + // all storage). + // + if (eofSectors == 0) { + int i; + + // Find the catalog extent record + err = GetFCBExtentRecord(vcb, fcb, extentRecord); + if (err != noErr) goto ErrorExit; // got some error, so return it + + // Deallocate all the extents for this fork + err = DeallocateFork(vcb, fcb->fcbFileID, forkType, extentRecord, &recordDeleted); + if (err != noErr) goto ErrorExit; // got some error, so return it + + // Update the catalog extent record (making sure it's zeroed out) + if (err == noErr) { + for (i=0; i < numExtentsPerRecord; i++) { + extentRecord[i].startBlock = 0; + extentRecord[i].blockCount = 0; + } + } + err = SetFCBExtentRecord((VCB *) vcb, fcb, extentRecord); + goto Done; + } + + // + // Find the extent containing byte (peof-1). This is the last extent we'll keep. + // (If truncateToExtent is true, we'll keep the whole extent; otherwise, we'll only + // keep up through peof). The search will tell us how many allocation blocks exist + // in the found extent plus all previous extents. 
+ // + err = SearchExtentFile(vcb, fcb, eofSectors-1, &key, extentRecord, &extentIndex, &hint, &extentNextBlock); + if (err != noErr) goto ErrorExit; + + extentChanged = false; // haven't changed the extent yet + + if (!truncateToExtent) { + // + // Shorten this extent. It may be the case that the entire extent gets + // freed here. + // + numBlocks = extentNextBlock - nextBlock; // How many blocks in this extent to free up + if (numBlocks != 0) { + // Compute first volume allocation block to free + startBlock = extentRecord[extentIndex].startBlock + extentRecord[extentIndex].blockCount - numBlocks; + // Free the blocks in bitmap + err = BlockDeallocate(vcb, startBlock, numBlocks); + if (err != noErr) goto ErrorExit; + // Adjust length of this extent + extentRecord[extentIndex].blockCount -= numBlocks; + // If extent is empty, set start block to 0 + if (extentRecord[extentIndex].blockCount == 0) + extentRecord[extentIndex].startBlock = 0; + // Remember that we changed the extent record + extentChanged = true; + } + } + + // + // Now move to the next extent in the record, and set up the file allocation block number + // + nextBlock = extentNextBlock; // Next file allocation block to free + ++extentIndex; // Its index within the extent record + + // + // Release all following extents in this extent record. Update the record. + // + while (extentIndex < numExtentsPerRecord && extentRecord[extentIndex].blockCount != 0) { + numBlocks = extentRecord[extentIndex].blockCount; + // Deallocate this extent + err = BlockDeallocate(vcb, extentRecord[extentIndex].startBlock, numBlocks); + if (err != noErr) goto ErrorExit; + // Update next file allocation block number + nextBlock += numBlocks; + // Zero out start and length of this extent to delete it from record + extentRecord[extentIndex].startBlock = 0; + extentRecord[extentIndex].blockCount = 0; + // Remember that we changed an extent + extentChanged = true; + // Move to next extent in record + ++extentIndex; + } + + // + // If any of the extents in the current record were changed, then update that + // record (in the FCB, or extents file). + // + if (extentChanged) { + err = UpdateExtentRecord(vcb, fcb, &key, extentRecord, hint); + if (err != noErr) goto ErrorExit; + } + + // + // If there are any following allocation blocks, then we need + // to seach for their extent records and delete those allocation + // blocks. + // + if (nextBlock < physNumBlocks) + err = TruncateExtents(vcb, forkType, fcb->fcbFileID, nextBlock, &recordDeleted); + +Done: +ErrorExit: + +#if DEBUG_BUILD + if (err == fxRangeErr) + DebugStr("\pAbout to return fxRangeErr"); +#endif + + // [2355121] If we actually deleted extent records, then update the B-tree header + if (recordDeleted) + (void) FlushExtentFile(vcb); + + return err; +} +#endif + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: SearchExtentRecord (was XRSearch) +// +// Function: Searches extent record for the extent mapping a given file +// allocation block number (FABN). 
+// +// Input: searchFABN - desired FABN +// extentData - pointer to extent data record (xdr) +// extentDataStartFABN - beginning FABN for extent record +// +// Output: foundExtentDataOffset - offset to extent entry within xdr +// result = noErr, offset to extent mapping desired FABN +// result = FXRangeErr, offset to last extent in record +// endingFABNPlusOne - ending FABN +1 +// noMoreExtents - True if the extent was not found, and the +// extent record was not full (so don't bother +// looking in subsequent records); false otherwise. +// +// Result: noErr = ok +// FXRangeErr = desired FABN > last mapped FABN in record +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +static OSErr SearchExtentRecord( + const SVCB *vcb, + UInt32 searchFABN, + const HFSPlusExtentRecord extentData, + UInt32 extentDataStartFABN, + UInt32 *foundExtentIndex, + UInt32 *endingFABNPlusOne, + Boolean *noMoreExtents) +{ + OSErr err = noErr; + UInt32 extentIndex; + UInt32 numberOfExtents; + UInt32 numAllocationBlocks; + Boolean foundExtent; + + *endingFABNPlusOne = extentDataStartFABN; + *noMoreExtents = false; + foundExtent = false; + + if (vcb->vcbSignature == kHFSPlusSigWord) + numberOfExtents = kHFSPlusExtentDensity; + else + numberOfExtents = kHFSExtentDensity; + + for( extentIndex = 0; extentIndex < numberOfExtents; ++extentIndex ) + { + + // Loop over the extent record and find the search FABN. + + numAllocationBlocks = extentData[extentIndex].blockCount; + if ( numAllocationBlocks == 0 ) + { + break; + } + + *endingFABNPlusOne += numAllocationBlocks; + + if( searchFABN < *endingFABNPlusOne ) + { + // Found the extent. + foundExtent = true; + break; + } + } + + if( foundExtent ) + { + // Found the extent. Note the extent offset + *foundExtentIndex = extentIndex; + } + else + { + // Did not find the extent. Set foundExtentDataOffset accordingly + if( extentIndex > 0 ) + { + *foundExtentIndex = extentIndex - 1; + } + else + { + *foundExtentIndex = 0; + } + + // If we found an empty extent, then set noMoreExtents. + if (extentIndex < numberOfExtents) + *noMoreExtents = true; + + // Finally, return an error to the caller + err = fxRangeErr; + } + + return( err ); +} + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: SearchExtentFile (was XFSearch) +// +// Function: Searches extent file (including the FCB resident extent record) +// for the extent mapping a given file position. +// +// Input: vcb - VCB pointer +// fcb - FCB pointer +// filePosition - file position (byte address) +// +// Output: foundExtentKey - extent key record (xkr) +// If extent was found in the FCB's resident extent record, +// then foundExtentKey->keyLength will be set to 0. 
+// foundExtentData - extent data record(xdr) +// foundExtentIndex - index to extent entry in xdr +// result = 0, offset to extent mapping desired FABN +// result = FXRangeErr, offset to last extent in record +// (i.e., kNumExtentsPerRecord-1) +// extentBTreeHint - BTree hint for extent record +// kNoHint = Resident extent record +// endingFABNPlusOne - ending FABN +1 +// +// Result: +// noErr Found an extent that contains the given file position +// FXRangeErr Given position is beyond the last allocated extent +// (other) (some other internal I/O error) +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +static OSErr SearchExtentFile( + const SVCB *vcb, + const SFCB *fcb, + UInt64 sectorOffset, + HFSPlusExtentKey *foundExtentKey, + HFSPlusExtentRecord foundExtentData, + UInt32 *foundExtentIndex, + UInt32 *extentBTreeHint, + UInt32 *endingFABNPlusOne ) +{ + OSErr err; + UInt32 filePositionBlock; + Boolean noMoreExtents = true; + + filePositionBlock = sectorOffset / (vcb->vcbBlockSize >> kSectorShift); + + // Search the resident FCB first. + err = GetFCBExtentRecord(vcb, fcb, foundExtentData); + if (err == noErr) + err = SearchExtentRecord( vcb, filePositionBlock, foundExtentData, 0, + foundExtentIndex, endingFABNPlusOne, &noMoreExtents ); + + if( err == noErr ) { + // Found the extent. Set results accordingly + *extentBTreeHint = kNoHint; // no hint, because not in the BTree + foundExtentKey->keyLength = 0; // 0 = the FCB itself + + goto Exit; + } + + // Didn't find extent in FCB. If FCB's extent record wasn't full, there's no point + // in searching the extents file. Note that SearchExtentRecord left us pointing at + // the last valid extent (or the first one, if none were valid). This means we need + // to fill in the hint and key outputs, just like the "if" statement above. + if ( noMoreExtents ) { + *extentBTreeHint = kNoHint; // no hint, because not in the BTree + foundExtentKey->keyLength = 0; // 0 = the FCB itself + err = fxRangeErr; // There are no more extents, so must be beyond PEOF + goto Exit; + } + + // + // Find the desired record, or the previous record if it is the same fork + // + err = FindExtentRecord(vcb, (fcb->fcbFlags & fcbResourceMask) ? kResourceForkType : kDataForkType, + fcb->fcbFileID, filePositionBlock, true, foundExtentKey, foundExtentData, extentBTreeHint); + + if (err == btNotFound) { + // + // If we get here, the desired position is beyond the extents in the FCB, and there are no extents + // in the extents file. Return the FCB's extents and a range error. + // + *extentBTreeHint = kNoHint; + foundExtentKey->keyLength = 0; + err = GetFCBExtentRecord(vcb, fcb, foundExtentData); + // Note: foundExtentIndex and endingFABNPlusOne have already been set as a result of the very + // first SearchExtentRecord call in this function (when searching in the FCB's extents, and + // we got a range error). + + return fxRangeErr; + } + + // + // If we get here, there was either a BTree error, or we found an appropriate record. + // If we found a record, then search it for the correct index into the extents. 
+ // + if (err == noErr) { + // Find appropriate index into extent record + err = SearchExtentRecord(vcb, filePositionBlock, foundExtentData, foundExtentKey->startBlock, + foundExtentIndex, endingFABNPlusOne, &noMoreExtents); + } + +Exit: + return err; +} + + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: UpdateExtentRecord +// +// Function: Write new extent data to an existing extent record with a given key. +// If all of the extents are empty, and the extent record is in the +// extents file, then the record is deleted. +// +// Input: vcb - the volume containing the extents +// fcb - the file that owns the extents +// extentFileKey - pointer to extent key record (xkr) +// If the key length is 0, then the extents are actually part +// of the catalog record, stored in the FCB. +// extentData - pointer to extent data record (xdr) +// extentBTreeHint - hint for given key, or kNoHint +// +// Result: noErr = ok +// (other) = error from BTree +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +OSErr UpdateExtentRecord ( + const SVCB *vcb, + SFCB *fcb, + const HFSPlusExtentKey *extentFileKey, + HFSPlusExtentRecord extentData, + UInt32 extentBTreeHint) +{ + OSErr err; + UInt32 foundHint; + UInt16 foundDataSize; + + if (extentFileKey->keyLength == 0) { // keyLength == 0 means the FCB's extent record + err = SetFCBExtentRecord(vcb, fcb, extentData); + fcb->fcbFlags |= fcbModifiedMask; + } + else { + // + // Need to find and change a record in Extents BTree + // + if (vcb->vcbSignature == kHFSSigWord) { + HFSExtentKey key; // Actual extent key used on disk in HFS + HFSExtentKey foundKey; // The key actually found during search + HFSExtentRecord foundData; // The extent data actually found + + key.keyLength = kHFSExtentKeyMaximumLength; + key.forkType = extentFileKey->forkType; + key.fileID = extentFileKey->fileID; + key.startBlock = extentFileKey->startBlock; + + err = SearchBTreeRecord(vcb->vcbExtentsFile, &key, extentBTreeHint, + &foundKey, &foundData, &foundDataSize, &foundHint); + + if (err == noErr) + err = ExtentsToExtDataRec(extentData, (HFSExtentDescriptor *)&foundData); + + if (err == noErr) + err = ReplaceBTreeRecord(vcb->vcbExtentsFile, &foundKey, foundHint, &foundData, foundDataSize, &foundHint); + } + else { // HFS Plus volume + HFSPlusExtentKey foundKey; // The key actually found during search + HFSPlusExtentRecord foundData; // The extent data actually found + + err = SearchBTreeRecord(vcb->vcbExtentsFile, extentFileKey, extentBTreeHint, + &foundKey, &foundData, &foundDataSize, &foundHint); + + if (err == noErr) + CopyMemory(extentData, &foundData, sizeof(HFSPlusExtentRecord)); + + if (err == noErr) + err = ReplaceBTreeRecord(vcb->vcbExtentsFile, &foundKey, foundHint, &foundData, foundDataSize, &foundHint); + } + } + + return err; +} + + +void ExtDataRecToExtents( + const HFSExtentRecord oldExtents, + HFSPlusExtentRecord newExtents) +{ + UInt32 i; + + // copy the first 3 extents + newExtents[0].startBlock = oldExtents[0].startBlock; + newExtents[0].blockCount = oldExtents[0].blockCount; + newExtents[1].startBlock = oldExtents[1].startBlock; + newExtents[1].blockCount = oldExtents[1].blockCount; + newExtents[2].startBlock = oldExtents[2].startBlock; + newExtents[2].blockCount = oldExtents[2].blockCount; + + // zero out the remaining ones + for (i = 3; i < kHFSPlusExtentDensity; ++i) + { + newExtents[i].startBlock = 0; + newExtents[i].blockCount = 0; + } +} + + + +static OSErr ExtentsToExtDataRec( + 
HFSPlusExtentRecord oldExtents, + HFSExtentRecord newExtents) +{ + OSErr err; + + err = noErr; + + // copy the first 3 extents + newExtents[0].startBlock = oldExtents[0].startBlock; + newExtents[0].blockCount = oldExtents[0].blockCount; + newExtents[1].startBlock = oldExtents[1].startBlock; + newExtents[1].blockCount = oldExtents[1].blockCount; + newExtents[2].startBlock = oldExtents[2].startBlock; + newExtents[2].blockCount = oldExtents[2].blockCount; + + #if DEBUG_BUILD + if (oldExtents[3].startBlock || oldExtents[3].blockCount) { + DebugStr("\pExtentRecord with > 3 extents is invalid for HFS"); + err = fsDSIntErr; + } + #endif + + return err; +} + + +OSErr GetFCBExtentRecord( + const SVCB *vcb, + const SFCB *fcb, + HFSPlusExtentRecord extents) +{ + if (vcb->vcbSignature == kHFSPlusSigWord) + CopyMemory(fcb->fcbExtents32, extents, sizeof(HFSPlusExtentRecord)); + else + ExtDataRecToExtents(fcb->fcbExtents16, extents); + return noErr; +} + + + +static OSErr SetFCBExtentRecord( + const SVCB *vcb, + SFCB *fcb, + HFSPlusExtentRecord extents) +{ + + #if DEBUG_BUILD + if (fcb->fcbVolume != vcb) + DebugStr("\pVCB does not match FCB"); + #endif + + if (vcb->vcbSignature == kHFSPlusSigWord) + CopyMemory(extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord)); + else + (void) ExtentsToExtDataRec(extents, fcb->fcbExtents16); + + return noErr; +} + + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: MapFileBlockFromFCB +// +// Function: Determine if the given file offset is within the set of extents +// stored in the FCB. If so, return the file allocation +// block number of the start of the extent, volume allocation block number +// of the start of the extent, and file allocation block number immediately +// following the extent. +// +// Input: vcb - the volume containing the extents +// fcb - the file that owns the extents +// offset - desired offset in 512-byte sectors +// +// Output: firstFABN - file alloc block number of start of extent +// firstBlock - volume alloc block number of start of extent +// nextFABN - file alloc block number of next extent +// +// Result: noErr = ok +// fxRangeErr = beyond FCB's extents +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +static OSErr MapFileBlockFromFCB( + const SVCB *vcb, + const SFCB *fcb, + UInt64 sectorOffset, // Desired offset in sectors from start of file + UInt32 *firstFABN, // FABN of first block of found extent + UInt32 *firstBlock, // Corresponding allocation block number + UInt32 *nextFABN) // FABN of block after end of extent +{ + UInt32 index; + UInt32 offsetBlocks; + + offsetBlocks = sectorOffset / (vcb->vcbBlockSize >> kSectorShift); + + if (vcb->vcbSignature == kHFSSigWord) { + const HFSExtentDescriptor *extent; + UInt32 blockCount; + UInt32 currentFABN; + + extent = fcb->fcbExtents16; + currentFABN = 0; + + for (index=0; index<kHFSExtentDensity; index++) { + + blockCount = extent->blockCount; + + if (blockCount == 0) + return fxRangeErr; // ran out of extents! + + // Is it in this extent? 
+ if (offsetBlocks < blockCount) { + *firstFABN = currentFABN; + *firstBlock = extent->startBlock; + currentFABN += blockCount; // faster to add these as UInt16 first, then extend to UInt32 + *nextFABN = currentFABN; + return noErr; // found the right extent + } + + // Not in current extent, so adjust counters and loop again + offsetBlocks -= blockCount; + currentFABN += blockCount; + extent++; + } + } + else { + const HFSPlusExtentDescriptor *extent; + UInt32 blockCount; + UInt32 currentFABN; + + extent = fcb->fcbExtents32; + currentFABN = 0; + + for (index=0; index<kHFSPlusExtentDensity; index++) { + + blockCount = extent->blockCount; + + if (blockCount == 0) + return fxRangeErr; // ran out of extents! + + // Is it in this extent? + if (offsetBlocks < blockCount) { + *firstFABN = currentFABN; + *firstBlock = extent->startBlock; + *nextFABN = currentFABN + blockCount; + return noErr; // found the right extent + } + + // Not in current extent, so adjust counters and loop again + offsetBlocks -= blockCount; + currentFABN += blockCount; + extent++; + } + } + + // If we fall through here, the extent record was full, but the offset was + // beyond those extents. + + return fxRangeErr; +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: ZeroFileBlocks +// +// Function: Write all zeros to a range of a file. Currently used when +// extending a B-Tree, so that all the new allocation blocks +// contain zeros (to prevent them from accidentally looking +// like real data). +// +// Input: vcb - the volume +// fcb - the file +// startingSector - the first 512-byte sector to write +// numberOfSectors - the number of sectors to zero +// +// Result: noErr = ok +// fxRangeErr = beyond FCB's extents +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +#define FSBufferSize 32768 + +OSErr ZeroFileBlocks( SVCB *vcb, SFCB *fcb, UInt32 startingSector, UInt32 numberOfSectors ) +{ + Ptr buffer; + OSErr err; + HIOParam iopb; + UInt32 requestedBytes; + UInt32 actualBytes; // Bytes actually read by CacheReadInPlace + UInt32 bufferSizeSectors = FSBufferSize >> kSectorShift; + UInt64 currentPosition = startingSector << kSectorShift; + + buffer = AllocateMemory(FSBufferSize); + if ( buffer == NULL ) + return( fileBoundsErr ); + + ClearMemory( buffer, FSBufferSize ); // Zero our buffer + ClearMemory( &iopb, sizeof(iopb) ); // Zero our param block + + iopb.ioRefNum = ResolveFileRefNum( fcb ); + iopb.ioBuffer = buffer; + iopb.ioPosMode |= noCacheMask; // OR with the high byte + + do + { + if ( numberOfSectors > bufferSizeSectors ) + requestedBytes = FSBufferSize; + else + requestedBytes = numberOfSectors << kSectorShift; + + err = CacheWriteInPlace( vcb, iopb.ioRefNum, &iopb, currentPosition, requestedBytes, &actualBytes ); + + if ( err || actualBytes == 0 ) + goto BAIL; + + // Don't update ioActCount to force writing from beginning of zero buffer + currentPosition += actualBytes; + numberOfSectors -= (actualBytes >> kSectorShift); + + } while( numberOfSectors > 0 ); + +BAIL: + DisposeMemory(buffer); + + if ( err == noErr && numberOfSectors != 0 ) + err = eofErr; + + return( err ); +} + +//_________________________________________________________________________________ +// +// Routine: ExtentsAreIntegral +// +// Purpose: Ensure that each extent can hold an integral number of nodes +// Called by the NodesAreContiguous function +//_________________________________________________________________________________ + +static Boolean 
ExtentsAreIntegral( + const HFSPlusExtentRecord extentRecord, + UInt32 mask, + UInt32 *blocksChecked, + Boolean *checkedLastExtent) +{ + UInt32 blocks; + UInt32 extentIndex; + + *blocksChecked = 0; + *checkedLastExtent = false; + + for(extentIndex = 0; extentIndex < kHFSPlusExtentDensity; extentIndex++) + { + blocks = extentRecord[extentIndex].blockCount; + + if ( blocks == 0 ) + { + *checkedLastExtent = true; + break; + } + + *blocksChecked += blocks; + + if (blocks & mask) + return false; + } + + return true; +} + +//_________________________________________________________________________________ +// +// Routine: NodesAreContiguous +// +// Purpose: Ensure that all b-tree nodes are contiguous on disk +// Called by BTOpenPath during volume mount +//_________________________________________________________________________________ + +Boolean NodesAreContiguous( + SFCB *fcb, + UInt32 nodeSize) +{ + SVCB *vcb; + UInt32 mask; + UInt32 startBlock; + UInt32 blocksChecked; + UInt32 hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents; + OSErr result; + Boolean lastExtentReached; + + + vcb = (SVCB *)fcb->fcbVolume; + + if (vcb->vcbBlockSize >= nodeSize) + return true; + + mask = (nodeSize / vcb->vcbBlockSize) - 1; + + // check the local extents + (void) GetFCBExtentRecord(vcb, fcb, extents); + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) + return false; + + if (lastExtentReached || ((UInt64)blocksChecked * (UInt64)vcb->vcbBlockSize) >= fcb->fcbPhysicalSize) + return true; + + startBlock = blocksChecked; + + // check the overflow extents (if any) + while ( !lastExtentReached ) + { + result = FindExtentRecord(vcb, kDataForkType, fcb->fcbFileID, startBlock, false, &key, extents, &hint); + if (result) break; + + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) + return false; + + startBlock += blocksChecked; + } + + return true; +} diff --git a/fsck_hfs/dfalib/SKeyCompare.c b/fsck_hfs/dfalib/SKeyCompare.c new file mode 100644 index 0000000..57967e9 --- /dev/null +++ b/fsck_hfs/dfalib/SKeyCompare.c @@ -0,0 +1,515 @@ +/* + * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + + + +#include "Scavenger.h" +#include "BTree.h" +#include "CaseFolding.h" + + +SInt32 FastUnicodeCompare ( register ConstUniCharArrayPtr str1, register ItemCount length1, + register ConstUniCharArrayPtr str2, register ItemCount length2); + +//_______________________________________________________________________ +// +// Routine: FastRelString +// +// Output: returns -1 if str1 < str2 +// returns 1 if str1 > str2 +// return 0 if equal +// +//_______________________________________________________________________ + +SInt32 FastRelString( ConstStr255Param str1, ConstStr255Param str2 ) +{ + SInt32 bestGuess; + UInt8 length, length2; + + + length = *(str1++); + length2 = *(str2++); + + if (length == length2) + bestGuess = 0; + else if (length < length2) + bestGuess = -1; + else + { + bestGuess = 1; + length = length2; + } + + while (length--) + { + UInt32 aChar, bChar; + + aChar = *(str1++); + bChar = *(str2++); + + if (aChar != bChar) /* If they don't match exacly, do case conversion */ + { + UInt16 aSortWord, bSortWord; + + aSortWord = gCompareTable[aChar]; + bSortWord = gCompareTable[bChar]; + + if (aSortWord > bSortWord) + return 1; + + if (aSortWord < bSortWord) + return -1; + } + + /* + * If characters match exactly, then go on to next character + * immediately without doing any extra work. + */ + } + + /* if you got to here, then return bestGuess */ + return bestGuess; +} + + + +// +// FastUnicodeCompare - Compare two Unicode strings; produce a relative ordering +// +// IF RESULT +// -------------------------- +// str1 < str2 => -1 +// str1 = str2 => 0 +// str1 > str2 => +1 +// +// The lower case table starts with 256 entries (one for each of the upper bytes +// of the original Unicode char). If that entry is zero, then all characters with +// that upper byte are already case folded. If the entry is non-zero, then it is +// the _index_ (not byte offset) of the start of the sub-table for the characters +// with that upper byte. All ignorable characters are folded to the value zero. +// +// In pseudocode: +// +// Let c = source Unicode character +// Let table[] = lower case table +// +// lower = table[highbyte(c)] +// if (lower == 0) +// lower = c +// else +// lower = table[lower+lowbyte(c)] +// +// if (lower == 0) +// ignore this character +// +// To handle ignorable characters, we now need a loop to find the next valid character. +// Also, we can't pre-compute the number of characters to compare; the string length might +// be larger than the number of non-ignorable characters. Further, we must be able to handle +// ignorable characters at any point in the string, including as the first or last characters. +// We use a zero value as a sentinel to detect both end-of-string and ignorable characters. +// Since the File Manager doesn't prevent the NUL character (value zero) as part of a filename, +// the case mapping table is assumed to map u+0000 to some non-zero value (like 0xFFFF, which is +// an invalid Unicode character). +// +// Pseudocode: +// +// while (1) { +// c1 = GetNextValidChar(str1) // returns zero if at end of string +// c2 = GetNextValidChar(str2) +// +// if (c1 != c2) break // found a difference +// +// if (c1 == 0) // reached end of string on both strings at once? +// return 0; // yes, so strings are equal +// } +// +// // When we get here, c1 != c2. So, we just need to determine which one is less. 
+// if (c1 < c2) +// return -1; +// else +// return 1; +// + +SInt32 FastUnicodeCompare ( register ConstUniCharArrayPtr str1, register ItemCount length1, + register ConstUniCharArrayPtr str2, register ItemCount length2) +{ + register UInt16 c1,c2; + register UInt16 temp; + + while (1) { + /* Set default values for c1, c2 in case there are no more valid chars */ + c1 = 0; + c2 = 0; + + /* Find next non-ignorable char from str1, or zero if no more */ + while (length1 && c1 == 0) { + c1 = *(str1++); + --length1; + if ((temp = gLowerCaseTable[c1>>8]) != 0) // is there a subtable for this upper byte? + c1 = gLowerCaseTable[temp + (c1 & 0x00FF)]; // yes, so fold the char + } + + + /* Find next non-ignorable char from str2, or zero if no more */ + while (length2 && c2 == 0) { + c2 = *(str2++); + --length2; + if ((temp = gLowerCaseTable[c2>>8]) != 0) // is there a subtable for this upper byte? + c2 = gLowerCaseTable[temp + (c2 & 0x00FF)]; // yes, so fold the char + } + + if (c1 != c2) /* found a difference, so stop looping */ + break; + + if (c1 == 0) /* did we reach the end of both strings at the same time? */ + return 0; /* yes, so strings are equal */ + } + + if (c1 < c2) + return -1; + else + return 1; +} + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: CompareCatalogKeys +// +// Function: Compares two catalog keys (a search key and a trial key). +// +// Result: +n search key > trial key +// 0 search key = trial key +// -n search key < trial key +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +SInt32 +CompareCatalogKeys(HFSCatalogKey *searchKey, HFSCatalogKey *trialKey) +{ + HFSCatalogNodeID searchParentID, trialParentID; + SInt32 result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + + if ( searchParentID > trialParentID ) /* parent dirID is unsigned */ + result = 1; + else if ( searchParentID < trialParentID ) + result = -1; + else /* parent dirID's are equal, compare names */ + result = FastRelString(searchKey->nodeName, trialKey->nodeName); + + return result; +} + + +/* + * Routine: CompareExtendedCatalogKeys + * + * Function: Compares two large catalog keys (a search key and a trial key). + * + * Result: +n search key > trial key + * 0 search key = trial key + * -n search key < trial key + */ + +SInt32 +CompareExtendedCatalogKeys(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + SInt32 result; + HFSCatalogNodeID searchParentID, trialParentID; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + + if ( searchParentID > trialParentID ) // parent node IDs are unsigned + { + result = 1; + } + else if ( searchParentID < trialParentID ) + { + result = -1; + } + else // parent node ID's are equal, compare names + { + if ( searchKey->nodeName.length == 0 || trialKey->nodeName.length == 0 ) + result = searchKey->nodeName.length - trialKey->nodeName.length; + else + result = FastUnicodeCompare(&searchKey->nodeName.unicode[0], searchKey->nodeName.length, + &trialKey->nodeName.unicode[0], trialKey->nodeName.length); + } + + return result; +} + + +/* + * Routine: CaseSensitiveCatalogKeyCompare + * + * Function: Compares two catalog keys using a 16-bit binary comparison + * for the name portion of the key. 
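The two-level lower-case table that FastUnicodeCompare consults is easiest to see in isolation. The sketch below is a minimal, self-contained illustration of the same lookup shape (a 256-entry high-byte directory, optional sub-tables in the same flat array, zero meaning "ignorable"); the toy table only folds ASCII and is not the real gLowerCaseTable from CaseFolding.h.

#include <stdio.h>
#include <stdint.h>

/* One flat table, shaped like the real gLowerCaseTable: the first 256
 * entries are a directory indexed by the high byte of a UTF-16 value; a
 * non-zero directory entry is the index of that high byte's 256-entry
 * sub-table within the same array, and a zero sub-table entry means the
 * character is ignorable. */
static uint16_t gToyLowerCaseTable[256 + 256];

static void BuildToyTable(void)
{
    int i;

    gToyLowerCaseTable[0x00] = 256;            /* high byte 0x00 has a sub-table at index 256 */
    for (i = 0; i < 256; i++)
        gToyLowerCaseTable[256 + i] = (uint16_t)i;          /* identity by default */
    gToyLowerCaseTable[256 + 0x00] = 0xFFFF;   /* NUL must not look ignorable */
    for (i = 'A'; i <= 'Z'; i++)
        gToyLowerCaseTable[256 + i] = (uint16_t)(i + 32);   /* fold ASCII upper case */
    gToyLowerCaseTable[256 + 0xAD] = 0;        /* pretend soft hyphen is ignorable */
}

/* Same lookup FastUnicodeCompare performs; a zero result means "skip". */
static uint16_t FoldChar(uint16_t c)
{
    uint16_t idx = gToyLowerCaseTable[c >> 8];

    if (idx == 0)
        return c;          /* every character with this high byte is already folded */
    return gToyLowerCaseTable[idx + (c & 0x00FF)];
}

int main(void)
{
    BuildToyTable();
    printf("'A' folds to '%c'\n", (char)FoldChar('A'));           /* prints: a */
    printf("U+00AD folds to %u (ignorable)\n", FoldChar(0x00AD)); /* prints: 0 */
    return 0;
}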
+ * + * Result: +n search key > trial key + * 0 search key = trial key + * -n search key < trial key + */ + +SInt32 +CaseSensitiveCatalogKeyCompare(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + HFSCatalogNodeID searchParentID, trialParentID; + SInt32 result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + result = 0; + + if (searchParentID > trialParentID) { + ++result; + } else if (searchParentID < trialParentID) { + --result; + } else { + UInt16 * str1 = &searchKey->nodeName.unicode[0]; + UInt16 * str2 = &trialKey->nodeName.unicode[0]; + int length1 = searchKey->nodeName.length; + int length2 = trialKey->nodeName.length; + UInt16 c1, c2; + int length; + + if (length1 < length2) { + length = length1; + --result; + } else if (length1 > length2) { + length = length2; + ++result; + } else { + length = length1; + } + + while (length--) { + c1 = *(str1++); + c2 = *(str2++); + if (c1 > c2) { + result = 1; + break; + } + if (c1 < c2) { + result = -1; + break; + } + } + } + + return result; +} + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: CompareExtentKeys +// +// Function: Compares two extent file keys (a search key and a trial key) for +// an HFS volume. +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +SInt32 CompareExtentKeys( const HFSExtentKey *searchKey, const HFSExtentKey *trialKey ) +{ + SInt32 result; // ± 1 + + #if DEBUG_BUILD + if (searchKey->keyLength != kHFSExtentKeyMaximumLength) + DebugStr("\pHFS: search Key is wrong length"); + if (trialKey->keyLength != kHFSExtentKeyMaximumLength) + DebugStr("\pHFS: trial Key is wrong length"); + #endif + + result = -1; // assume searchKey < trialKey + + if (searchKey->fileID == trialKey->fileID) { + // + // FileNum's are equal; compare fork types + // + if (searchKey->forkType == trialKey->forkType) { + // + // Fork types are equal; compare allocation block number + // + if (searchKey->startBlock == trialKey->startBlock) { + // + // Everything is equal + // + result = 0; + } + else { + // + // Allocation block numbers differ; determine sign + // + if (searchKey->startBlock > trialKey->startBlock) + result = 1; + } + } + else { + // + // Fork types differ; determine sign + // + if (searchKey->forkType > trialKey->forkType) + result = 1; + } + } + else { + // + // FileNums differ; determine sign + // + if (searchKey->fileID > trialKey->fileID) + result = 1; + } + + return( result ); +} + + + +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ +// Routine: CompareExtentKeysPlus +// +// Function: Compares two extent file keys (a search key and a trial key) for +// an HFS volume. 
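The nested-if extent key comparator above implements a plain lexicographic ordering on (fileID, forkType, startBlock). A compact way to see that ordering is to sort a few keys with an equivalent tuple comparator; the ToyExtentKey structure and names below are illustrative stand-ins for the sketch, not the real HFSExtentKey/HFSPlusExtentKey layout.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-in for an extent key: just the three fields that
 * determine ordering in the extents B-tree. */
typedef struct {
    uint32_t fileID;
    uint8_t  forkType;
    uint32_t startBlock;
} ToyExtentKey;

/* Lexicographic (fileID, forkType, startBlock) comparison -- the same
 * ordering the nested-if comparators implement. */
static int CompareToyExtentKeys(const void *a, const void *b)
{
    const ToyExtentKey *x = a, *y = b;

    if (x->fileID != y->fileID)
        return (x->fileID < y->fileID) ? -1 : 1;
    if (x->forkType != y->forkType)
        return (x->forkType < y->forkType) ? -1 : 1;
    if (x->startBlock != y->startBlock)
        return (x->startBlock < y->startBlock) ? -1 : 1;
    return 0;
}

int main(void)
{
    ToyExtentKey keys[] = {
        { 22, 0x00, 800 }, { 21, 0x00, 0 }, { 22, 0x00, 100 }, { 22, 0xFF, 0 },
    };
    size_t i, n = sizeof(keys) / sizeof(keys[0]);

    qsort(keys, n, sizeof(keys[0]), CompareToyExtentKeys);
    for (i = 0; i < n; i++)
        printf("fileID %u forkType 0x%02X startBlock %u\n",
               (unsigned)keys[i].fileID, keys[i].forkType, (unsigned)keys[i].startBlock);
    return 0;
}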
+//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +SInt32 CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusExtentKey *trialKey ) +{ + SInt32 result; // ± 1 + + #if DEBUG_BUILD + if (searchKey->keyLength != kHFSPlusExtentKeyMaximumLength) + DebugStr("\pHFS: search Key is wrong length"); + if (trialKey->keyLength != kHFSPlusExtentKeyMaximumLength) + DebugStr("\pHFS: trial Key is wrong length"); + #endif + + result = -1; // assume searchKey < trialKey + + if (searchKey->fileID == trialKey->fileID) { + // + // FileNum's are equal; compare fork types + // + if (searchKey->forkType == trialKey->forkType) { + // + // Fork types are equal; compare allocation block number + // + if (searchKey->startBlock == trialKey->startBlock) { + // + // Everything is equal + // + result = 0; + } + else { + // + // Allocation block numbers differ; determine sign + // + if (searchKey->startBlock > trialKey->startBlock) + result = 1; + } + } + else { + // + // Fork types differ; determine sign + // + if (searchKey->forkType > trialKey->forkType) + result = 1; + } + } + else { + // + // FileNums differ; determine sign + // + if (searchKey->fileID > trialKey->fileID) + result = 1; + } + + return( result ); +} + + +/* + * Compare two attribute b-tree keys. + * + * The name portion of the key is compared using a 16-bit binary comparison. + * This is called from the b-tree code. + */ +__private_extern__ +SInt32 +CompareAttributeKeys(const AttributeKey *searchKey, const AttributeKey *trialKey) +{ + UInt32 searchFileID, trialFileID; + SInt32 result; + + searchFileID = searchKey->cnid; + trialFileID = trialKey->cnid; + result = 0; + + if (searchFileID > trialFileID) { + ++result; + } else if (searchFileID < trialFileID) { + --result; + } else { + const UInt16 * str1 = searchKey->attrName; + const UInt16 * str2 = trialKey->attrName; + int length1 = searchKey->attrNameLen; + int length2 = trialKey->attrNameLen; + UInt16 c1, c2; + int length; + + if (length1 < length2) { + length = length1; + --result; + } else if (length1 > length2) { + length = length2; + ++result; + } else { + length = length1; + } + + while (length--) { + c1 = *(str1++); + c2 = *(str2++); + + if (c1 > c2) { + result = 1; + break; + } + if (c1 < c2) { + result = -1; + break; + } + } + if (result) + return (result); + /* + * Names are equal; compare startBlock + */ + if (searchKey->startBlock == trialKey->startBlock) + return (0); + else + return (searchKey->startBlock < trialKey->startBlock ? -1 : 1); + } + + return result; +} + diff --git a/fsck_hfs/dfalib/SRebuildBTree.c b/fsck_hfs/dfalib/SRebuildBTree.c new file mode 100755 index 0000000..79ef408 --- /dev/null +++ b/fsck_hfs/dfalib/SRebuildBTree.c @@ -0,0 +1,1226 @@ +/* + * Copyright (c) 2002-2005, 2007-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SRebuildBTree.c + + Contains: This file contains BTree rebuilding code. + + Written by: Jerry Cottingham + + Copyright: © 1986, 1990, 1992-2002 by Apple Computer, Inc., all rights reserved. + +*/ + +#define SHOW_ELAPSED_TIMES 0 +#define DEBUG_REBUILD 1 + +extern void MyIndirectLog(const char *); + +#if SHOW_ELAPSED_TIMES +#include <sys/time.h> +#endif + +#include "Scavenger.h" +#include "../cache.h" + +/* internal routine prototypes */ + +/*static*/ OSErr CreateNewBTree( SGlobPtr theSGlobPtr, int FileID ); +static OSErr DeleteBTree( SGlobPtr theSGlobPtr, SFCB * theFCBPtr ); +static OSErr InitializeBTree( BTreeControlBlock * theBTreeCBPtr, + UInt32 * theBytesUsedPtr, + UInt32 * theMapNodeCountPtr ); +static OSErr ReleaseExtentsInExtentsBTree( SGlobPtr theSGlobPtr, + SFCB * theFCBPtr ); +static OSErr ValidateCatalogRecordLength( SGlobPtr theSGlobPtr, + CatalogRecord * theRecPtr, + UInt32 theRecSize ); +static OSErr ValidateAttributeRecordLength (SGlobPtr s, HFSPlusAttrRecord * theRecPtr, UInt32 theRecSize); +static OSErr ValidateExtentRecordLength (SGlobPtr s, ExtentRecord * theRecPtr, UInt32 theRecSize); +static OSErr WriteMapNodes( BTreeControlBlock * theBTreeCBPtr, + UInt32 theFirstMapNode, + UInt32 theNodeCount ); + +#if DEBUG_REBUILD +static void PrintBTHeaderRec( BTHeaderRec * thePtr ); +static void PrintNodeDescriptor( NodeDescPtr thePtr ); +static void PrintBTreeKey( KeyPtr thePtr, BTreeControlBlock * theBTreeCBPtr ); +static void PrintBTreeData(void *data, UInt32 length); +static void PrintIndexNodeRec( UInt32 theNodeNum ); +static void PrintLeafNodeRec( HFSPlusCatalogFolder * thePtr ); +#endif + +void SETOFFSET ( void *buffer, UInt16 btNodeSize, SInt16 recOffset, SInt16 vecOffset ); +#define SETOFFSET( buf,ndsiz,offset,rec ) \ + ( *(SInt16 *)((UInt8 *)(buf) + (ndsiz) + (-2 * (rec))) = (offset) ) + + +//_________________________________________________________________________________ +// +// Routine: RebuildBTree +// +// Purpose: Attempt to rebuild a B-Tree file using an existing B-Tree +// file. When successful a new BT-ree file will exist and +// the old one will be deleted. The MDB an alternate MDB +// will be updated to point to the new file. +// +// The tree is rebuilt by walking through every record. We use +// BTScanNextRecord(), which iterates sequentially through the +// nodes in the tree (starting at the first node), and extracts +// each record from each leaf node. It does not use the node +// forward or backward links; this allows it to rebuild the tree +// when the index nodes are non-reliable, or the leaf node links +// are damaged. +// +// The rebuild will be aborted (leaving the existing btree +// as it was found) if there are errors retreiving the nodes or +// records, or if there are errors inserting the records into +// the new tree. +// +// Inputs: +// SGlobPtr->calculatedCatalogBTCB or SGlobPtr->calculatedAttributesBTCB +// need this as a model and in order to extract leaf records. 
+// SGlobPtr->calculatedCatalogFCB or SGlobPtr->calculatedAttributesFCB +// need this as a model and in order to extract leaf records. +// SGlobPtr->calculatedRepairFCB +// pointer to our repair FCB. +// SGlobPtr->calculatedRepairBTCB +// pointer to our repair BTreeControlBlock. +// +// Outputs: +// SGlobPtr->calculatedVCB +// this will get mostly filled in here. On input it is not fully +// set up. +// SGlobPtr->calculatedCatalogFCB or SGlobPtr->calculatedAttributesFCB +// tis will refer to the new catalog file. +// +// Result: +// various error codes when problem occur or noErr if rebuild completed +// +// to do: +// have an option where we get back what we can. +// +// Notes: +// - requires input BTCB and FCBs to be valid! +//_________________________________________________________________________________ + +OSErr RebuildBTree( SGlobPtr theSGlobPtr, int FileID ) +{ + BlockDescriptor myBlockDescriptor; + BTreeKeyPtr myCurrentKeyPtr; + void * myCurrentDataPtr; + SFCB * myFCBPtr = NULL; + SFCB * oldFCBPtr = NULL; + SVCB * myVCBPtr; + UInt32 myDataSize; + UInt32 myHint; + OSErr myErr; + Boolean isHFSPlus; + UInt32 numRecords = 0; + +#if SHOW_ELAPSED_TIMES + struct timeval myStartTime; + struct timeval myEndTime; + struct timeval myElapsedTime; + struct timezone zone; +#endif + + theSGlobPtr->TarID = FileID; + theSGlobPtr->TarBlock = 0; + myBlockDescriptor.buffer = NULL; + myVCBPtr = theSGlobPtr->calculatedVCB; + if (kHFSCatalogFileID == FileID) { + oldFCBPtr = theSGlobPtr->calculatedCatalogFCB; + } else if (kHFSAttributesFileID == FileID) { + oldFCBPtr = theSGlobPtr->calculatedAttributesFCB; + /* + * 12447845 + * If we don't have an attributes btree, then we should just + * quit now -- nothing to rebuild. + */ + if (oldFCBPtr->fcbLogicalSize == 0) { + if (debug) + plog("Requested attributes btree rebuild, but attributes file size is 0\n"); + myErr = 0; + goto ExitThisRoutine; + } + } else if (kHFSExtentsFileID == FileID) { + oldFCBPtr = theSGlobPtr->calculatedExtentsFCB; + } else { + abort(); + } + + myErr = BTScanInitialize( oldFCBPtr, &theSGlobPtr->scanState ); + if ( noErr != myErr ) + goto ExitThisRoutine; + + // some VCB fields that we need may not have been calculated so we get it from the MDB. + // this can happen because the fsck_hfs code path to fully set up the VCB may have been + // aborted if an error was found that would trigger a rebuild. For example, + // if a leaf record was found to have a keys out of order then the verify phase of the + // B-Tree check would be aborted and we would come directly (if allowable) to here. 
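Stripped of the HFS-specific plumbing, the rebuild described above is a sequential leaf-record scan feeding inserts into a fresh tree, with an all-or-nothing abort policy. The skeleton below sketches only that control flow; NextLeafRecord, ValidateRecord and InsertRecord are hypothetical stand-ins for BTScanNextRecord, the record-length validators and InsertBTreeRecord, stubbed out so the sketch runs on its own.

#include <stdio.h>

typedef int OSStatus_t;
enum { kNoErr = 0, kScanEnd = -1, kRecordBad = -2 };

/* Stubs standing in for the real scanner/validator/insert primitives. */
static OSStatus_t NextLeafRecord(void **key, void **data, unsigned *size)
{
    (void)key; (void)data; (void)size;
    return kScanEnd;                       /* pretend the source tree is empty */
}
static OSStatus_t ValidateRecord(const void *data, unsigned size)
{
    (void)data; (void)size;
    return kNoErr;
}
static OSStatus_t InsertRecord(const void *key, const void *data, unsigned size)
{
    (void)key; (void)data; (void)size;
    return kNoErr;
}

/* All-or-nothing rebuild loop: any validation or insertion failure aborts
 * the rebuild so the original tree can be kept as found. */
static OSStatus_t RebuildBySequentialScan(void)
{
    void *key, *data;
    unsigned size, records = 0;
    OSStatus_t err;

    for (;;) {
        err = NextLeafRecord(&key, &data, &size);
        if (err == kScanEnd) { err = kNoErr; break; }     /* normal termination */
        if (err != kNoErr) break;

        if (ValidateRecord(data, size) != kNoErr) { err = kRecordBad; break; }
        if ((err = InsertRecord(key, data, size)) != kNoErr) break;
        records++;
    }
    printf("rebuilt %u records, status %d\n", records, err);
    return err;
}

int main(void) { return RebuildBySequentialScan() == kNoErr ? 0 : 1; }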
+ isHFSPlus = ( myVCBPtr->vcbSignature == kHFSPlusSigWord ); + + if (!isHFSPlus) { + myErr = noMacDskErr; + goto ExitThisRoutine; + } + + myErr = GetVolumeObjectVHBorMDB( &myBlockDescriptor ); + if ( noErr != myErr ) + goto ExitThisRoutine; + + if ( isHFSPlus ) + { + HFSPlusVolumeHeader * myVHBPtr; + + myVHBPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer; + myVCBPtr->vcbFreeBlocks = myVHBPtr->freeBlocks; + myVCBPtr->vcbFileCount = myVHBPtr->fileCount; + myVCBPtr->vcbFolderCount = myVHBPtr->folderCount; + myVCBPtr->vcbEncodingsBitmap = myVHBPtr->encodingsBitmap; + myVCBPtr->vcbRsrcClumpSize = myVHBPtr->rsrcClumpSize; + myVCBPtr->vcbDataClumpSize = myVHBPtr->dataClumpSize; + + // check out creation and last mod dates + myVCBPtr->vcbCreateDate = myVHBPtr->createDate; + myVCBPtr->vcbModifyDate = myVHBPtr->modifyDate; + myVCBPtr->vcbCheckedDate = myVHBPtr->checkedDate; + myVCBPtr->vcbBackupDate = myVHBPtr->backupDate; + myVCBPtr->vcbCatalogFile->fcbClumpSize = myVHBPtr->catalogFile.clumpSize; + if (myVCBPtr->vcbAttributesFile != NULL) { + myVCBPtr->vcbAttributesFile->fcbClumpSize = myVHBPtr->attributesFile.clumpSize; + } + myVCBPtr->vcbExtentsFile->fcbClumpSize = myVHBPtr->extentsFile.clumpSize; + + // 3882639: Removed check for volume attributes in HFS Plus + myVCBPtr->vcbAttributes = myVHBPtr->attributes; + + CopyMemory( myVHBPtr->finderInfo, myVCBPtr->vcbFinderInfo, sizeof(myVCBPtr->vcbFinderInfo) ); + } + else + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + myVCBPtr->vcbFreeBlocks = myMDBPtr->drFreeBks; + myVCBPtr->vcbFileCount = myMDBPtr->drFilCnt; + myVCBPtr->vcbFolderCount = myMDBPtr->drDirCnt; + myVCBPtr->vcbDataClumpSize = myMDBPtr->drClpSiz; + myVCBPtr->vcbCatalogFile->fcbClumpSize = myMDBPtr->drCTClpSiz; + myVCBPtr->vcbNmRtDirs = myMDBPtr->drNmRtDirs; + + // check out creation and last mod dates + myVCBPtr->vcbCreateDate = myMDBPtr->drCrDate; + myVCBPtr->vcbModifyDate = myMDBPtr->drLsMod; + + // verify volume attribute flags + if ( (myMDBPtr->drAtrb & VAtrb_Msk) == 0 ) + myVCBPtr->vcbAttributes = myMDBPtr->drAtrb; + else + myVCBPtr->vcbAttributes = VAtrb_DFlt; + myVCBPtr->vcbBackupDate = myMDBPtr->drVolBkUp; + myVCBPtr->vcbVSeqNum = myMDBPtr->drVSeqNum; + CopyMemory( myMDBPtr->drFndrInfo, myVCBPtr->vcbFinderInfo, sizeof(myMDBPtr->drFndrInfo) ); + } + (void) ReleaseVolumeBlock( myVCBPtr, &myBlockDescriptor, kReleaseBlock ); + myBlockDescriptor.buffer = NULL; + + // create the new BTree file + if (FileID == kHFSCatalogFileID || FileID == kHFSAttributesFileID || FileID == kHFSExtentsFileID) { + myErr = CreateNewBTree( theSGlobPtr, FileID ); + } else { + myErr = EINVAL; + } + if ( noErr != myErr ) { +#if DEBUG_REBUILD + plog("CreateNewBTree returned %d\n", myErr); +#endif + if (myErr == dskFulErr) { + fsckPrint(theSGlobPtr->context, E_DiskFull); + } + goto ExitThisRoutine; + } + myFCBPtr = theSGlobPtr->calculatedRepairFCB; + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myStartTime, &zone ); +#endif + +#if DEBUG_REBUILD + if (debug) { + int i; + HFSPlusExtentDescriptor *te = (HFSPlusExtentDescriptor*)&theSGlobPtr->calculatedRepairFCB->fcbExtents32; + printf("Extent records for rebuilt file %u:\n", FileID); + for (i = 0; i < kHFSPlusExtentDensity; i++) { + printf("\t[ %u, %u ]\n", te[i].startBlock, te[i].blockCount); + } + } +#endif + + while ( true ) + { + /* scan the btree for leaf records */ + myErr = BTScanNextRecord( &theSGlobPtr->scanState, + (void **) &myCurrentKeyPtr, + (void **) &myCurrentDataPtr, + &myDataSize ); + 
if ( noErr != myErr ) + break; + + /* do some validation on the record */ + theSGlobPtr->TarBlock = theSGlobPtr->scanState.nodeNum; + if (FileID == kHFSCatalogFileID) { + myErr = ValidateCatalogRecordLength( theSGlobPtr, myCurrentDataPtr, myDataSize ); + } else if (FileID == kHFSAttributesFileID) { + myErr = ValidateAttributeRecordLength( theSGlobPtr, myCurrentDataPtr, myDataSize ); + } else if (FileID == kHFSExtentsFileID) { + myErr = ValidateExtentRecordLength( theSGlobPtr, myCurrentDataPtr, myDataSize ); + } + if ( noErr != myErr ) + { +#if DEBUG_REBUILD + { + plog( "%s - Record length validation (file %d) failed! \n", __FUNCTION__, FileID ); + plog( "%s - record %d in node %d is not recoverable. \n", + __FUNCTION__, (theSGlobPtr->scanState.recordNum - 1), + theSGlobPtr->scanState.nodeNum ); + } +#endif + myErr = R_RFail; + break; // this implementation does not handle partial rebuilds (all or none) + } + + /* insert this record into the new btree file */ + myErr = InsertBTreeRecord( myFCBPtr, myCurrentKeyPtr, + myCurrentDataPtr, myDataSize, &myHint ); + if ( noErr != myErr ) + { +#if DEBUG_REBUILD + { + plog( "%s - InsertBTreeRecord failed with err %d 0x%02X \n", + __FUNCTION__, myErr, myErr ); + plog( "%s - numRecords = %d\n", __FUNCTION__, numRecords); + plog( "%s - record %d in node %d is not recoverable. \n", + __FUNCTION__, (theSGlobPtr->scanState.recordNum - 1), + theSGlobPtr->scanState.nodeNum ); + PrintBTreeKey( myCurrentKeyPtr, theSGlobPtr->calculatedCatalogBTCB ); + PrintBTreeData( myCurrentDataPtr, myDataSize ); + } + if (myErr == btExists) + continue; +#endif + if (dskFulErr == myErr) + { + fsckPrint(theSGlobPtr->context, E_DiskFull); + } + myErr = R_RFail; + break; // this implementation does not handle partial rebuilds (all or none) + } + numRecords++; +#if DEBUG_REBUILD + if (debug && ((numRecords % 1000) == 0)) + plog("btree file %d: %u records\n", FileID, numRecords); +#endif + } + +#if SHOW_ELAPSED_TIMES + gettimeofday( &myEndTime, &zone ); + timersub( &myEndTime, &myStartTime, &myElapsedTime ); + plog( "\n%s - rebuild btree %u %u records elapsed time \n", __FUNCTION__, FileID, numRecords ); + plog( ">>>>>>>>>>>>> secs %d msecs %d \n\n", myElapsedTime.tv_sec, myElapsedTime.tv_usec ); +#endif + + if ( btNotFound == myErr ) + myErr = noErr; + if ( noErr != myErr ) + goto ExitThisRoutine; + + /* update our header node on disk from our BTreeControlBlock */ + myErr = BTFlushPath( myFCBPtr ); + if ( noErr != myErr ) + goto ExitThisRoutine; + myErr = CacheFlush( myVCBPtr->vcbBlockCache ); + if ( noErr != myErr ) + goto ExitThisRoutine; + + /* switch old file with our new one */ + if (FileID == kHFSCatalogFileID) { + theSGlobPtr->calculatedRepairFCB = theSGlobPtr->calculatedCatalogFCB; + theSGlobPtr->calculatedCatalogFCB = myFCBPtr; + myVCBPtr->vcbCatalogFile = myFCBPtr; + theSGlobPtr->calculatedCatalogFCB->fcbFileID = kHFSCatalogFileID; + theSGlobPtr->calculatedRepairBTCB = theSGlobPtr->calculatedCatalogBTCB; + } else if (FileID == kHFSAttributesFileID) { + theSGlobPtr->calculatedRepairFCB = theSGlobPtr->calculatedAttributesFCB; + theSGlobPtr->calculatedAttributesFCB = myFCBPtr; + myVCBPtr->vcbAttributesFile = myFCBPtr; + if (theSGlobPtr->calculatedAttributesFCB == NULL) { + if (debug) { + plog("Can't rebuilt attributes btree when there is no attributes file\n"); + } + myErr = noErr; + goto ExitThisRoutine; + } + theSGlobPtr->calculatedAttributesFCB->fcbFileID = kHFSAttributesFileID; + theSGlobPtr->calculatedRepairBTCB = theSGlobPtr->calculatedAttributesBTCB; + } else if 
(FileID == kHFSExtentsFileID) { + theSGlobPtr->calculatedRepairFCB = theSGlobPtr->calculatedExtentsFCB; + theSGlobPtr->calculatedExtentsFCB = myFCBPtr; + myVCBPtr->vcbExtentsFile = myFCBPtr; + theSGlobPtr->calculatedExtentsFCB->fcbFileID = kHFSExtentsFileID; + theSGlobPtr->calculatedRepairBTCB = theSGlobPtr->calculatedExtentsBTCB; + } + + // todo - add code to allow new btree file to be allocated in extents. + // Note when we do allow this the swap of btree files gets even more + // tricky since extent record key contains the file ID. The rebuilt + // file has file ID kHFSCatalogFileID/kHFSCatalogFileID when it is created. + + MarkVCBDirty( myVCBPtr ); + myErr = FlushAlternateVolumeControlBlock( myVCBPtr, isHFSPlus ); + if ( noErr != myErr ) + { + // we may be totally screwed if we get here, try to recover + if (FileID == kHFSCatalogFileID) { + theSGlobPtr->calculatedCatalogFCB = theSGlobPtr->calculatedRepairFCB; + theSGlobPtr->calculatedRepairFCB = myFCBPtr; + myVCBPtr->vcbCatalogFile = theSGlobPtr->calculatedCatalogFCB; + } else if (FileID == kHFSAttributesFileID) { + theSGlobPtr->calculatedAttributesFCB = theSGlobPtr->calculatedRepairFCB; + theSGlobPtr->calculatedRepairFCB = myFCBPtr; + myVCBPtr->vcbAttributesFile = theSGlobPtr->calculatedAttributesFCB; + } else if (FileID == kHFSExtentsFileID) { + theSGlobPtr->calculatedExtentsFCB = theSGlobPtr->calculatedRepairFCB; + theSGlobPtr->calculatedRepairFCB = myFCBPtr; + myVCBPtr->vcbExtentsFile = theSGlobPtr->calculatedExtentsFCB; + } + MarkVCBDirty( myVCBPtr ); + (void) FlushAlternateVolumeControlBlock( myVCBPtr, isHFSPlus ); + goto ExitThisRoutine; + } + + /* release space occupied by old BTree file */ + (void) DeleteBTree( theSGlobPtr, theSGlobPtr->calculatedRepairFCB ); + if (FileID == kHFSExtentsFileID) + (void)FlushExtentFile(myVCBPtr); + +ExitThisRoutine: + if ( myBlockDescriptor.buffer != NULL ) + (void) ReleaseVolumeBlock( myVCBPtr, &myBlockDescriptor, kReleaseBlock ); + + if ( myErr != noErr && myFCBPtr != NULL ) + (void) DeleteBTree( theSGlobPtr, myFCBPtr ); + BTScanTerminate( &theSGlobPtr->scanState ); + + return( myErr ); + +} /* RebuildBTree */ + +//_________________________________________________________________________________ +// +// Routine: CreateNewBTree +// +// Purpose: Create and Initialize a new B-Tree on the target volume +// using the physical size of the old (being rebuilt) file as an initial +// size. +// +// NOTES: we force this to be contiguous in order to get this into Jaguar. +// Allowing the new file to go into extents makes the swap +// of the old and new files complicated. The extent records +// are keyed by file ID and the new (rebuilt) btree file starts out as +// file Id kHFSCatalogFileID/kHFSCatalogFileID/kHFSCatalogFileID. +// If there were extents then we would have to fix up the extent records in the extent B-Tree. +// +// todo: Don't force new file to be contiguous +// +// Inputs: +// SGlobPtr global state set up by fsck_hfs. We depend upon the +// manufactured and repair FCBs. +// +// Outputs: +// calculatedRepairBTCB fill in the BTreeControlBlock for new B-Tree file. 
+// calculatedRepairFCB fill in the SFCB for the new B-Tree file +// +// Result: +// various error codes when problems occur or noErr if all is well +// +//_________________________________________________________________________________ + +/*static*/ OSErr CreateNewBTree( SGlobPtr theSGlobPtr, int FileID ) +{ + OSErr myErr; + BTreeControlBlock * myBTreeCBPtr, * oldBCBPtr; + SVCB * myVCBPtr; + SFCB * myFCBPtr, * oldFCBPtr; + UInt32 myBytesUsed = 0; + UInt32 myMapNodeCount; + UInt64 myNumBlocks; + FSSize myNewEOF; + BTHeaderRec myHeaderRec; + + myBTreeCBPtr = theSGlobPtr->calculatedRepairBTCB; + myFCBPtr = theSGlobPtr->calculatedRepairFCB; + ClearMemory( (Ptr) myFCBPtr, sizeof( *myFCBPtr ) ); + ClearMemory( (Ptr) myBTreeCBPtr, sizeof( *myBTreeCBPtr ) ); + + if (FileID == kHFSCatalogFileID) { + oldFCBPtr = theSGlobPtr->calculatedCatalogFCB; + oldBCBPtr = theSGlobPtr->calculatedCatalogBTCB; + } else if (FileID == kHFSAttributesFileID) { + oldFCBPtr = theSGlobPtr->calculatedAttributesFCB; + oldBCBPtr = theSGlobPtr->calculatedAttributesBTCB; + } else if (FileID == kHFSExtentsFileID) { + oldFCBPtr = theSGlobPtr->calculatedExtentsFCB; + oldBCBPtr = theSGlobPtr->calculatedExtentsBTCB; + } else + abort(); + + // Create new FCB + myVCBPtr = oldFCBPtr->fcbVolume; + if (FileID == kHFSCatalogFileID) + myFCBPtr->fcbFileID = kHFSCatalogFileID; + else if (FileID == kHFSAttributesFileID) + myFCBPtr->fcbFileID = kHFSAttributesFileID; + else if (FileID == kHFSExtentsFileID) + myFCBPtr->fcbFileID = kHFSExtentsFileID; + + myFCBPtr->fcbVolume = myVCBPtr; + myFCBPtr->fcbBtree = myBTreeCBPtr; + myFCBPtr->fcbBlockSize = oldBCBPtr->nodeSize; + + // Create new BTree Control Block + myBTreeCBPtr->fcbPtr = myFCBPtr; + myBTreeCBPtr->btreeType = kHFSBTreeType; + myBTreeCBPtr->keyCompareType = oldBCBPtr->keyCompareType; + myBTreeCBPtr->keyCompareProc = oldBCBPtr->keyCompareProc; + myBTreeCBPtr->nodeSize = oldBCBPtr->nodeSize; + myBTreeCBPtr->maxKeyLength = oldBCBPtr->maxKeyLength; + if (myVCBPtr->vcbSignature == kHFSPlusSigWord) { + if (FileID == kHFSExtentsFileID) + myBTreeCBPtr->attributes = kBTBigKeysMask; + else + myBTreeCBPtr->attributes = ( kBTBigKeysMask + kBTVariableIndexKeysMask ); + } + + myBTreeCBPtr->getBlockProc = GetFileBlock; + myBTreeCBPtr->releaseBlockProc = ReleaseFileBlock; + myBTreeCBPtr->setEndOfForkProc = SetEndOfForkProc; + + myNewEOF = oldFCBPtr->fcbPhysicalSize; + + myNumBlocks = myNewEOF / myVCBPtr->vcbBlockSize; + myErr = BlockFindAll( myBTreeCBPtr->fcbPtr, myNumBlocks); + ReturnIfError( myErr ); + myBTreeCBPtr->fcbPtr->fcbPhysicalSize = myNewEOF; + myErr = ZeroFileBlocks( myVCBPtr, myBTreeCBPtr->fcbPtr, 0, myNewEOF >> kSectorShift ); + ReturnIfError( myErr ); + + /* now set real values in our BTree Control Block */ + myFCBPtr->fcbLogicalSize = myFCBPtr->fcbPhysicalSize; // new B-tree looks at fcbLogicalSize + if (FileID == kHFSCatalogFileID) + myFCBPtr->fcbClumpSize = myVCBPtr->vcbCatalogFile->fcbClumpSize; + else if (FileID == kHFSAttributesFileID) + myFCBPtr->fcbClumpSize = myVCBPtr->vcbAttributesFile->fcbClumpSize; + else if (FileID == kHFSExtentsFileID) + myFCBPtr->fcbClumpSize = myVCBPtr->vcbExtentsFile->fcbClumpSize; + + myBTreeCBPtr->totalNodes = ( myFCBPtr->fcbPhysicalSize / myBTreeCBPtr->nodeSize ); + myBTreeCBPtr->freeNodes = myBTreeCBPtr->totalNodes; + + // Initialize our new BTree (write out header node and an empty leaf node) + myErr = InitializeBTree( myBTreeCBPtr, &myBytesUsed, &myMapNodeCount ); + ReturnIfError( myErr ); + + // Update our BTreeControlBlock from BTHeaderRec we 
just wrote out + myErr = GetBTreeHeader( theSGlobPtr, myFCBPtr, &myHeaderRec ); + ReturnIfError( myErr ); + + myBTreeCBPtr->treeDepth = myHeaderRec.treeDepth; + myBTreeCBPtr->rootNode = myHeaderRec.rootNode; + myBTreeCBPtr->leafRecords = myHeaderRec.leafRecords; + myBTreeCBPtr->firstLeafNode = myHeaderRec.firstLeafNode; + myBTreeCBPtr->lastLeafNode = myHeaderRec.lastLeafNode; + myBTreeCBPtr->totalNodes = myHeaderRec.totalNodes; + myBTreeCBPtr->freeNodes = myHeaderRec.freeNodes; + myBTreeCBPtr->maxKeyLength = myHeaderRec.maxKeyLength; + + if ( myMapNodeCount > 0 ) + { + myErr = WriteMapNodes( myBTreeCBPtr, (myBytesUsed / myBTreeCBPtr->nodeSize ), myMapNodeCount ); + ReturnIfError( myErr ); + } + + return( myErr ); + +} /* CreateNewBTree */ + + +/* + * InitializeBTree + * + * This routine manufactures and writes out a B-Tree header + * node and an empty leaf node. + * + * Note: Since large volumes can have bigger b-trees they + * might need to have map nodes setup. + * + * this routine originally came from newfs_hfs.tproj ( see + * WriteCatalogFile in file makehfs.c) and was modified for fsck_hfs. + */ +static OSErr InitializeBTree( BTreeControlBlock * theBTreeCBPtr, + UInt32 * theBytesUsedPtr, + UInt32 * theMapNodeCountPtr ) +{ + OSErr myErr; + BlockDescriptor myNode; + Boolean isHFSPlus = false; + SVCB * myVCBPtr; + BTNodeDescriptor * myNodeDescPtr; + BTHeaderRec * myHeaderRecPtr; + UInt8 * myBufferPtr; + UInt8 * myBitMapPtr; + UInt32 myNodeBitsInHeader; + UInt32 temp; + SInt16 myOffset; + + myVCBPtr = theBTreeCBPtr->fcbPtr->fcbVolume; + isHFSPlus = ( myVCBPtr->vcbSignature == kHFSPlusSigWord) ; + *theMapNodeCountPtr = 0; + + myErr = GetNewNode( theBTreeCBPtr, kHeaderNodeNum, &myNode ); + ReturnIfError( myErr ); + + myBufferPtr = (UInt8 *) myNode.buffer; + + /* FILL IN THE NODE DESCRIPTOR: */ + myNodeDescPtr = (BTNodeDescriptor *) myBufferPtr; + myNodeDescPtr->kind = kBTHeaderNode; + myNodeDescPtr->numRecords = 3; + myOffset = sizeof( BTNodeDescriptor ); + + SETOFFSET( myBufferPtr, theBTreeCBPtr->nodeSize, myOffset, 1 ); + + /* FILL IN THE HEADER RECORD: */ + myHeaderRecPtr = (BTHeaderRec *)((UInt8 *)myBufferPtr + myOffset); + myHeaderRecPtr->treeDepth = 0; + myHeaderRecPtr->rootNode = 0; + myHeaderRecPtr->firstLeafNode = 0; + myHeaderRecPtr->lastLeafNode = 0; + myHeaderRecPtr->nodeSize = theBTreeCBPtr->nodeSize; + myHeaderRecPtr->totalNodes = theBTreeCBPtr->totalNodes; + myHeaderRecPtr->freeNodes = myHeaderRecPtr->totalNodes - 1; /* header node */ + myHeaderRecPtr->clumpSize = theBTreeCBPtr->fcbPtr->fcbClumpSize; + + myHeaderRecPtr->attributes = theBTreeCBPtr->attributes; + myHeaderRecPtr->maxKeyLength = theBTreeCBPtr->maxKeyLength; + myHeaderRecPtr->keyCompareType = theBTreeCBPtr->keyCompareType; + + myOffset += sizeof( BTHeaderRec ); + SETOFFSET( myBufferPtr, theBTreeCBPtr->nodeSize, myOffset, 2 ); + + myOffset += kBTreeHeaderUserBytes; + SETOFFSET( myBufferPtr, theBTreeCBPtr->nodeSize, myOffset, 3 ); + + /* FIGURE OUT HOW MANY MAP NODES (IF ANY): */ + myNodeBitsInHeader = 8 * (theBTreeCBPtr->nodeSize + - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - (4 * sizeof(SInt16)) ); + + if ( myHeaderRecPtr->totalNodes > myNodeBitsInHeader ) + { + UInt32 nodeBitsInMapNode; + + myNodeDescPtr->fLink = myHeaderRecPtr->lastLeafNode + 1; + nodeBitsInMapNode = 8 * (theBTreeCBPtr->nodeSize + - sizeof(BTNodeDescriptor) + - (2 * sizeof(SInt16)) + - 2 ); + *theMapNodeCountPtr = (myHeaderRecPtr->totalNodes - myNodeBitsInHeader + + (nodeBitsInMapNode - 1)) / 
nodeBitsInMapNode; + myHeaderRecPtr->freeNodes = myHeaderRecPtr->freeNodes - *theMapNodeCountPtr; + } + + /* + * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE. + * Note - worst case (32MB alloc blk) will have only 18 nodes in use. + */ + myBitMapPtr = ((UInt8 *)myBufferPtr + myOffset); + temp = myHeaderRecPtr->totalNodes - myHeaderRecPtr->freeNodes; + + /* Working a byte at a time is endian safe */ + while ( temp >= 8 ) + { + *myBitMapPtr = 0xFF; + temp -= 8; + myBitMapPtr++; + } + *myBitMapPtr = ~(0xFF >> temp); + myOffset += myNodeBitsInHeader / 8; + + SETOFFSET( myBufferPtr, theBTreeCBPtr->nodeSize, myOffset, 4 ); + + *theBytesUsedPtr = + ( myHeaderRecPtr->totalNodes - myHeaderRecPtr->freeNodes - *theMapNodeCountPtr ) + * theBTreeCBPtr->nodeSize; + + /* write header node */ + myErr = UpdateNode( theBTreeCBPtr, &myNode ); + M_ExitOnError( myErr ); + + return noErr; + +ErrorExit: + (void) ReleaseNode( theBTreeCBPtr, &myNode ); + + return( myErr ); + +} /* InitializeBTree */ + + +/* + * WriteMapNodes + * + * This routine manufactures and writes out a B-Tree map + * node (or nodes if there are more than one). + * + * this routine originally came from newfs_hfs.tproj ( see + * WriteMapNodes in file makehfs.c) and was modified for fsck_hfs. + */ + +static OSErr WriteMapNodes( BTreeControlBlock * theBTreeCBPtr, + UInt32 theFirstMapNode, + UInt32 theNodeCount ) +{ + OSErr myErr; + UInt16 i; + UInt32 mapRecordBytes; + BTNodeDescriptor * myNodeDescPtr; + BlockDescriptor myNode; + + myNode.buffer = NULL; + + /* + * Note - worst case (32MB alloc blk) will have + * only 18 map nodes. So don't bother optimizing + * this section to do multiblock writes! + */ + for ( i = 0; i < theNodeCount; i++ ) + { + myErr = GetNewNode( theBTreeCBPtr, theFirstMapNode, &myNode ); + M_ExitOnError( myErr ); + + myNodeDescPtr = (BTNodeDescriptor *) myNode.buffer; + myNodeDescPtr->kind = kBTMapNode; + myNodeDescPtr->numRecords = 1; + + /* note: must be long word aligned (hence the extra -2) */ + mapRecordBytes = theBTreeCBPtr->nodeSize - sizeof(BTNodeDescriptor) - 2 * sizeof(SInt16) - 2; + + SETOFFSET( myNodeDescPtr, theBTreeCBPtr->nodeSize, sizeof(BTNodeDescriptor), 1 ); + SETOFFSET( myNodeDescPtr, theBTreeCBPtr->nodeSize, sizeof(BTNodeDescriptor) + mapRecordBytes, 2) ; + + if ( (i + 1) < theNodeCount ) + myNodeDescPtr->fLink = ++theFirstMapNode; /* point to next map node */ + else + myNodeDescPtr->fLink = 0; /* this is the last map node */ + + myErr = UpdateNode( theBTreeCBPtr, &myNode ); + M_ExitOnError( myErr ); + } + + return noErr; + +ErrorExit: + (void) ReleaseNode( theBTreeCBPtr, &myNode ); + + return( myErr ); + +} /* WriteMapNodes */ + + +/* + * DeleteBTree + * + * This routine will realease all space associated with the BTree + * file represented by the FCB passed in. 
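The header-node map record above is filled a byte at a time: whole bytes of 0xFF for every eight in-use nodes, then one partial byte of high-order bits. A stand-alone version of that loop, assuming the caller never asks for more bits than the map holds, looks like this:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mark the first 'count' bits of a node map, byte by byte, the way the
 * header-node map record is filled in above.  Working a byte at a time
 * keeps the code endian-safe.  Caller guarantees count <= 8 * mapBytes. */
static void MarkLeadingBits(uint8_t *map, size_t mapBytes, uint32_t count)
{
    memset(map, 0, mapBytes);
    while (count >= 8) {
        *map++ = 0xFF;
        count -= 8;
    }
    if (count)
        *map = (uint8_t)~(0xFF >> count);   /* leftover high-order bits */
}

int main(void)
{
    uint8_t map[4];

    MarkLeadingBits(map, sizeof(map), 11);  /* e.g. 11 nodes in use */
    printf("%02X %02X %02X %02X\n", map[0], map[1], map[2], map[3]);
    /* prints: FF E0 00 00 */
    return 0;
}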
+ * + */ + +enum +{ + kDataForkType = 0, + kResourceForkType = 0xFF +}; + +static OSErr DeleteBTree( SGlobPtr theSGlobPtr, SFCB * theFCBPtr ) +{ + OSErr myErr; + SVCB * myVCBPtr; + int i; + Boolean isHFSPlus; + Boolean checkExtentsBTree = true; + + myVCBPtr = theFCBPtr->fcbVolume; + isHFSPlus = ( myVCBPtr->vcbSignature == kHFSPlusSigWord) ; + + if ( isHFSPlus ) + { + for ( i = 0; i < kHFSPlusExtentDensity; ++i ) + { + if ( theFCBPtr->fcbExtents32[ i ].blockCount == 0 ) + { + checkExtentsBTree = false; + break; + } + (void) BlockDeallocate( myVCBPtr, + theFCBPtr->fcbExtents32[ i ].startBlock, + theFCBPtr->fcbExtents32[ i ].blockCount ); + theFCBPtr->fcbExtents32[ i ].startBlock = 0; + theFCBPtr->fcbExtents32[ i ].blockCount = 0; + } + } + else + { + for ( i = 0; i < kHFSExtentDensity; ++i ) + { + if ( theFCBPtr->fcbExtents16[ i ].blockCount == 0 ) + { + checkExtentsBTree = false; + break; + } + (void) BlockDeallocate( myVCBPtr, + theFCBPtr->fcbExtents16[ i ].startBlock, + theFCBPtr->fcbExtents16[ i ].blockCount ); + theFCBPtr->fcbExtents16[ i ].startBlock = 0; + theFCBPtr->fcbExtents16[ i ].blockCount = 0; + } + } + + if ( checkExtentsBTree ) + { + (void) ReleaseExtentsInExtentsBTree( theSGlobPtr, theFCBPtr ); + (void) FlushExtentFile( myVCBPtr ); + } + + (void) MarkVCBDirty( myVCBPtr ); + (void) FlushAlternateVolumeControlBlock( myVCBPtr, isHFSPlus ); + myErr = noErr; + + return( myErr ); + +} /* DeleteBTree */ + + +/* + * ReleaseExtentsInExtentsBTree + * + * This routine will locate extents in the extent BTree then release the space + * associated with the extents. It will also delete the BTree record for the + * extent. + * + */ + +static OSErr ReleaseExtentsInExtentsBTree( SGlobPtr theSGlobPtr, + SFCB * theFCBPtr ) +{ + BTreeIterator myIterator; + ExtentRecord myExtentRecord; + FSBufferDescriptor myBTRec; + ExtentKey * myKeyPtr; + SVCB * myVCBPtr; + UInt16 myRecLen; + UInt16 i; + OSErr myErr; + Boolean isHFSPlus; + + myVCBPtr = theFCBPtr->fcbVolume; + isHFSPlus = ( myVCBPtr->vcbSignature == kHFSPlusSigWord ); + + // position just before the first extent record for the given File ID. We + // pass in the file ID and a start block of 0 which will put us in a + // position for BTIterateRecord (with kBTreeNextRecord) to get the first + // extent record. + ClearMemory( &myIterator, sizeof(myIterator) ); + myBTRec.bufferAddress = &myExtentRecord; + myBTRec.itemCount = 1; + myBTRec.itemSize = sizeof(myExtentRecord); + myKeyPtr = (ExtentKey *) &myIterator.key; + + BuildExtentKey( isHFSPlus, kDataForkType, theFCBPtr->fcbFileID, + 0, (void *) myKeyPtr ); + + // it is now a simple process of getting the next extent record and + // cleaning up the allocated space for each one until we hit a + // different file ID. 
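Both DeleteBTree above and the extents-overflow loop that follows rely on the convention that a zero blockCount terminates an extent record, while a completely full record means overflow extents may follow. Here is a small self-contained sketch of that walk; ToyExtent is an illustrative stand-in for HFSPlusExtentDescriptor, and a real implementation would call BlockDeallocate for each run instead of just counting blocks.

#include <stdio.h>
#include <stdint.h>

/* Simplified extent descriptor: the same two fields as the real one. */
typedef struct {
    uint32_t startBlock;
    uint32_t blockCount;
} ToyExtent;

#define kToyExtentDensity 8

/* Walk one extent record; a zero blockCount marks the first unused slot
 * and tells the caller no overflow extents can follow. */
static int ReleaseExtentRecord(ToyExtent rec[kToyExtentDensity], uint32_t *blocksFreed)
{
    int i, sawEmptySlot = 0;

    *blocksFreed = 0;
    for (i = 0; i < kToyExtentDensity; i++) {
        if (rec[i].blockCount == 0) {
            sawEmptySlot = 1;              /* record not full: no overflow extents */
            break;
        }
        /* real code: BlockDeallocate(vcb, rec[i].startBlock, rec[i].blockCount) */
        *blocksFreed += rec[i].blockCount;
        rec[i].startBlock = rec[i].blockCount = 0;
    }
    return sawEmptySlot;
}

int main(void)
{
    ToyExtent rec[kToyExtentDensity] = { { 100, 8 }, { 300, 4 } };
    uint32_t freed;
    int recordNotFull = ReleaseExtentRecord(rec, &freed);

    printf("freed %u blocks, overflow extents possible: %s\n",
           (unsigned)freed, recordNotFull ? "no" : "yes");
    return 0;
}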
+ for ( ;; ) + { + myErr = BTIterateRecord( theSGlobPtr->calculatedExtentsFCB, + kBTreeNextRecord, &myIterator, + &myBTRec, &myRecLen ); + if ( noErr != myErr ) + { + myErr = noErr; + break; + } + + /* deallocate space for the extents we found */ + if ( isHFSPlus ) + { + // we're done if this is a different File ID + if ( myKeyPtr->hfsPlus.fileID != theFCBPtr->fcbFileID || + myKeyPtr->hfsPlus.forkType != kDataForkType ) + break; + + for ( i = 0; i < kHFSPlusExtentDensity; ++i ) + { + if ( myExtentRecord.hfsPlus[ i ].blockCount == 0 ) + break; + + (void) BlockDeallocate( myVCBPtr, + myExtentRecord.hfsPlus[ i ].startBlock, + myExtentRecord.hfsPlus[ i ].blockCount ); + } + } + else + { + // we're done if this is a different File ID + if ( myKeyPtr->hfs.fileID != theFCBPtr->fcbFileID || + myKeyPtr->hfs.forkType != kDataForkType ) + break; + + for ( i = 0; i < kHFSExtentDensity; ++i ) + { + if ( myExtentRecord.hfs[ i ].blockCount == 0 ) + break; + + (void) BlockDeallocate( myVCBPtr, + myExtentRecord.hfs[ i ].startBlock, + myExtentRecord.hfs[ i ].blockCount ); + } + } + + /* get rid of this extent BTree record */ + myErr = DeleteBTreeRecord( theSGlobPtr->calculatedExtentsFCB, myKeyPtr ); + } + + return( myErr ); + +} /* ReleaseExtentsInExtentsBTree */ + + +/* + * ValidateExtentRecordLength + * This routine will ensure that an extent record is the right size. + * This should always be the size of HFSPlusExtentRecord. + */ +static OSErr ValidateExtentRecordLength (SGlobPtr s, ExtentRecord * theRecPtr, UInt32 theRecSize) +{ + Boolean isHFSPlus = ( s->calculatedVCB->vcbSignature == kHFSPlusSigWord ); + if (isHFSPlus) { + if (theRecSize != sizeof(HFSPlusExtentRecord)) + return -1; + } else { + if (theRecSize != sizeof(HFSExtentRecord)) + return -1; + } + + return noErr; +} + +/* + * ValidateAttributeRecordLength + * + * This routine will make sure that the given HFS+ attributes file record + * is of the correct length. 
+ * + */ +static OSErr ValidateAttributeRecordLength (SGlobPtr s, HFSPlusAttrRecord * theRecPtr, UInt32 theRecSize) +{ + OSErr retval = noErr; + static UInt32 maxInlineSize; + + if (maxInlineSize == 0) { + /* The maximum size of an inline attribute record is nodesize / 2 minus a bit */ + /* These calculations taken from hfs_xattr.c:getmaxinlineattrsize */ + maxInlineSize = s->calculatedAttributesBTCB->nodeSize; + maxInlineSize -= sizeof(BTNodeDescriptor); // Minus node descriptor + maxInlineSize -= 3 * sizeof(u_int16_t); // Minus 3 index slots + maxInlineSize /= 2; // 2 key/rec pairs minimum + maxInlineSize -= sizeof(HFSPlusAttrKey); // Minus maximum key size + maxInlineSize &= 0xFFFFFFFE; // Multiple of two + } + switch (theRecPtr->recordType) { + case kHFSPlusAttrInlineData: + if (theRecSize > maxInlineSize) { + if (debug) + plog("Inline Attribute size %u is larger than maxsize %u\n", theRecSize, maxInlineSize); + retval = -1; + } + break; + case kHFSPlusAttrForkData: + if (theRecSize != sizeof(HFSPlusAttrForkData)) { + if (debug) + plog("Fork Data attribute size %u is larger then HFSPlusAttrForkData size %u\n", theRecSize, sizeof(HFSPlusAttrForkData)); + retval = -1; + } + break; + case kHFSPlusAttrExtents: + if (theRecSize != sizeof(HFSPlusAttrExtents)) { + if (debug) + plog("Extents Data attribute size %u is larger than HFSPlusAttrExtents size %u\n", theRecSize, sizeof(HFSPlusAttrExtents)); + retval = -1; + } + break; + default: + // Right now, we don't support any other kind + if (debug) + plog("Unknown attribute type %u\n", theRecPtr->recordType); + retval = -1; + break; + } + return retval; +} + +/* + * ValidateCatalogRecordLength + * + * This routine will make sure the given HFS (plus and standard) catalog record + * is of the correct length. 
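The maxInlineSize computation above mirrors hfs_xattr.c's getmaxinlineattrsize. The sketch below redoes the same arithmetic with hard-coded structure sizes so it can run on its own; the 14-byte node descriptor and 268-byte maximum attribute key are assumptions of the sketch, not values taken from this file.

#include <stdio.h>
#include <stdint.h>

/* Illustrative on-disk sizes only; the real values come from the HFS
 * headers (sizeof(BTNodeDescriptor) and sizeof(HFSPlusAttrKey)). */
#define kToyNodeDescriptorSize 14
#define kToyAttrKeySize        268

/* Same arithmetic as the maxInlineSize calculation above: half a node,
 * minus the node descriptor, three record-offset slots and one
 * maximum-size key, rounded down to a multiple of two. */
static uint32_t MaxInlineAttrSize(uint32_t nodeSize)
{
    uint32_t maxSize = nodeSize;

    maxSize -= kToyNodeDescriptorSize;     /* node descriptor */
    maxSize -= 3 * sizeof(uint16_t);       /* three index slots */
    maxSize /= 2;                          /* two key/record pairs minimum */
    maxSize -= kToyAttrKeySize;            /* one maximum-size key */
    maxSize &= 0xFFFFFFFE;                 /* multiple of two */
    return maxSize;
}

int main(void)
{
    printf("8K attribute nodes allow inline records up to %u bytes\n",
           (unsigned)MaxInlineAttrSize(8192));
    return 0;
}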
+ * + */ + +static OSErr ValidateCatalogRecordLength( SGlobPtr theSGlobPtr, + CatalogRecord * theRecPtr, + UInt32 theRecSize ) +{ + SVCB * myVCBPtr; + Boolean isHFSPlus = false; + + myVCBPtr = theSGlobPtr->calculatedVCB; + isHFSPlus = ( myVCBPtr->vcbSignature == kHFSPlusSigWord ); + + if ( isHFSPlus ) + { + switch ( theRecPtr->recordType ) + { + case kHFSPlusFolderRecord: + if ( theRecSize != sizeof( HFSPlusCatalogFolder ) ) + { + return( -1 ); + } + break; + + case kHFSPlusFileRecord: + if ( theRecSize != sizeof(HFSPlusCatalogFile) ) + { + return( -1 ); + } + break; + + case kHFSPlusFolderThreadRecord: + /* Fall through */ + + case kHFSPlusFileThreadRecord: + if ( theRecSize > sizeof(HFSPlusCatalogThread) || + theRecSize < sizeof(HFSPlusCatalogThread) - sizeof(HFSUniStr255) + sizeof(UniChar) ) + { + return( -1 ); + } + break; + + default: + return( -1 ); + } + } + else + { + switch ( theRecPtr->recordType ) + { + case kHFSFolderRecord: + if ( theRecSize != sizeof(HFSCatalogFolder) ) + return( -1 ); + break; + + case kHFSFileRecord: + if ( theRecSize != sizeof(HFSCatalogFile) ) + return( -1 ); + break; + + case kHFSFolderThreadRecord: + /* Fall through */ + case kHFSFileThreadRecord: + if ( theRecSize != sizeof(HFSCatalogThread)) + return( -1 ); + break; + + default: + return( -1 ); + } + } + + return( noErr ); + +} /* ValidateCatalogRecordLength */ + + +#if DEBUG_REBUILD +static void PrintNodeDescriptor( NodeDescPtr thePtr ) +{ + plog( "\n xxxxxxxx BTNodeDescriptor xxxxxxxx \n" ); + plog( " fLink %d \n", thePtr->fLink ); + plog( " bLink %d \n", thePtr->bLink ); + plog( " kind %d ", thePtr->kind ); + if ( thePtr->kind == kBTLeafNode ) + plog( "%s \n", "kBTLeafNode" ); + else if ( thePtr->kind == kBTIndexNode ) + plog( "%s \n", "kBTIndexNode" ); + else if ( thePtr->kind == kBTHeaderNode ) + plog( "%s \n", "kBTHeaderNode" ); + else if ( thePtr->kind == kBTMapNode ) + plog( "%s \n", "kBTMapNode" ); + else + plog( "do not know?? 
\n" ); + plog( " height %d \n", thePtr->height ); + plog( " numRecords %d \n", thePtr->numRecords ); + +} /* PrintNodeDescriptor */ + + +static void PrintBTHeaderRec( BTHeaderRec * thePtr ) +{ + plog( "\n xxxxxxxx BTHeaderRec xxxxxxxx \n" ); + plog( " treeDepth %d \n", thePtr->treeDepth ); + plog( " rootNode %d \n", thePtr->rootNode ); + plog( " leafRecords %d \n", thePtr->leafRecords ); + plog( " firstLeafNode %d \n", thePtr->firstLeafNode ); + plog( " lastLeafNode %d \n", thePtr->lastLeafNode ); + plog( " nodeSize %d \n", thePtr->nodeSize ); + plog( " maxKeyLength %d \n", thePtr->maxKeyLength ); + plog( " totalNodes %d \n", thePtr->totalNodes ); + plog( " freeNodes %d \n", thePtr->freeNodes ); + plog( " clumpSize %d \n", thePtr->clumpSize ); + plog( " btreeType 0x%02X \n", thePtr->btreeType ); + plog( " attributes 0x%02X \n", thePtr->attributes ); + +} /* PrintBTHeaderRec */ + + +static void PrintBTreeKey( KeyPtr thePtr, BTreeControlBlock * theBTreeCBPtr ) +{ + int myKeyLength, i; + UInt8 * myPtr = (UInt8 *)thePtr; + char ascii[17]; + UInt8 byte; + + ascii[16] = '\0'; + + myKeyLength = CalcKeySize( theBTreeCBPtr, thePtr) ; + plog( "\n xxxxxxxx BTreeKey (length %d) xxxxxxxx \n", myKeyLength ); + for ( i = 0; i < myKeyLength; i++ ) + { + byte = *(myPtr + i); + plog( "%02X ", byte ); + if (byte < 32 || byte > 126) + ascii[i & 0xF] = '.'; + else + ascii[i & 0xF] = byte; + + if ((i & 0xF) == 0xF) + { + plog(" %s\n", ascii); + } + } + + if (i & 0xF) + { + int j; + for (j = i & 0xF; j < 16; ++j) + plog(" "); + ascii[i & 0xF] = 0; + plog(" %s\n", ascii); + } +} /* PrintBTreeKey */ + +static void PrintBTreeData(void *data, UInt32 length) +{ + UInt32 i; + UInt8 * myPtr = (UInt8 *)data; + char ascii[17]; + UInt8 byte; + + ascii[16] = '\0'; + + plog( "\n xxxxxxxx BTreeData (length %d) xxxxxxxx \n", length ); + for ( i = 0; i < length; i++ ) + { + byte = *(myPtr + i); + plog( "%02X ", byte ); + if (byte < 32 || byte > 126) + ascii[i & 0xF] = '.'; + else + ascii[i & 0xF] = byte; + + if ((i & 0xF) == 0xF) + { + plog(" %s\n", ascii); + } + } + + if (i & 0xF) + { + int j; + for (j = i & 0xF; j < 16; ++j) + plog(" "); + ascii[i & 0xF] = 0; + plog(" %s\n", ascii); + } +} + +static void PrintIndexNodeRec( UInt32 theNodeNum ) +{ + plog( "\n xxxxxxxx IndexNodeRec xxxxxxxx \n" ); + plog( " node number %d \n", theNodeNum ); + +} /* PrintIndexNodeRec */ + +static void PrintLeafNodeRec( HFSPlusCatalogFolder * thePtr ) +{ + plog( "\n xxxxxxxx LeafNodeRec xxxxxxxx \n" ); + plog( " recordType %d ", thePtr->recordType ); + if ( thePtr->recordType == kHFSPlusFolderRecord ) + plog( "%s \n", "kHFSPlusFolderRecord" ); + else if ( thePtr->recordType == kHFSPlusFileRecord ) + plog( "%s \n", "kHFSPlusFileRecord" ); + else if ( thePtr->recordType == kHFSPlusFolderThreadRecord ) + plog( "%s \n", "kHFSPlusFolderThreadRecord" ); + else if ( thePtr->recordType == kHFSPlusFileThreadRecord ) + plog( "%s \n", "kHFSPlusFileThreadRecord" ); + else + plog( "do not know?? \n" ); + +} /* PrintLeafNodeRec */ + + +#endif // DEBUG_REBUILD diff --git a/fsck_hfs/dfalib/SRepair.c b/fsck_hfs/dfalib/SRepair.c new file mode 100644 index 0000000..4a80776 --- /dev/null +++ b/fsck_hfs/dfalib/SRepair.c @@ -0,0 +1,6541 @@ +/* + * Copyright (c) 1999-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SRepair.c + + Contains: This file contains the Scavenger repair routines. + + Written by: Bill Bruffey + + Copyright: © 1986, 1990, 1992-1999 by Apple Computer, Inc., all rights reserved. + +*/ + +#include "Scavenger.h" +#include <unistd.h> +#include <sys/stat.h> +#include <stdlib.h> +#include <stddef.h> +#include "../cache.h" + +enum { + clearBlocks, + addBitmapBit, + deleteExtents +}; + +/* internal routine prototypes */ + +static int MRepair( SGlobPtr GPtr ); +void SetOffset (void *buffer, UInt16 btNodeSize, SInt16 recOffset, SInt16 vecOffset); +#define SetOffset(buffer,nodesize,offset,record) (*(SInt16 *) ((Byte *) (buffer) + (nodesize) + (-2 * (record))) = (offset)) +static OSErr UpdateBTreeHeader( SFCB * fcbPtr ); +static OSErr FixBTreeHeaderReservedFields( SGlobPtr GPtr, short refNum ); +static OSErr UpdBTM( SGlobPtr GPtr, short refNum); +static OSErr UpdateVolumeBitMap( SGlobPtr GPtr, Boolean preAllocateOverlappedExtents ); +static OSErr DoMinorOrders( SGlobPtr GPtr ); +static OSErr UpdVal( SGlobPtr GPtr, RepairOrderPtr rP ); +static int DelFThd( SGlobPtr GPtr, UInt32 fid ); +static OSErr FixDirThread( SGlobPtr GPtr, UInt32 did ); +static OSErr FixOrphanedFiles ( SGlobPtr GPtr ); +static OSErr RepairReservedBTreeFields ( SGlobPtr GPtr ); +static OSErr GetCatalogRecord(SGlobPtr GPtr, UInt32 fileID, Boolean isHFSPlus, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize); +static OSErr RepairAttributesCheckABT(SGlobPtr GPtr, Boolean isHFSPlus); +static OSErr RepairAttributesCheckCBT(SGlobPtr GPtr, Boolean isHFSPlus); +static OSErr RepairAttributes( SGlobPtr GPtr ); +static OSErr FixFinderFlags( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixLinkCount( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixLinkChainNext( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixLinkChainPrev( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixBSDInfo( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr DeleteUnlinkedFile( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixOrphanedExtent( SGlobPtr GPtr ); +static OSErr FixFileSize(SGlobPtr GPtr, RepairOrderPtr p); +static OSErr VolumeObjectFixVHBorMDB( Boolean * fixedIt ); +static OSErr VolumeObjectRestoreWrapper( void ); +static OSErr FixBloatedThreadRecords( SGlob *GPtr ); +static OSErr FixMissingThreadRecords( SGlob *GPtr ); +static OSErr FixEmbededVolDescription( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixWrapperExtents( SGlobPtr GPtr, RepairOrderPtr p ); +static OSErr FixIllegalNames( SGlobPtr GPtr, RepairOrderPtr roPtr ); +static HFSCatalogNodeID GetObjectID( CatalogRecord * theRecPtr ); +static OSErr FixMissingDirectory( SGlob *GPtr, UInt32 theObjID, UInt32 theParID ); +static OSErr FixAttrSize(SGlobPtr GPtr, RepairOrderPtr p); +static OSErr FixOrphanAttrRecord(SGlobPtr GPtr); +static OSErr 
FixBadExtent(SGlobPtr GPtr, RepairOrderPtr p); +static OSErr FixHardLinkFinderInfo(SGlobPtr, RepairOrderPtr); +static OSErr FixOrphanLink(SGlobPtr GPtr, RepairOrderPtr p); +static OSErr FixOrphanInode(SGlobPtr GPtr, RepairOrderPtr p); +static OSErr FixDirLinkOwnerFlags(SGlobPtr GPtr, RepairOrderPtr p); +static int DeleteCatalogRecordByID(SGlobPtr GPtr, uint32_t id, Boolean for_rename); +static int MoveCatalogRecordByID(SGlobPtr GPtr, uint32_t id, uint32_t new_parentid); +static int DeleteAllAttrsByID(SGlobPtr GPtr, uint32_t id); +static int delete_attr_record(SGlobPtr GPtr, HFSPlusAttrKey *attr_key, HFSPlusAttrRecord *attr_record); +static int ZeroFillUnusedNodes(SGlobPtr GPtr, short fileRefNum); + +/* Functions to fix overlapping extents */ +static OSErr FixOverlappingExtents(SGlobPtr GPtr); +static int CompareExtentBlockCount(const void *first, const void *second); +static OSErr MoveExtent(SGlobPtr GPtr, ExtentInfo *extentInfo); +static OSErr CreateCorruptFileSymlink(SGlobPtr GPtr, UInt32 fileID); +static OSErr SearchExtentInAttributeBT(SGlobPtr GPtr, ExtentInfo *extentInfo, HFSPlusAttrKey *attrKey, HFSPlusAttrRecord *attrRecord, UInt16 *recordSize, UInt32 *foundExtentIndex); +static OSErr UpdateExtentInAttributeBT (SGlobPtr GPtr, ExtentInfo *extentInfo, HFSPlusAttrKey *attrKey, HFSPlusAttrRecord *attrRecord, UInt16 *recordSize, UInt32 foundInExtentIndex); +static OSErr SearchExtentInVH(SGlobPtr GPtr, ExtentInfo *extentInfo, UInt32 *foundExtentIndex, Boolean *noMoreExtents); +static OSErr UpdateExtentInVH (SGlobPtr GPtr, ExtentInfo *extentInfo, UInt32 foundExtentIndex); +static OSErr SearchExtentInCatalogBT(SGlobPtr GPtr, ExtentInfo *extentInfo, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize, UInt32 *foundExtentIndex, Boolean *noMoreExtents); +static OSErr UpdateExtentInCatalogBT (SGlobPtr GPtr, ExtentInfo *extentInfo, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize, UInt32 foundExtentIndex); +static OSErr SearchExtentInExtentBT(SGlobPtr GPtr, ExtentInfo *extentInfo, HFSPlusExtentKey *extentKey, HFSPlusExtentRecord *extentRecord, UInt16 *recordSize, UInt32 *foundExtentIndex); +static OSErr FindExtentInExtentRec (Boolean isHFSPlus, UInt32 startBlock, UInt32 blockCount, const HFSPlusExtentRecord extentData, UInt32 *foundExtentIndex, Boolean *noMoreExtents); + +/* Functions to copy disk blocks or data buffer to disk */ +static OSErr CopyDiskBlocks(SGlobPtr GPtr, const UInt32 startAllocationBlock, const UInt32 blockCount, const UInt32 newStartAllocationBlock ); +static OSErr WriteBufferToDisk(SGlobPtr GPtr, UInt32 startBlock, UInt32 blockCount, u_char *buffer, int buflen); + +/* Functions to create file and directory by name */ +static OSErr CreateFileByName(SGlobPtr GPtr, UInt32 parentID, UInt16 fileType, u_char *fileName, unsigned int filenameLen, u_char *data, unsigned int dataLen); +static UInt32 CreateDirByName(SGlob *GPtr , const u_char *dirName, const UInt32 parentID); + +static int BuildFolderRec( SGlob*, u_int16_t theMode, UInt32 theObjID, Boolean isHFSPlus, CatalogRecord * theRecPtr ); +static int BuildThreadRec( CatalogKey * theKeyPtr, CatalogRecord * theRecPtr, Boolean isHFSPlus, Boolean isDirectory ); +static int BuildFileRec(UInt16 fileType, UInt16 fileMode, UInt32 fileID, Boolean isHFSPlus, CatalogRecord *catRecord); +static void BuildAttributeKey(u_int32_t fileID, u_int32_t startBlock, unsigned char *attrName, u_int16_t attrNameLen, HFSPlusAttrKey *key); + + +OSErr RepairVolume( SGlobPtr GPtr ) +{ + OSErr err; + + SetDFAStage( 
kAboutToRepairStage ); // Notify callers repair is starting... + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + // + // Do the repair + // + SetDFAStage( kRepairStage ); // Stops GNE from being called, and changes behavior of MountCheck + + err = MRepair( GPtr ); + + return( err ); +} + + +/*------------------------------------------------------------------------------ +Routine: MRepair - (Minor Repair) +Function: Performs minor repair operations. +Input: GPtr - pointer to scavenger global area +Output: MRepair - function result: +------------------------------------------------------------------------------*/ + +static int MRepair( SGlobPtr GPtr ) +{ + OSErr err; + SVCB *calculatedVCB = GPtr->calculatedVCB; + Boolean isHFSPlus; + Boolean didRebuild = false; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + if ( GPtr->EBTStat & S_RebuildBTree ) + { + fsckPrint(GPtr->context, hfsRebuildExtentBTree); + err = RebuildBTree( GPtr, kHFSExtentsFileID ); + if (err) + return (err); + didRebuild = true; + } + + if ( GPtr->CBTStat & S_RebuildBTree ) + { + /* once we do the rebuild we will force another verify since the */ + /* first verify was aborted when we determined a rebuild was necessary */ + fsckPrint(GPtr->context, hfsRebuildCatalogBTree); + err = RebuildBTree( GPtr, kHFSCatalogFileID ); + if (err) + return (err); + didRebuild = true; + } + + if ( GPtr->ABTStat & S_RebuildBTree ) + { + fsckPrint(GPtr->context, hfsRebuildAttrBTree); + err = RebuildBTree( GPtr, kHFSAttributesFileID ); + if (err) + return (err); + didRebuild = true; + } + + if (didRebuild) + return noErr; // Need to restart the verification + + /* + * If there were unused nodes in the B-trees which were non-zero-filled, + * then zero fill them. + */ + if (GPtr->ABTStat & S_UnusedNodesNotZero) + { + err = ZeroFillUnusedNodes(GPtr, kCalculatedAttributesRefNum); + ReturnIfError(err); + } + if (GPtr->EBTStat & S_UnusedNodesNotZero) + { + err = ZeroFillUnusedNodes(GPtr, kCalculatedExtentRefNum); + ReturnIfError(err); + } + if (GPtr->CBTStat & S_UnusedNodesNotZero) + { + err = ZeroFillUnusedNodes(GPtr, kCalculatedCatalogRefNum); + ReturnIfError(err); + } + if ((calculatedVCB->vcbAttributes & kHFSUnusedNodeFixMask) == 0) + { + calculatedVCB->vcbAttributes |= kHFSUnusedNodeFixMask; + MarkVCBDirty(calculatedVCB); + } + + /* + * We do this check here because it may set up some minor repair orders; + * however, because determining the repairs to be done is expensive, we have only + * checked to see if there is any sort of problem so far. + * + * After it's done, DoMinorOrders() will take care of any requests that have been + * set up. + */ + if (GPtr->CatStat & S_FileHardLinkChain) { + err = RepairHardLinkChains(GPtr, false); + ReturnIfError(err); + } + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if (GPtr->CatStat & S_DirHardLinkChain) { + err = RepairHardLinkChains(GPtr, true); + ReturnIfError(err); + } + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + // Handle repair orders. Note that these must be done *BEFORE* the MDB is updated.
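+ // (A note on the ordering, grounded in the code below: several of the handlers dispatched by DoMinorOrders, for example the volume-level valence fixes in UpdVal, only patch the in-memory VCB counters and set S_MDB or mark the VCB dirty; the MDB / VolumeHeader flush near the end of this routine is what finally writes those corrections to disk.)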
+ err = DoMinorOrders( GPtr ); + ReturnIfError( err ); + err = CheckForStop( GPtr ); ReturnIfError( err ); + + /* Clear Catalog status for things repaired by DoMinorOrders */ + GPtr->CatStat &= ~(S_FileAllocation | S_Permissions | S_UnlinkedFile | S_LinkCount | S_IllName | S_BadExtent | S_LinkErrRepair | S_FileHardLinkChain | S_DirHardLinkChain); + + /* + * Fix missing thread records + */ + if (GPtr->CatStat & S_MissingThread) { + err = FixMissingThreadRecords(GPtr); + ReturnIfError(err); + + GPtr->CatStat &= ~S_MissingThread; + GPtr->CBTStat |= S_BTH; /* leaf record count changed */ + } + + // 2210409, in System 8.1, moving file or folder would cause HFS+ thread records to be + // 520 bytes in size. We only shrink the threads if other repairs are needed. + if ( GPtr->VeryMinorErrorsStat & S_BloatedThreadRecordFound ) + { + (void) FixBloatedThreadRecords( GPtr ); + GPtr->VeryMinorErrorsStat &= ~S_BloatedThreadRecordFound; + } + + // + // we will update the following data structures regardless of whether we have done + // major or minor repairs, so we might end up doing this multiple times. Look into this. + // + + // + // Isolate and fix Overlapping Extents + // + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->VIStat & S_OverlappingExtents) != 0 ) + { + if (embedded == 1 && debug == 0) + return R_RFail; + + err = FixOverlappingExtents( GPtr ); // Isolate and fix Overlapping Extents + ReturnIfError( err ); + + GPtr->VIStat &= ~S_OverlappingExtents; + GPtr->VIStat |= S_VBM; // Now that we changed the extents, we need to rebuild the bitmap + InvalidateCalculatedVolumeBitMap( GPtr ); // Invalidate our BitMap + } + + // + // FixOrphanedFiles + // + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->CBTStat & S_Orphan) != 0 ) + { + err = FixOrphanedFiles ( GPtr ); // Orphaned files were found + ReturnIfError( err ); + GPtr->CBTStat |= S_BTH; // leaf record count may change - 2913311 + } + + /* Some minor repairs would have failed at the first + * attempt because of missing thread record or missing + * file/folder record because of ordering of repairs + * (example, deletion of file/folder before setting + * the flag). If any minor repair orders are left, + * try to repair them again after fixing incorrect + * number of thread records.
+ */ + if (GPtr->MinorRepairsP) { + err = DoMinorOrders(GPtr); + ReturnIfError( err ); + } + + // + // FixOrphanedExtent records + // + if ( (GPtr->EBTStat & S_OrphanedExtent) != 0 ) // Orphaned extents were found + { + err = FixOrphanedExtent( GPtr ); + GPtr->EBTStat &= ~S_OrphanedExtent; + // if ( err == errRebuildBtree ) + // goto RebuildBtrees; + ReturnIfError( err ); + } + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + // + // Update the extent BTree header and bit map + // + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->EBTStat & S_BTH) || (GPtr->EBTStat & S_ReservedBTH) ) + { + err = UpdateBTreeHeader( GPtr->calculatedExtentsFCB ); // update extent file BTH + + if ( (err == noErr) && (GPtr->EBTStat & S_ReservedBTH) ) + { + err = FixBTreeHeaderReservedFields( GPtr, kCalculatedExtentRefNum ); + } + + ReturnIfError( err ); + } + + + if ( (GPtr->EBTStat & S_BTM) != 0 ) + { + err = UpdBTM( GPtr, kCalculatedExtentRefNum ); // update extent file BTM + ReturnIfError( err ); + } + + // + // Update the catalog BTree header and bit map + // + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->CBTStat & S_BTH) || (GPtr->CBTStat & S_ReservedBTH) ) + { + err = UpdateBTreeHeader( GPtr->calculatedCatalogFCB ); // update catalog BTH + + if ( (err == noErr) && (GPtr->CBTStat & S_ReservedBTH) ) + { + err = FixBTreeHeaderReservedFields( GPtr, kCalculatedCatalogRefNum ); + } + + ReturnIfError( err ); + } + + if ( GPtr->CBTStat & S_BTM ) + { + err = UpdBTM( GPtr, kCalculatedCatalogRefNum ); // update catalog BTM + ReturnIfError( err ); + } + + if ( (GPtr->CBTStat & S_ReservedNotZero) != 0 ) + { + err = RepairReservedBTreeFields( GPtr ); // update catalog fields + ReturnIfError( err ); + } + + // Repair orphaned/invalid attribute records + if ( (GPtr->ABTStat & S_AttrRec) ) + { + err = FixOrphanAttrRecord( GPtr ); + ReturnIfError( err ); + } + + // Repair inconsistency of attribute btree and corresponding bits in + // catalog btree + if ( (GPtr->ABTStat & S_AttributeCount) || + (GPtr->ABTStat & S_SecurityCount)) + { + err = RepairAttributes( GPtr ); + ReturnIfError( err ); + } + + // Update the attribute BTree header and bit map + if ( (GPtr->ABTStat & S_BTH) ) + { + err = UpdateBTreeHeader( GPtr->calculatedAttributesFCB ); // update attribute BTH + ReturnIfError( err ); + } + + if ( GPtr->ABTStat & S_BTM ) + { + err = UpdBTM( GPtr, kCalculatedAttributesRefNum ); // update attribute BTM + ReturnIfError( err ); + } + + /* Extended attribute repair can also detect incorrect number + * of thread records, so trigger thread records repair now and + * come back again in next pass for any fallouts and/or repairing + * extended attribute inconsistency. + * Note: This should be removed when Chinese Remainder Theorem + * is used for detecting incorrect number of thread records + * (rdar://3968148). 
+ */ + if ( (GPtr->CBTStat & S_Orphan) != 0 ) + { + err = FixOrphanedFiles ( GPtr ); + ReturnIfError( err ); + } + + // + // Update the volume bit map + // + // Note, moved volume bit map update to end after other repairs + // (except the MDB / VolumeHeader) have been completed + // + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->VIStat & S_VBM) != 0 ) + { + err = UpdateVolumeBitMap( GPtr, false ); // update VolumeBitMap + ReturnIfError( err ); + InvalidateCalculatedVolumeBitMap( GPtr ); // Invalidate our BitMap + } + + // + // Fix missing Primary or Alternate VHB or MDB + // + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->VIStat & S_MDB) != 0 ) // fix MDB / VolumeHeader + { + Boolean fixedIt = false; + err = VolumeObjectFixVHBorMDB( &fixedIt ); + ReturnIfError( err ); + // don't call FlushAlternateVolumeControlBlock if we fixed it since that would + // mean our calculated VCB has not been completely set up. + if ( fixedIt ) { + GPtr->VIStat &= ~S_MDB; + MarkVCBClean( calculatedVCB ); + } + } + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->VIStat & S_WMDB) != 0 ) // fix wrapper MDB + { + err = VolumeObjectRestoreWrapper(); + ReturnIfError( err ); + } + + // + // Update the MDB / VolumeHeader + // + // Note, moved MDB / VolumeHeader update to end + // after all other repairs have been completed. + // + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + if ( (GPtr->VIStat & S_MDB) != 0 || IsVCBDirty(calculatedVCB) ) // update MDB / VolumeHeader + { + MarkVCBDirty(calculatedVCB); // make sure it's dirty + calculatedVCB->vcbAttributes |= kHFSVolumeUnmountedMask; + err = FlushAlternateVolumeControlBlock( calculatedVCB, isHFSPlus ); // Writes real & alt blocks + ReturnIfError( err ); + } + + err = CheckForStop( GPtr ); ReturnIfError( err ); // Permit the user to interrupt + + // if we had minor repairs that failed we still want to fix as much as possible + // so we wait until now to indicate the volume still has problems + if ( GPtr->minorRepairErrors ) + err = R_RFail; + + return( err ); // all done +} + + + +// +// Internal Routines +// + +//------------------------------------------------------------------------------- +// Routine: VolumeObjectFixVHBorMDB +// +// Function: When the primary or alternate Volume Header Block or Master +// Directory Block is damaged or missing use the undamaged one to +// restore the other.
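+// +// In outline (a simplified sketch of the logic below): +// read the primary block and the alternate block; +// treat a copy as bad when its kVO_Pri*/kVO_Alt* "OK" flag is not set; +// if the primary is good and the alternate is not, copy primary over alternate (forced write); +// else if the alternate is good, copy alternate over primary (forced write); +// else fail with noMacDskErr.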
+//------------------------------------------------------------------------------- + +static OSErr VolumeObjectFixVHBorMDB( Boolean* fixedItPtr ) +{ + OSErr err; + OSErr err2; + VolumeObjectPtr myVOPtr; + BlockDescriptor myPrimary; + BlockDescriptor myAlternate; + + myVOPtr = GetVolumeObjectPtr( ); + myPrimary.buffer = NULL; + myAlternate.buffer = NULL; + err = noErr; + + // bail if both are OK + if ( VolumeObjectIsHFS() ) { + if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 && + (myVOPtr->flags & kVO_AltMDBOK) != 0 ) + goto ExitThisRoutine; + } + else { + if ( (myVOPtr->flags & kVO_PriVHBOK) != 0 && + (myVOPtr->flags & kVO_AltVHBOK) != 0 ) + goto ExitThisRoutine; + } + + // it's OK if one of the primary or alternate is invalid + err = GetVolumeObjectPrimaryBlock( &myPrimary ); + if ( !(err == noErr || err == badMDBErr || err == noMacDskErr) ) + goto ExitThisRoutine; + + // invalidate if we have not marked the primary as OK + if ( VolumeObjectIsHFS( ) ) { + if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) + err = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 ) { + err = badMDBErr; + } + + err2 = GetVolumeObjectAlternateBlock( &myAlternate ); + if ( !(err2 == noErr || err2 == badMDBErr || err2 == noMacDskErr) ) + goto ExitThisRoutine; + + // invalidate if we have not marked the alternate as OK + if ( VolumeObjectIsHFS( ) ) { + if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) + err2 = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 ) { + err2 = badMDBErr; + } + + // primary is OK so use it to restore alternate + if ( err == noErr ) { + CopyMemory( myPrimary.buffer, myAlternate.buffer, Blk_Size ); + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kForceWriteBlock ); + myAlternate.buffer = NULL; + *fixedItPtr = true; + if ( VolumeObjectIsHFS( ) ) + myVOPtr->flags |= kVO_AltMDBOK; + else + myVOPtr->flags |= kVO_AltVHBOK; + } + // alternate is OK so use it to restore the primary + else if ( err2 == noErr ) { + CopyMemory( myAlternate.buffer, myPrimary.buffer, Blk_Size ); + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kForceWriteBlock ); + myPrimary.buffer = NULL; + *fixedItPtr = true; + if ( VolumeObjectIsHFS( ) ) + myVOPtr->flags |= kVO_PriMDBOK; + else + myVOPtr->flags |= kVO_PriVHBOK; + err = noErr; + } + else + err = noMacDskErr; + +ExitThisRoutine: + if ( myPrimary.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kReleaseBlock ); + if ( myAlternate.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kReleaseBlock ); + + return( err ); + +} /* VolumeObjectFixVHBorMDB */ + + +//------------------------------------------------------------------------------- +// Routine: VolumeObjectRestoreWrapper +// +// Function: When the primary or alternate Master Directory Block is damaged +// or missing use the undamaged one to restore the other.
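+// +// This is the wrapper-MDB variant used when MRepair finds S_WMDB set (an +// embedded HFS Plus volume whose HFS wrapper MDB needs to be restored); it +// follows the same copy-the-good-copy-over-the-bad-one pattern as +// VolumeObjectFixVHBorMDB above.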
+//------------------------------------------------------------------------------- + +static OSErr VolumeObjectRestoreWrapper( void ) +{ + OSErr err; + OSErr err2; + VolumeObjectPtr myVOPtr; + BlockDescriptor myPrimary; + BlockDescriptor myAlternate; + + myVOPtr = GetVolumeObjectPtr( ); + myPrimary.buffer = NULL; + myAlternate.buffer = NULL; + + // it's OK if one of the MDB is invalid + err = GetVolumeObjectPrimaryMDB( &myPrimary ); + if ( !(err == noErr || err == badMDBErr || err == noMacDskErr) ) + goto ExitThisRoutine; + err2 = GetVolumeObjectAlternateMDB( &myAlternate ); + if ( !(err2 == noErr || err2 == badMDBErr || err2 == noMacDskErr) ) + goto ExitThisRoutine; + + // primary is OK so use it to restore alternate + if ( err == noErr && (myVOPtr->flags & kVO_PriMDBOK) != 0 ) { + CopyMemory( myPrimary.buffer, myAlternate.buffer, Blk_Size ); + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kForceWriteBlock ); + myAlternate.buffer = NULL; + myVOPtr->flags |= kVO_AltMDBOK; + } + // alternate is OK so use it to restore the primary + else if ( err2 == noErr && (myVOPtr->flags & kVO_AltMDBOK) != 0 ) { + CopyMemory( myAlternate.buffer, myPrimary.buffer, Blk_Size ); + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kForceWriteBlock ); + myPrimary.buffer = NULL; + myVOPtr->flags |= kVO_PriMDBOK; + err = noErr; + } + else + err = noMacDskErr; + +ExitThisRoutine: + if ( myPrimary.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kReleaseBlock ); + if ( myAlternate.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kReleaseBlock ); + + return( err ); + +} /* VolumeObjectRestoreWrapper */ + + +/*------------------------------------------------------------------------------ +Routine: UpdateBTreeHeader - (Update BTree Header) + +Function: Replaces a BTH on disk with info from a scavenger BTCB. + +Input: GPtr - pointer to scavenger global area + refNum - file refnum + +Output: UpdateBTreeHeader - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr UpdateBTreeHeader( SFCB * fcbPtr ) +{ + OSErr err; + + M_BTreeHeaderDirty( ((BTreeControlBlockPtr) fcbPtr->fcbBtree) ); + err = BTFlushPath( fcbPtr ); + + return( err ); + +} /* End UpdateBTreeHeader */ + + + +/*------------------------------------------------------------------------------ +Routine: FixBTreeHeaderReservedFields + +Function: Fix reserved fields in BTree Header + +Input: GPtr - pointer to scavenger global area + refNum - file refnum + +Output: 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr FixBTreeHeaderReservedFields( SGlobPtr GPtr, short refNum ) +{ + OSErr err; + BTHeaderRec header; + + err = GetBTreeHeader(GPtr, ResolveFCB(refNum), &header); + ReturnIfError( err ); + + if ( (header.clumpSize % GPtr->calculatedVCB->vcbBlockSize) != 0 ) + header.clumpSize = GPtr->calculatedVCB->vcbBlockSize; + + header.reserved1 = 0; + header.btreeType = kHFSBTreeType; // control file +/* + * TBD - we'll need to repair an invalid keyCompareType field. + */ +#if 0 + if (-->TBD<--) + header.keyCompareType = kHFSBinaryCompare; +#endif + ClearMemory( header.reserved3, sizeof(header.reserved3) ); + + return( err ); +} + + + + +/*------------------------------------------------------------------------------ + +Routine: UpdBTM - (Update BTree Map) + +Function: Replaces a BTM on disk with a scavenger BTM.
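+ (The BTM is the B-tree map, i.e. the bitmap of allocated nodes: the copy below starts with the map record in the header node and then follows the fLink chain of map nodes until the whole scavenger-built map has been written out.)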
+ +Input: GPtr - pointer to scavenger global area + refNum - file refnum + +Output: UpdBTM - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr UpdBTM( SGlobPtr GPtr, short refNum ) +{ + OSErr err; + UInt16 recSize; + SInt32 mapSize; + SInt16 size; + SInt16 recIndx; + Ptr p; + Ptr btmP; + Ptr sbtmP; + UInt32 nodeNum; + NodeRec node; + UInt32 fLink; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( refNum ); + + // Set up + mapSize = ((BTreeExtensionsRec*)calculatedBTCB->refCon)->BTCBMSize; + + // + // update the map records + // + if ( mapSize > 0 ) + { + nodeNum = 0; + recIndx = 2; + sbtmP = ((BTreeExtensionsRec*)calculatedBTCB->refCon)->BTCBMPtr; + + do + { + GPtr->TarBlock = nodeNum; // set target node number + + err = GetNode( calculatedBTCB, nodeNum, &node ); + ReturnIfError( err ); // couldn't get map node + + // Locate the map record + recSize = GetRecordSize( calculatedBTCB, (BTNodeDescriptor *)node.buffer, recIndx ); + btmP = (Ptr)GetRecordAddress( calculatedBTCB, (BTNodeDescriptor *)node.buffer, recIndx ); + fLink = ((NodeDescPtr)node.buffer)->fLink; + size = ( recSize > mapSize ) ? mapSize : recSize; + + CopyMemory( sbtmP, btmP, size ); // update it + + err = UpdateNode( calculatedBTCB, &node ); // write it, and unlock buffer + + mapSize -= size; // move to next map record + if ( mapSize == 0 ) // more to go? + break; // no, zero remainder of record + if ( fLink == 0 ) // out of bitmap blocks in file? + { + RcdError( GPtr, E_ShortBTM ); + (void) ReleaseNode(calculatedBTCB, &node); + return( E_ShortBTM ); + } + + nodeNum = fLink; + sbtmP += size; + recIndx = 0; + + } while ( mapSize > 0 ); + + // clear the unused portion of the map record + for ( p = btmP + size ; p < btmP + recSize ; p++ ) + *p = 0; + + err = UpdateNode( calculatedBTCB, &node ); // Write it, and unlock buffer + } + + return( noErr ); // All done +} // end UpdBTM + + + + +/*------------------------------------------------------------------------------ + +Routine: UpdateVolumeBitMap - (Update Volume Bit Map) + +Function: Replaces the VBM on disk with the scavenger VBM. + +Input: GPtr - pointer to scavenger global area + +Output: UpdateVolumeBitMap - function result: + 0 = no error + n = error + GPtr->VIStat - S_VBM flag set if VBM is damaged.
+------------------------------------------------------------------------------*/ + +static OSErr UpdateVolumeBitMap( SGlobPtr GPtr, Boolean preAllocateOverlappedExtents ) +{ + GPtr->TarID = VBM_FNum; + + return ( CheckVolumeBitMap(GPtr, true) ); +} + +/* +Routine: FixBadLinkChainFirst - fix the first link in a hard link chain + +Input: GPtr -- pointer to scavenger global data + p -- pointer to a minor repair order + +Output: function result: + 0 -- no error + n -- error +*/ + +OSErr FixBadLinkChainFirst(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + HFSPlusAttrData *attrRec; + HFSPlusAttrKey *attrKey; + BTreeIterator iterator; + FSBufferDescriptor bt_data; + u_int8_t attrdata[FIRST_LINK_XATTR_REC_SIZE]; + size_t unicode_bytes = 0; + + ClearMemory(&iterator, sizeof(iterator)); + retval = GetCatalogRecordByID(GPtr, (UInt32)p->parid, true, (CatalogKey*)&iterator.key, &rec, &recsize); + if (retval != 0) { + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + goto done; + } + + switch (rec.recordType) { + case kHFSPlusFolderRecord: // directory hard link + attrKey = (HFSPlusAttrKey*)&iterator.key; + utf_decodestr((unsigned char *)FIRST_LINK_XATTR_NAME, + strlen(FIRST_LINK_XATTR_NAME), attrKey->attrName, + &unicode_bytes, sizeof(attrKey->attrName)); + attrKey->attrNameLen = unicode_bytes / sizeof(UniChar); + attrKey->keyLength = kHFSPlusAttrKeyMinimumLength + unicode_bytes; + attrKey->pad = 0; + attrKey->fileID = p->parid; + attrKey->startBlock = 0; + attrRec = (HFSPlusAttrData*)&attrdata[0]; + attrRec->recordType = kHFSPlusAttrInlineData; + attrRec->reserved[0] = 0; + attrRec->reserved[1] = 0; + (void)snprintf((char*)&attrRec->attrData[0], + sizeof(attrdata) - offsetof(HFSPlusAttrData, attrData), + "%lu", (unsigned long)(p->correct)); + attrRec->attrSize = 1 + strlen((char*)&attrRec->attrData[0]); + bt_data.bufferAddress = attrRec; + recsize = sizeof(HFSPlusAttrData) - 2 + attrRec->attrSize + ((attrRec->attrSize & 1) ? 1 : 0); + bt_data.itemSize = recsize; + bt_data.itemCount = 1; + + retval = BTInsertRecord(GPtr->calculatedAttributesFCB, &iterator, &bt_data, recsize); + if (retval == btExists) { + retval = BTReplaceRecord(GPtr->calculatedAttributesFCB, &iterator, &bt_data, recsize); + } + + if (retval) { + /* If there is error on inserting a new attribute record + * because attribute btree does not exist, print message.
+ */ + if ((GPtr->calculatedAttributesFCB->fcbPhysicalSize == 0) && + (GPtr->calculatedAttributesFCB->fcbLogicalSize == 0) && + (GPtr->calculatedAttributesFCB->fcbClumpSize == 0) && + (fsckGetVerbosity(GPtr->context) >= kDebugLog)) { + plog ("\tFixBadLinkChainFirst: Attribute btree does not exist.\n"); + } + } + break; + case kHFSPlusFileRecord: // file hard link + rec.hfsPlusFile.hl_firstLinkID = (UInt32)p->correct; + bt_data.bufferAddress = &rec; + bt_data.itemSize = recsize; + bt_data.itemCount = 1; + retval = BTReplaceRecord(GPtr->calculatedCatalogFCB, &iterator, &bt_data, recsize); + break; + default: + retval = IntError(GPtr, R_IntErr); + break; + } +done: + return retval; +} + + +/* +Routine: FixHardLinkBadDate - fix the date of an indirect-node + +Input: GPtr -- pointer to scavenger global data + p -- pointer to a minor repair order + +Output: function result: + 0 -- no error + n -- error +*/ + +OSErr FixHardLinkBadDate(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogKey key; + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, (UInt32)p->parid, true, &key, &rec, &recsize); + + if (retval == 0) { + if (rec.recordType != kHFSPlusFileRecord) { + retval = IntError(GPtr, R_IntErr); + } else { + rec.hfsPlusFile.createDate = p->correct; + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, &rec, recsize, &hint); + } + } + + return retval; + +} + +/* +Routine: FixFileHardLinkFlag - clear the HardLinkChain flag in a file record + +Input: GPtr -- pointer to scavenger global data + p -- pointer to minor repair order + +Output: function result: + 0 -- no error + n -- error +*/ + +OSErr FixFileHardLinkFlag(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogKey key; + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, (UInt32)p->parid, true, &key, &rec, &recsize); + + if (retval == 0) { + if (rec.recordType != kHFSPlusFileRecord) { + retval = IntError(GPtr, R_IntErr); + } else { + rec.hfsPlusFile.flags &= ~kHFSHasLinkChainMask; + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, &rec, recsize, &hint); + } + } + return retval; +} + +/* +Routine: FixPrivDirBadPerms - fix the permissions of the directory hard-link private dir + +Input: GPtr -- pointer to scavenger global data + p -- pointer to a minor repair order + +Output: function result: + 0 -- no error + n -- error +*/ + +static OSErr FixPrivDirBadPerms(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogKey key; + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, (UInt32)p->parid, true, &key, &rec, &recsize); + + if (retval != 0) { + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code.
+ */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + goto done; + } + if (rec.recordType != kHFSPlusFolderRecord) { + retval = IntError(GPtr, R_IntErr); + goto done; + } + + rec.hfsPlusFolder.bsdInfo.ownerFlags |= UF_IMMUTABLE; + rec.hfsPlusFolder.bsdInfo.fileMode |= S_ISVTX; + + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, &rec, recsize, &hint); + +done: + return retval; +} + +/*------------------------------------------------------------------------------ +Routine: FixOrphanLink + +Function: Delete the orphan directory/file hard link as no corresponding + directory/file inode was found. + +Input: GPtr - ptr to scavenger global data + p - pointer to a minor repair order + +Output: function returns - + 0 - no error, success + n - error +-------------------------------------------------------------------------------*/ +static OSErr FixOrphanLink(SGlobPtr GPtr, RepairOrderPtr p) +{ + int retval; + + retval = DeleteCatalogRecordByID(GPtr, p->parid, false); + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + + return retval; +} + +/*------------------------------------------------------------------------------ +Routine: FixOrphanInode + +Function: Repair orphan file/directory inode, i.e. no hard links point + to this file/directory inode by moving them to lost+found. + +Input: GPtr - ptr to scavenger global data + p - pointer to a minor repair order + +Output: function returns - + 0 - no error, success + n - error +-------------------------------------------------------------------------------*/ +static OSErr FixOrphanInode(SGlobPtr GPtr, RepairOrderPtr p) +{ + int retval; + uint32_t lost_found_id; + static int msg_display = 0; + + if (embedded == 1 && debug == 0) { + retval = EPERM; + goto out; + } + + /* Make sure that lost+found exists */ + lost_found_id = CreateDirByName(GPtr, (u_char *)"lost+found", + kHFSRootFolderID); + if (lost_found_id == 0) { + retval = ENOENT; + goto out; + } + + retval = MoveCatalogRecordByID(GPtr, p->parid, lost_found_id); + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + if (msg_display == 0) { + fsckPrint(GPtr->context, fsckLostFoundDirectory, "lost+found"); + msg_display = 1; + } + +out: + return retval; +} + +/*------------------------------------------------------------------------------ +Routine: FixDirLinkOwnerFlags + +Function: Fix the owner flags for directory hard link. 
+ +Input: GPtr - ptr to scavenger global data + p - pointer to a minor repair order + +Output: function returns - + 0 - no error, success + n - error +-------------------------------------------------------------------------------*/ +static OSErr FixDirLinkOwnerFlags(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogKey key; + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, p->parid, true, &key, &rec, &recsize); + if (retval != 0) { + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + goto done; + } + + rec.hfsPlusFile.bsdInfo.ownerFlags = p->correct; + + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, + &rec, recsize, &hint); + +done: + return retval; +} + +/*------------------------------------------------------------------------------ +Routine: FixBadFlags + +Function: Update the flags field of a directory or file node + +Input: GPtr -- ptr to scavenger global data + p -- pointer to a minor repair order + +Output: function result: + 0 - no error + n - error +*/ +static OSErr FixBadFlags(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogKey key; + CatalogRecord rec; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, p->parid, true, &key, &rec, &recsize); + if (retval != 0) { + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. 
+ */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + goto done; + } + + if (p->type == E_DirInodeBadFlags) { + if ((rec.hfsPlusFolder.flags != p->incorrect) && (fsckGetVerbosity(GPtr->context) >= kDebugLog)) { + fplog(stderr, "\tFixBadFlags (folder): old = %#x, incorrect = %#x, correct = %#x\n", rec.hfsPlusFolder.flags, (int)p->incorrect, (int)p->correct); + } + rec.hfsPlusFolder.flags = p->correct; + } else if (p->type == E_DirLinkAncestorFlags) { + if ((rec.hfsPlusFolder.flags != p->incorrect) && (fsckGetVerbosity(GPtr->context) >= kDebugLog)) { + fplog(stderr, "\tFixBadFlags (parent folder): old = %#x, incorrect = %#x, correct = %#x\n", rec.hfsPlusFolder.flags, (int)p->incorrect, (int)p->correct); + } + rec.hfsPlusFolder.flags = p->correct; + } else { + if ((rec.hfsPlusFolder.flags != p->incorrect) && (fsckGetVerbosity(GPtr->context) >= kDebugLog)) { + fplog(stderr, "\tFixBadFlags (file): old = %#x, incorrect = %#x, correct = %#x\n", rec.hfsPlusFolder.flags, (int)p->incorrect, (int)p->correct); + } + rec.hfsPlusFile.flags = p->correct; + } + + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, + &rec, recsize, &hint); + +done: + + return retval; + +} + +/*------------------------------------------------------------------------------ +Routine: UpdFolderCount + +Function: Update the folder count in an HFS+ folder record + +Input: GPtr -- ptr to scavenger global data + p -- pointer to minor repair order + +Output: function result: + 0 - no error + n - error + +------------------------------------------------------------------------------*/ +OSErr UpdFolderCount( SGlobPtr GPtr, RepairOrderPtr p) +{ + OSErr result = -1; + CatalogRecord record; + CatalogKey key, foundKey; + UInt16 recSize = 0; + UInt32 hint = 0; + +#define DPRINT(where, fmt, ...) \ + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) \ + fplog(where, fmt, ## __VA_ARGS__); + + /* + * We do the search in two stages. First, we look for just the + * catalog ID we get from the repair order; this SHOULD give us + * a thread record, which we can then use to get the real record. + */ + BuildCatalogKey( p->parid, NULL, true, &key); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &foundKey, &record, &recSize, &hint); + if (result) { + if (result == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. 
+ */ + GPtr->minorRepairFalseSuccess = true; + return 0; + } else { + DPRINT(stderr, "\tUpdFolderCount: first SearchBTreeRecord failed, parid = %u, result = %d\n", p->parid, result); + return IntError(GPtr, R_IntErr); + } + } + + if (record.recordType != kHFSPlusFolderThreadRecord) { + GPtr->CBTStat |= S_Orphan; + GPtr->minorRepairFalseSuccess = true; + return 0; + } + + BuildCatalogKey( record.hfsPlusThread.parentID, (const CatalogName *)&record.hfsPlusThread.nodeName, true, &key); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &foundKey, &record, &recSize, &hint); + + if (result) { + DPRINT(stderr, "UpdFolderCount: second SearchBTreeRecord failed (thread.parentID = %u, result = %d), just returning without complaint\n", record.hfsPlusThread.parentID, result); + return 0; + } + + if (record.recordType != kHFSPlusFolderRecord) { + DPRINT(stderr, "UpdFolderCount: actual record type (%d) != FolderRecord\n", record.recordType); + return IntError(GPtr, R_IntErr); + } + +#if 0 + /* + * If we've had to make a folder on an HFSX volume, we set the folderCount to what + * it should be -- which may not be what it found at a different part of the pass. + */ + if ((UInt32)p->incorrect != record.hfsPlusFolder.folderCount) { + DPRINT(stderr, "UpdFolderCount: incorrect (%u) != expected folderCount (%u)\n", (UInt32)p->incorrect, record.hfsPlusFolder.folderCount); + return IntError( GPtr, R_IntErr); + } +#else + if (record.hfsPlusFolder.folderCount == p->correct) { + /* We've gotten it already, no need to do anything */ + return noErr; + } +#endif + + record.hfsPlusFolder.folderCount = p->correct; + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, + &record, recSize, &hint); + if (result) { + DPRINT(stderr, "UpdFolderCount: ReplaceBTreeRecord failed (%d)\n", result); + return IntError( GPtr, R_IntErr ); + } + return noErr; +} +#undef DPRINT + +/*------------------------------------------------------------------------------ +Routine: UpdHasFolderCount + +Function: Update the HasFolderCount flag on an HFS+ folder's flags + +Input: GPtr -- ptr to scavenger global data + p -- pointer to minor repair order + +Output: function result: + 0 - no error + n - error + +------------------------------------------------------------------------------*/ +OSErr UpdHasFolderCount( SGlobPtr GPtr, RepairOrderPtr p) +{ + OSErr result = -1; + CatalogRecord record; + CatalogKey key, foundKey; + UInt16 recSize = 0; + UInt32 hint = 0; + + /* + * As above, we do the search in two stages: first to get the + * thread record (based solely on the CNID); second, to get the + * folder record based from the thread record. + */ + BuildCatalogKey( p->parid, NULL, true, &key); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &foundKey, &record, &recSize, &hint); + + if (result) { + if (result == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. 
+ */ + GPtr->minorRepairFalseSuccess = true; + return 0; + } else { + return IntError(GPtr, R_IntErr); + } + } + + /* If it's not a folder thread record, we've got a problem */ + if (record.recordType != kHFSPlusFolderThreadRecord) { + GPtr->CBTStat |= S_Orphan; + GPtr->minorRepairFalseSuccess = true; + return 0; + } + + BuildCatalogKey( record.hfsPlusThread.parentID, (const CatalogName *)&record.hfsPlusThread.nodeName, true, &key); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &foundKey, &record, &recSize, &hint); + + if (result) { + return IntError(GPtr, R_IntErr); + } + + if (record.recordType != kHFSPlusFolderRecord) { + return IntError(GPtr, R_IntErr); + } + + /* Verify that the kHFSHasFolderCountMask bit hasn't been set, and set if necessary */ + if ((record.hfsPlusFolder.flags & kHFSHasFolderCountMask) == 0) { + record.hfsPlusFolder.flags |= kHFSHasFolderCountMask; + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, + &record, recSize, &hint); + if (result) { + return IntError( GPtr, R_IntErr ); + } + } + + return noErr; +} + +/*------------------------------------------------------------------------------ + +Routine: DoMinorOrders + +Function: Execute minor repair orders. + +Input: GPtr - ptr to scavenger global data + +Output: function result: + 0 - no error + n - error +------------------------------------------------------------------------------*/ + +static OSErr DoMinorOrders( SGlobPtr GPtr ) // the globals +{ + RepairOrderPtr p; + RepairOrderPtr cur; + OSErr err = noErr; // initialize to "no error" + + /* Manipulate the list for minor repairs separately from the global + * list head because global list head will be used to store repair + * orders which returned false success in anticipation of re-repair + * after other corruptions on the disk. + */ + cur = GPtr->MinorRepairsP; + GPtr->MinorRepairsP = NULL; + + while( (p = cur) && (err == noErr) ) // loop over each repair order + { + cur = p->link; + + GPtr->minorRepairFalseSuccess = false; + + switch( p->type ) // branch on repair type + { + case E_FldCount: // folderCount needs to be updated + err = UpdFolderCount( GPtr, p ); + break; + + case E_HsFldCount: // HasFolderCount bit needs to be set + err = UpdHasFolderCount( GPtr, p ); + break; + + case E_RtDirCnt: // the valence errors + case E_RtFilCnt: // (of which there are several) + case E_DirCnt: + case E_FilCnt: + case E_DirVal: + err = UpdVal( GPtr, p ); // handle a valence error + break; + + case E_LockedDirName: + err = FixFinderFlags( GPtr, p ); + break; + + case E_UnlinkedFile: + err = DeleteUnlinkedFile( GPtr, p ); + break; + + case E_FileLinkCountError: + case E_InvalidLinkCount: + err = FixLinkCount( GPtr, p ); + break; + + case E_InvalidLinkChainPrev: + err = FixLinkChainPrev( GPtr, p ); + break; + + case E_InvalidLinkChainNext: + err = FixLinkChainNext( GPtr, p ); + break; + + case E_DirHardLinkFinderInfo: + case E_FileHardLinkFinderInfo: + err = FixHardLinkFinderInfo( GPtr, p ); + break; + + case E_InvalidPermissions: + err = FixBSDInfo( GPtr, p ); + break; + + case E_NoFile: // dangling file thread + err = DelFThd( GPtr, p->parid ); // delete the dangling thread + break; + + //•• E_NoFile case is never hit since VLockedChk() registers the error, + //•• and returns the error causing the verification to quit.
+ case E_EntryNotFound: + GPtr->EBTStat |= S_OrphanedExtent; + break; + + //•• Same with E_NoDir + case E_NoDir: // missing directory record + err = FixDirThread( GPtr, p->parid ); // fix the directory thread record + break; + + case E_InvalidMDBdrAlBlSt: + err = FixEmbededVolDescription( GPtr, p ); + break; + + case E_InvalidWrapperExtents: + err = FixWrapperExtents(GPtr, p); + break; + + case E_IllegalName: + err = FixIllegalNames( GPtr, p ); + break; + + case E_PEOF: + case E_LEOF: + err = FixFileSize(GPtr, p); + break; + + case E_PEOAttr: + case E_LEOAttr: + err = FixAttrSize(GPtr, p); + break; + + case E_ExtEnt: + err = FixBadExtent(GPtr, p); + break; + + case E_DirInodeBadFlags: + case E_DirLinkAncestorFlags: + case E_FileInodeBadFlags: + case E_DirLinkBadFlags: + case E_FileLinkBadFlags: + err = FixBadFlags(GPtr, p); + break; + + case E_BadPermPrivDir: + err = FixPrivDirBadPerms(GPtr, p); + break; + + case E_InvalidLinkChainFirst: + err = FixBadLinkChainFirst(GPtr, p); + break; + + case E_OrphanFileLink: + case E_OrphanDirLink: + err = FixOrphanLink(GPtr, p); + break; + + case E_OrphanFileInode: + case E_OrphanDirInode: + err = FixOrphanInode(GPtr, p); + break; + + case E_DirHardLinkOwnerFlags: + err = FixDirLinkOwnerFlags(GPtr, p); + break; + + case E_BadHardLinkDate: + err = FixHardLinkBadDate(GPtr, p); + break; + + case E_LinkChainNonLink: + err = FixFileHardLinkFlag(GPtr, p); + break; + + default: // unknown repair type + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tUnknown repair order found (type = %d)\n", p->type); + } + err = IntError( GPtr, R_IntErr ); // treat as an internal error + break; + } + + if ((err != 0) && (fsckGetVerbosity(GPtr->context) >= kDebugLog)) { + plog ("\tDoMinorRepair: Repair for type=%d failed (err=%d).\n", p->type, err); + } + + /* A repair order can return false success if lookup of a + * record failed --- which can happen if the corresponding + * thread record is missing or a file/folder record was + * deleted as part of another repair order. If repair + * order returned false success, do not free it up, instead + * add it back to the global minor repair list to retry + * repair after repairing incorrect number of thread records. + * Note: We do not return error when repair of minor + * repair orders fail second time due to missing record + * because if we did not find the catalog record second time, + * it is already deleted and the minor repair order is invalid. + * The minor repair order list is later freed up in clean up + * for the scavenger. + */ + if (GPtr->minorRepairFalseSuccess == true) { + p->link = GPtr->MinorRepairsP; + GPtr->MinorRepairsP = p; + } else { + DisposeMemory( p ); // free the node + } + } + + return( err ); // return error code to our caller +} + + + +/*------------------------------------------------------------------------------ + +Routine: DelFThd - (delete file thread) + +Function: Executes the delete dangling file thread repair orders. These are typically + threads left after system 6 deletes an aliased file, since system 6 is not + aware of aliases and thus will not delete the thread along with the file. + +Input: GPtr - global data + fid - the thread record's key's parent-ID + +Output: 0 - no error + n - deletion failed +Modification History: + 29Oct90 KST CBTDelete was using "k" as key which points to cache buffer.
+-------------------------------------------------------------------------------*/ + +static int DelFThd( SGlobPtr GPtr, UInt32 fid ) // the file ID +{ + CatalogRecord record; + CatalogKey foundKey; + CatalogKey key; + UInt32 hint; // as returned by CBTSearch + OSErr result; // status return + UInt16 recSize; + Boolean isHFSPlus; + ExtentRecord zeroExtents; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + BuildCatalogKey( fid, (const CatalogName*) nil, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &record, &recSize, &hint ); + + if ( result ) return ( IntError( GPtr, result ) ); + + if ( (record.recordType != kHFSFileThreadRecord) && (record.recordType != kHFSPlusFileThreadRecord) ) // quit if not a file thread + return ( IntError( GPtr, R_IntErr ) ); + + // Zero the record on disk + ClearMemory( (Ptr)&zeroExtents, sizeof(ExtentRecord) ); + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &key, hint, &zeroExtents, recSize, &hint ); + if ( result ) return ( IntError( GPtr, result ) ); + + result = DeleteBTreeRecord( GPtr->calculatedCatalogFCB, &key ); + if ( result ) return ( IntError( GPtr, result ) ); + + // After deleting a record, we'll need to write back the BT header and map, + // to reflect the updated record count etc. + + GPtr->CBTStat |= S_BTH + S_BTM; // set flags to write back hdr and map + + return( noErr ); // successful return +} + + +/*------------------------------------------------------------------------------ + +Routine: FixDirThread - (fix directory thread record's parent ID info) + +Function: Executes the missing directory record repair orders most likely caused by + the disappearing folder bug. This bug causes some folders to jump to Desktop + from the root window. The catalog directory record for such a folder has + the Desktop folder as the parent but its thread record still has the root + directory as its parent.
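+ The repair scans catalog leaf nodes for the folder record whose folderID + matches the thread's key and then rewrites the thread record's parentID and + nodeName from that folder record's key.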
+ +Input: GPtr - global data + did - the thread record's key's parent-ID + +Output: 0 - no error + n - deletion failed +-------------------------------------------------------------------------------*/ + +static OSErr FixDirThread( SGlobPtr GPtr, UInt32 did ) // the dir ID +{ + UInt8 *dataPtr; + UInt32 hint; // as returned by CBTSearch + OSErr result; // status return + UInt16 recSize; + CatalogName catalogName; // temporary name record + CatalogName *keyName; // temporary name record + register short index; // loop index for all records in the node + UInt32 curLeafNode; // current leaf node being checked + CatalogRecord record; + CatalogKey foundKey; + CatalogKey key; + CatalogKey *keyP; + SInt16 recordType; + UInt32 folderID; + NodeRec node; + NodeDescPtr nodeDescP; + UInt32 newParDirID = 0; // the parent ID where the dir record is really located + Boolean isHFSPlus; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( kCalculatedCatalogRefNum ); + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + BuildCatalogKey( did, (const CatalogName*) nil, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &record, &recSize, &hint ); + + if ( result ) + return( IntError( GPtr, result ) ); + if ( (record.recordType != kHFSFolderThreadRecord) && (record.recordType != kHFSPlusFolderThreadRecord) ) // quit if not a directory thread + return ( IntError( GPtr, R_IntErr ) ); + + curLeafNode = calculatedBTCB->freeNodes; + + while ( curLeafNode ) + { + result = GetNode( calculatedBTCB, curLeafNode, &node ); + if ( result != noErr ) return( IntError( GPtr, result ) ); + + nodeDescP = node.buffer; + + // loop on number of records in node + for ( index = 0 ; index < nodeDescP->numRecords ; index++ ) + { + GetRecordByIndex( calculatedBTCB, (NodeDescPtr)nodeDescP, index, (BTreeKey **)&keyP, &dataPtr, &recSize ); + + recordType = ((CatalogRecord *)dataPtr)->recordType; + folderID = recordType == kHFSPlusFolderRecord ? ((HFSPlusCatalogFolder *)dataPtr)->folderID : ((HFSCatalogFolder *)dataPtr)->folderID; + + // did we locate a directory record whose dirID matches the thread's key's parent dir ID? + if ( (folderID == did) && ( recordType == kHFSPlusFolderRecord || recordType == kHFSFolderRecord ) ) + { + newParDirID = recordType == kHFSPlusFolderRecord ? keyP->hfsPlus.parentID : keyP->hfs.parentID; + keyName = recordType == kHFSPlusFolderRecord ? (CatalogName *)&keyP->hfsPlus.nodeName : (CatalogName *)&keyP->hfs.nodeName; + CopyCatalogName( keyName, &catalogName, isHFSPlus ); + break; + } + } + + if ( newParDirID ) { + (void) ReleaseNode(calculatedBTCB, &node); + break; + } + + curLeafNode = nodeDescP->fLink; // sibling of this leaf node + + (void) ReleaseNode(calculatedBTCB, &node); + } + + if ( newParDirID == 0 ) + { + return ( IntError( GPtr, R_IntErr ) ); // •• Try fixing by creating a new directory record?
+ } + else + { + (void) SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &record, &recSize, &hint ); + + if ( isHFSPlus ) + { + HFSPlusCatalogThread *largeCatalogThreadP = (HFSPlusCatalogThread *) &record; + + largeCatalogThreadP->parentID = newParDirID; + CopyCatalogName( &catalogName, (CatalogName *) &largeCatalogThreadP->nodeName, isHFSPlus ); + } + else + { + HFSCatalogThread *smallCatalogThreadP = (HFSCatalogThread *) &record; + + smallCatalogThreadP->parentID = newParDirID; + CopyCatalogName( &catalogName, (CatalogName *)&smallCatalogThreadP->nodeName, isHFSPlus ); + } + + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recSize, &hint ); + } + + return( noErr ); // successful return +} + + +/*------------------------------------------------------------------------------ + +Routine: UpdVal - (Update Valence) + +Function: Replaces out of date valences with correct vals computed during scavenge. + +Input: GPtr - pointer to scavenger global area + p - pointer to the repair order + +Output: UpdVal - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr UpdVal( SGlobPtr GPtr, RepairOrderPtr p ) // the valence repair order +{ + OSErr result; // status return + Boolean isHFSPlus; + UInt32 hint; // as returned by CBTSearch + UInt16 recSize; + CatalogRecord record; + CatalogKey foundKey; + CatalogKey key; + SVCB *calculatedVCB = GPtr->calculatedVCB; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + switch( p->type ) + { + case E_RtDirCnt: // invalid count of Dirs in Root + if ( (UInt16)p->incorrect != calculatedVCB->vcbNmRtDirs ) + return ( IntError( GPtr, R_IntErr ) ); + calculatedVCB->vcbNmRtDirs = (UInt16)p->correct; + GPtr->VIStat |= S_MDB; + break; + + case E_RtFilCnt: + if ( (UInt16)p->incorrect != calculatedVCB->vcbNmFls ) + return ( IntError( GPtr, R_IntErr ) ); + calculatedVCB->vcbNmFls = (UInt16)p->correct; + GPtr->VIStat |= S_MDB; + break; + + case E_DirCnt: + if ( (UInt32)p->incorrect != calculatedVCB->vcbFolderCount ) + return ( IntError( GPtr, R_IntErr ) ); + calculatedVCB->vcbFolderCount = (UInt32)p->correct; + GPtr->VIStat |= S_MDB; + break; + + case E_FilCnt: + if ( (UInt32)p->incorrect != calculatedVCB->vcbFileCount ) + return ( IntError( GPtr, R_IntErr ) ); + calculatedVCB->vcbFileCount = (UInt32)p->correct; + GPtr->VIStat |= S_MDB; + break; + + case E_DirVal: + BuildCatalogKey( p->parid, (CatalogName *)&p->name, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &foundKey, &record, &recSize, &hint ); + if ( result ) + return ( IntError( GPtr, result ) ); + + if ( record.recordType == kHFSPlusFolderRecord ) + { + if ( (UInt32)p->incorrect != record.hfsPlusFolder.valence) + return ( IntError( GPtr, R_IntErr ) ); + record.hfsPlusFolder.valence = (UInt32)p->correct; + } + else + { + if ( (UInt16)p->incorrect != record.hfsFolder.valence ) + return ( IntError( GPtr, R_IntErr ) ); + record.hfsFolder.valence = (UInt16)p->correct; + } + + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &key, hint,\ + &record, recSize, &hint ); + if ( result ) + return ( IntError( GPtr, result ) ); + break; + } + + return( noErr ); // no error +} + +/*------------------------------------------------------------------------------ + +Routine: FixFinderFlags + +Function: Changes some of the Finder flag bits for directories. 
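+ If the on-disk frFlags no longer equal the value recorded as incorrect + (another repair order may have touched them first), only the single bit in + p->maskBit is set or cleared; otherwise frFlags is simply replaced with + p->correct.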
+ +Input: GPtr - pointer to scavenger global area + p - pointer to the repair order + +Output: FixFinderFlags - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr FixFinderFlags( SGlobPtr GPtr, RepairOrderPtr p ) // the repair order +{ + CatalogRecord record; + CatalogKey foundKey; + CatalogKey key; + UInt32 hint; // as returned by CBTSearch + OSErr result; // status return + UInt16 recSize; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + BuildCatalogKey( p->parid, (CatalogName *)&p->name, isHFSPlus, &key ); + + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &record, &recSize, &hint ); + if ( result ) + return ( IntError( GPtr, result ) ); + + if ( record.recordType == kHFSPlusFolderRecord ) + { + HFSPlusCatalogFolder *largeCatalogFolderP = (HFSPlusCatalogFolder *) &record; + if ( (UInt16) p->incorrect != largeCatalogFolderP->userInfo.frFlags) + { + // Another repair order may have affected the flags + if ( p->correct < p->incorrect ) + largeCatalogFolderP->userInfo.frFlags &= ~((UInt16)p->maskBit); + else + largeCatalogFolderP->userInfo.frFlags |= (UInt16)p->maskBit; + } + else + { + largeCatalogFolderP->userInfo.frFlags = (UInt16)p->correct; + } + // largeCatalogFolderP->contentModDate = timeStamp; + } + else + { + HFSCatalogFolder *smallCatalogFolderP = (HFSCatalogFolder *) &record; + if ( p->incorrect != smallCatalogFolderP->userInfo.frFlags) // do we know what we're doing? + { + // Another repair order may have affected the flags + if ( p->correct < p->incorrect ) + smallCatalogFolderP->userInfo.frFlags &= ~((UInt16)p->maskBit); + else + smallCatalogFolderP->userInfo.frFlags |= (UInt16)p->maskBit; + } + else + { + smallCatalogFolderP->userInfo.frFlags = (UInt16)p->correct; + } + + // smallCatalogFolderP->modifyDate = timeStamp; // also update the modify date! -DJB + } + + result = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recSize, &hint ); // write the node back to the file + if ( result ) + return( IntError( GPtr, result ) ); + + return( noErr ); // no error +} + +/*------------------------------------------------------------------------------ +FixHardLinkFinderInfo: Set the Finder Info contents to be correct for the type + of hard link (directory or file). + (HFS+ volumes only) +------------------------------------------------------------------------------*/ +static OSErr FixHardLinkFinderInfo(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogRecord rec; + CatalogKey key; + uint16_t recsize; + OSErr retval = 0; + UInt32 hint; + + retval = GetCatalogRecordByID(GPtr, (UInt32)p->parid, true, &key, &rec, &recsize); + if (retval != 0) { + if (retval == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. 
+ */ + GPtr->minorRepairFalseSuccess = true; + retval = 0; + } + goto done; + } + + if (rec.recordType != kHFSPlusFileRecord) { + retval = IntError(GPtr, R_IntErr); + goto done; + } + + if (p->type == E_DirHardLinkFinderInfo) { + rec.hfsPlusFile.userInfo.fdType = kHFSAliasType; + rec.hfsPlusFile.userInfo.fdCreator = kHFSAliasCreator; + rec.hfsPlusFile.userInfo.fdFlags |= kIsAlias; + } else if (p->type == E_FileHardLinkFinderInfo) { + rec.hfsPlusFile.userInfo.fdType = kHardLinkFileType; + rec.hfsPlusFile.userInfo.fdCreator = kHFSPlusCreator; + } else { + retval = IntError(GPtr, R_IntErr); + goto done; + } + + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, kNoHint, &rec, recsize, &hint); + +done: + return retval; +} + +/*------------------------------------------------------------------------------ +FixLinkChainPrev: Adjust the link chain in a hard link + (HFS Plus volumes only) +------------------------------------------------------------------------------*/ +static OSErr +FixLinkChainPrev(SGlobPtr GPtr, RepairOrderPtr p) +{ + SFCB *fcb; + CatalogRecord rec; + CatalogKey key, foundKey; + UInt16 recSize; + OSErr result; + Boolean isHFSPlus; + UInt32 hint = 0; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + fcb = GPtr->calculatedCatalogFCB; + + BuildCatalogKey( p->parid, NULL, true, &key ); + result = SearchBTreeRecord( fcb, &key, kNoHint, &foundKey, &rec, &recSize, &hint ); + + if (result) { + if (result == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + return 0; + } else { + return IntError(GPtr, R_IntErr); + } + } + + if (rec.recordType != kHFSPlusFileThreadRecord) { + return IntError(GPtr, R_IntErr); + } + + BuildCatalogKey( rec.hfsPlusThread.parentID, (const CatalogName*)&rec.hfsPlusThread.nodeName, true, &key); + result = SearchBTreeRecord( fcb, &key, kNoHint, &foundKey, &rec, &recSize, &hint); + + if (result) { + return IntError(GPtr, R_IntErr); + } + + if (rec.recordType != kHFSPlusFileRecord) { + return IntError(GPtr, R_IntErr); + } + + if ((UInt32)p->incorrect != rec.hfsPlusFile.hl_prevLinkID) { + return IntError(GPtr, R_IntErr); + } + + rec.hfsPlusFile.hl_prevLinkID = (UInt32)p->correct; + result = ReplaceBTreeRecord(fcb, &foundKey, hint, &rec, recSize, &hint); + if (result) { + return IntError(GPtr, R_IntErr); + } + + return noErr; +} + +/*------------------------------------------------------------------------------ +FixLinkChainNext: Adjust the link chain in a hard link + (HFS Plus volumes only) +------------------------------------------------------------------------------*/ +static OSErr +FixLinkChainNext(SGlobPtr GPtr, RepairOrderPtr p) +{ + SFCB *fcb; + CatalogRecord rec; + CatalogKey key, foundKey; + UInt16 recSize; + OSErr result; + Boolean isHFSPlus; + UInt32 hint = 0; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + fcb = GPtr->calculatedCatalogFCB; + + BuildCatalogKey( p->parid, NULL, true, &key ); + result = SearchBTreeRecord( fcb, &key, kNoHint, &foundKey, &rec, &recSize, &hint ); + + if (result) { + if (result == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code.
+ */ + GPtr->minorRepairFalseSuccess = true; + return 0; + } else { + return IntError(GPtr, R_IntErr); + } + } + + if (rec.recordType != kHFSPlusFileThreadRecord) { + return IntError(GPtr, R_IntErr); + } + + BuildCatalogKey( rec.hfsPlusThread.parentID, (const CatalogName*)&rec.hfsPlusThread.nodeName, true, &key); + result = SearchBTreeRecord( fcb, &key, kNoHint, &foundKey, &rec, &recSize, &hint); + + if (result) { + return IntError(GPtr, R_IntErr); + } + + if (rec.recordType != kHFSPlusFileRecord) { + return IntError(GPtr, R_IntErr); + } + + if ((UInt32)p->incorrect != rec.hfsPlusFile.hl_nextLinkID) { + return IntError(GPtr, R_IntErr); + } + + rec.hfsPlusFile.hl_nextLinkID = (UInt32)p->correct; + result = ReplaceBTreeRecord(fcb, &foundKey, hint, &rec, recSize, &hint); + if (result) { + return IntError(GPtr, R_IntErr); + } + + return noErr; +} + +/*------------------------------------------------------------------------------ +FixLinkCount: Adjust a data node link count (BSD hard link) + (HFS Plus volumes only) +------------------------------------------------------------------------------*/ +static OSErr +FixLinkCount(SGlobPtr GPtr, RepairOrderPtr p) +{ + CatalogRecord rec; + CatalogKey key; + OSErr result; + UInt16 recSize; + UInt32 hint; + Boolean isHFSPlus; + Boolean isdir = 0; + int lc; // linkcount + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + + result = GetCatalogRecordByID(GPtr, p->parid, isHFSPlus, &key, &rec, &recSize); + if (result) { + if (result == btNotFound) { + /* If the record was not found because either the thread + * record is missing or the file/folder record was deleted by + * another repair order, return false success to retry again + * after thread repair code. + */ + GPtr->minorRepairFalseSuccess = true; + result = 0; + } + return result; + } + + if (rec.recordType != kHFSPlusFileRecord && rec.recordType != kHFSPlusFolderRecord) + return (noErr); + + isdir = (rec.recordType == kHFSPlusFolderRecord); + + lc = (isdir ? rec.hfsPlusFolder.bsdInfo.special.linkCount : rec.hfsPlusFile.bsdInfo.special.linkCount); + if ((UInt32)p->correct != lc) { + if (isdir) + rec.hfsPlusFolder.bsdInfo.special.linkCount = (UInt32)p->correct; + else + rec.hfsPlusFile.bsdInfo.special.linkCount = (UInt32)p->correct; + + result = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, + kNoHint, &rec, recSize, &hint); + if (result) + return (IntError(GPtr, result)); + } + + return (noErr); +} + + +/*------------------------------------------------------------------------------ +FixIllegalNames: Fix catalog enties that have illegal names. + RepairOrder.name[] holds the old (illegal) name followed by the new name. + The new name has been checked to make sure it is unique within the target + directory. 
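The prev/next link IDs patched above behave like a doubly linked list threaded through the hard-link file records, and repair orders of this kind arise when a node's back pointer disagrees with its predecessor. A toy in-memory model of that sort of consistency check, purely illustrative and not how the scavenger actually walks the chain:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy stand-in for hard-link records chained through
 * hl_prevLinkID / hl_nextLinkID (illustrative only). */
struct link_node {
    uint32_t linkID;
    uint32_t prevLinkID;
    uint32_t nextLinkID;
};

/* Report the first node whose prevLinkID disagrees with its predecessor,
 * the kind of mismatch FixLinkChainPrev/Next are created to patch up. */
static int find_broken_prev(const struct link_node *chain, size_t n)
{
    for (size_t i = 1; i < n; i++) {
        if (chain[i].prevLinkID != chain[i - 1].linkID)
            return (int)i;
    }
    return -1;
}

int main(void)
{
    struct link_node chain[] = {
        { 10, 0, 11 },
        { 11, 10, 12 },
        { 12, 99, 0 },   /* prev should be 11 */
    };
    int bad = find_broken_prev(chain, 3);
    if (bad >= 0)
        printf("link %u has wrong prevLinkID %u (expected %u)\n",
               (unsigned)chain[bad].linkID,
               (unsigned)chain[bad].prevLinkID,
               (unsigned)chain[bad - 1].linkID);
    return 0;
}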
The names will look like this: + 2 byte length of old name (in unicode characters not bytes) + unicode characters for old name + 2 byte length of new name (in unicode characters not bytes) + unicode characters for new name +------------------------------------------------------------------------------*/ +static OSErr +FixIllegalNames( SGlobPtr GPtr, RepairOrderPtr roPtr ) +{ + OSErr result; + Boolean isHFSPlus; + Boolean isDirectory; + UInt16 recSize; + SFCB * fcbPtr; + CatalogName * oldNamePtr; + CatalogName * newNamePtr; + UInt32 hint; + CatalogRecord record; + CatalogKey key; + CatalogKey newKey; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + fcbPtr = GPtr->calculatedCatalogFCB; + + oldNamePtr = (CatalogName *) &roPtr->name; + if ( isHFSPlus ) + { + int myLength; + u_int16_t * myPtr = (u_int16_t *) oldNamePtr; + myLength = *myPtr; // get length of old name + myPtr += (1 + myLength); // bump past length of old name and old name + newNamePtr = (CatalogName *) myPtr; + } + else + { + int myLength; + u_char * myPtr = (u_char *) oldNamePtr; + myLength = *myPtr; // get length of old name + myPtr += (1 + myLength); // bump past length of old name and old name + newNamePtr = (CatalogName *) myPtr; + } + + // make sure new name isn't already there + BuildCatalogKey( roPtr->parid, newNamePtr, isHFSPlus, &newKey ); + result = SearchBTreeRecord( fcbPtr, &newKey, kNoHint, NULL, &record, &recSize, NULL ); + if ( result == noErr ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\treplacement name already exists \n" ); + plog( "\tduplicate name is 0x" ); + PrintName( newNamePtr->ustr.length, (UInt8 *) &newNamePtr->ustr.unicode, true ); + } + goto ErrorExit; + } + + // get catalog record for object with the illegal name. We will restore this + // info with our new (valid) name. + BuildCatalogKey( roPtr->parid, oldNamePtr, isHFSPlus, &key ); + result = SearchBTreeRecord( fcbPtr, &key, kNoHint, NULL, &record, &recSize, &hint ); + if ( result != noErr ) { + goto ErrorExit; + } + + result = DeleteBTreeRecord( fcbPtr, &key ); + if ( result != noErr ) { + goto ErrorExit; + } + + // insert record back into the catalog using the new name + result = InsertBTreeRecord( fcbPtr, &newKey, &record, recSize, &hint ); + if ( result != noErr ) { + goto ErrorExit; + } + + isDirectory = false; + switch( record.recordType ) + { + case kHFSFolderRecord: + case kHFSPlusFolderRecord: + isDirectory = true; break; + } + + // now we need to remove the old thread record and create a new one with + // our new name. 
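In the HFS Plus branch of FixIllegalNames above, the repair order's name buffer holds the old and new names back to back, each preceded by a 16-bit length counted in UTF-16 code units, so advancing one-plus-length 16-bit words lands on the replacement name. A stand-alone sketch of that layout and pointer arithmetic, using an illustrative buffer rather than the real RepairOrder:

#include <stdio.h>
#include <stdint.h>

/* Given a buffer laid out as
 *   [u16 oldLen][oldLen UTF-16 units][u16 newLen][newLen UTF-16 units]
 * return pointers to the two length-prefixed names. */
static void split_names(const uint16_t *buf,
                        const uint16_t **oldName, const uint16_t **newName)
{
    *oldName = buf;
    *newName = buf + 1 + buf[0];   /* skip length word plus oldLen units */
}

int main(void)
{
    /* "bad:" renamed to "ok" (ASCII code points stored as UTF-16 units). */
    const uint16_t repairName[] = { 4, 'b', 'a', 'd', ':', 2, 'o', 'k' };
    const uint16_t *oldName, *newName;

    split_names(repairName, &oldName, &newName);
    printf("old name has %u units, new name has %u units\n",
           (unsigned)oldName[0], (unsigned)newName[0]);
    printf("first unit of new name: '%c'\n", (char)newName[1]);
    return 0;
}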
+ BuildCatalogKey( GetObjectID( &record ), NULL, isHFSPlus, &key ); + result = SearchBTreeRecord( fcbPtr, &key, kNoHint, NULL, &record, &recSize, &hint ); + if ( result != noErr ) { + goto ErrorExit; + } + + result = DeleteBTreeRecord( fcbPtr, &key ); + if ( result != noErr ) { + goto ErrorExit; + } + + // insert thread record with new name as thread data + recSize = BuildThreadRec( &newKey, &record, isHFSPlus, isDirectory ); + result = InsertBTreeRecord( fcbPtr, &key, &record, recSize, &hint ); + if ( result != noErr ) { + goto ErrorExit; + } + + return( noErr ); + +ErrorExit: + GPtr->minorRepairErrors = true; + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - repair failed for type 0x%02X %d \n", __FUNCTION__, roPtr->type, roPtr->type ); + return( noErr ); // errors in this routine should not be fatal + +} /* FixIllegalNames */ + + +/*------------------------------------------------------------------------------ +FixBSDInfo: Reset or repair BSD info + (HFS Plus volumes only) +------------------------------------------------------------------------------*/ +static OSErr +FixBSDInfo(SGlobPtr GPtr, RepairOrderPtr p) +{ + SFCB *fcb; + CatalogRecord rec; + FSBufferDescriptor btRecord; + BTreeIterator btIterator; + Boolean isHFSPlus; + OSErr result; + UInt16 recSize; + size_t namelen; + unsigned char filename[256]; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + fcb = GPtr->calculatedCatalogFCB; + + ClearMemory(&btIterator, sizeof(btIterator)); + btIterator.hint.nodeNum = p->hint; + BuildCatalogKey(p->parid, (CatalogName *)&p->name, true, + (CatalogKey*)&btIterator.key); + btRecord.bufferAddress = &rec; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(rec); + + result = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, + &btRecord, &recSize, &btIterator); + if (result) { + return (IntError(GPtr, result)); + } + if (rec.recordType != kHFSPlusFileRecord && + rec.recordType != kHFSPlusFolderRecord) + return (noErr); + + utf_encodestr(((HFSUniStr255 *)&p->name)->unicode, + ((HFSUniStr255 *)&p->name)->length << 1, filename, &namelen, sizeof(filename)); + filename[namelen] = '\0'; + + if (p->type == E_InvalidPermissions && + ((UInt16)p->incorrect == rec.hfsPlusFile.bsdInfo.fileMode)) { + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) + plog("\t\"%s\": fixing mode from %07o to %07o\n", + filename, (int)p->incorrect, (int)p->correct); + + rec.hfsPlusFile.bsdInfo.fileMode = (UInt16)p->correct; + result = BTReplaceRecord(fcb, &btIterator, &btRecord, recSize); + } + + if (result) + return (IntError(GPtr, result)); + else + return (noErr); +} + + +/*------------------------------------------------------------------------------ +DeleteUnlinkedFile: Delete orphaned data node (BSD unlinked file) + Also used to delete empty "HFS+ Private Data" directories + (HFS Plus volumes only) +------------------------------------------------------------------------------*/ +static OSErr +DeleteUnlinkedFile(SGlobPtr GPtr, RepairOrderPtr p) +{ + OSErr result = -1; + CatalogName name; + CatalogName *cNameP; + Boolean isHFSPlus; + size_t len; + CatalogKey key; + CatalogRecord record; + uint32_t id = 0; + UInt16 recSize; + UInt32 hint; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + + if (p->name[0] > 0) { + /* name was stored in UTF-8 */ + (void) utf_decodestr(&p->name[1], p->name[0], name.ustr.unicode, &len, sizeof(name.ustr.unicode)); + name.ustr.length = len / 2; + cNameP = &name; + } else { + cNameP = NULL; + goto out; + } + + /* 
Lookup the record to find out file/folder ID for attribute deletion */ + BuildCatalogKey(p->parid, cNameP, true, &key); + result = SearchBTreeRecord (GPtr->calculatedCatalogFCB, &key, kNoHint, + NULL, &record, &recSize, &hint); + if (result) { + if (result == btNotFound) { + result = 0; + } + goto out; + } + + result = DeleteCatalogNode(GPtr->calculatedVCB, p->parid, cNameP, p->hint, false); + if (result) { + goto out; + } + + GPtr->VIStat |= S_MDB; + GPtr->VIStat |= S_VBM; + + if (record.recordType == kHFSPlusFileRecord) { + id = record.hfsPlusFile.fileID; + } else if (record.recordType == kHFSPlusFolderRecord) { + id = record.hfsPlusFolder.folderID; + } + + /* Delete all extended attributes associated with this file/folder */ + result = DeleteAllAttrsByID(GPtr, id); + +out: + return result; +} + +/* + * Fix a file's PEOF or LEOF (truncate) + * (HFS Plus volumes only) + */ +static OSErr +FixFileSize(SGlobPtr GPtr, RepairOrderPtr p) +{ + SFCB *fcb; + CatalogRecord rec; + HFSPlusCatalogKey * keyp; + FSBufferDescriptor btRecord; + BTreeIterator btIterator; + size_t len; + Boolean isHFSPlus; + Boolean replace; + OSErr result; + UInt16 recSize; + UInt64 bytes; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + if (!isHFSPlus) + return (0); + fcb = GPtr->calculatedCatalogFCB; + replace = false; + + ClearMemory(&btIterator, sizeof(btIterator)); + btIterator.hint.nodeNum = p->hint; + keyp = (HFSPlusCatalogKey*)&btIterator.key; + keyp->parentID = p->parid; + + /* name was stored in UTF-8 */ + (void) utf_decodestr(&p->name[1], p->name[0], keyp->nodeName.unicode, &len, sizeof(keyp->nodeName.unicode)); + keyp->nodeName.length = len / 2; + keyp->keyLength = kHFSPlusCatalogKeyMinimumLength + len; + + btRecord.bufferAddress = &rec; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(rec); + + result = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, + &btRecord, &recSize, &btIterator); + if (result) + return (IntError(GPtr, result)); + + if (rec.recordType != kHFSPlusFileRecord) + return (noErr); + + if (p->type == E_PEOF) { + bytes = p->correct * (UInt64)GPtr->calculatedVCB->vcbBlockSize; + if ((p->forkType == kRsrcFork) && + ((UInt32)p->incorrect == rec.hfsPlusFile.resourceFork.totalBlocks)) { + + rec.hfsPlusFile.resourceFork.totalBlocks = (UInt32)p->correct; + replace = true; + /* + * Make sure our new block count is large + * enough to cover the current LEOF. If + * its not we must truncate the fork. + */ + if (rec.hfsPlusFile.resourceFork.logicalSize > bytes) { + rec.hfsPlusFile.resourceFork.logicalSize = bytes; + } + } else if ((p->forkType == kDataFork) && + ((UInt32)p->incorrect == rec.hfsPlusFile.dataFork.totalBlocks)) { + + rec.hfsPlusFile.dataFork.totalBlocks = (UInt32)p->correct; + replace = true; + /* + * Make sure our new block count is large + * enough to cover the current LEOF. If + * its not we must truncate the fork. 
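The E_PEOF branch above converts the corrected block count to bytes using the volume's allocation block size and then clamps the fork's logical size whenever it would otherwise extend past the new physical end of file. A minimal sketch of that arithmetic with a simplified fork structure and illustrative names:

#include <stdio.h>
#include <stdint.h>

struct fork_info {
    uint64_t logicalSize;   /* LEOF in bytes             */
    uint32_t totalBlocks;   /* PEOF in allocation blocks */
};

/* Set the physical size to newBlocks and truncate the logical size if it
 * no longer fits, mirroring the totalBlocks/logicalSize adjustment above. */
static void fix_peof(struct fork_info *fork, uint32_t newBlocks, uint32_t blockSize)
{
    uint64_t bytes = (uint64_t)newBlocks * blockSize;

    fork->totalBlocks = newBlocks;
    if (fork->logicalSize > bytes)
        fork->logicalSize = bytes;
}

int main(void)
{
    struct fork_info rsrc = { .logicalSize = 20000, .totalBlocks = 10 };

    fix_peof(&rsrc, 4, 4096);   /* 4 blocks * 4096 bytes = 16384 bytes */
    printf("totalBlocks=%u logicalSize=%llu\n",
           (unsigned)rsrc.totalBlocks, (unsigned long long)rsrc.logicalSize);
    return 0;
}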
+ */ + if (rec.hfsPlusFile.dataFork.logicalSize > bytes) { + rec.hfsPlusFile.dataFork.logicalSize = bytes; + } + } + } else /* E_LEOF */ { + if ((p->forkType == kRsrcFork) && + (p->incorrect == rec.hfsPlusFile.resourceFork.logicalSize)) { + + rec.hfsPlusFile.resourceFork.logicalSize = p->correct; + replace = true; + + } else if ((p->forkType == kDataFork) && + (p->incorrect == rec.hfsPlusFile.dataFork.logicalSize)) { + + rec.hfsPlusFile.dataFork.logicalSize = p->correct; + replace = true; + } + } + + if (replace) { + result = BTReplaceRecord(fcb, &btIterator, &btRecord, recSize); + if (result) + return (IntError(GPtr, result)); + } + + return (noErr); +} + +/*------------------------------------------------------------------------------ + +Routine: FixEmbededVolDescription + +Function: If the "mdb->drAlBlSt" field has been modified, i.e. Norton Disk Doctor + 3.5 tried to "Fix" an HFS+ volume, it reduces the value in the + "mdb->drAlBlSt" field. If this field is changed, the file system can + no longer find the VolumeHeader or AltVolumeHeader. + +Input: GPtr - pointer to scavenger global area + p - pointer to the repair order + +Output: FixMDBdrAlBlSt - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr FixEmbededVolDescription( SGlobPtr GPtr, RepairOrderPtr p ) +{ + OSErr err; + HFSMasterDirectoryBlock *mdb; + EmbededVolDescription *desc; + SVCB *vcb = GPtr->calculatedVCB; + BlockDescriptor block; + + desc = (EmbededVolDescription *) &(p->name); + block.buffer = NULL; + + /* Fix the Alternate MDB */ + err = GetVolumeObjectAlternateMDB( &block ); + if ( err != noErr ) + goto ExitThisRoutine; + mdb = (HFSMasterDirectoryBlock *) block.buffer; + + mdb->drAlBlSt = desc->drAlBlSt; + mdb->drEmbedSigWord = desc->drEmbedSigWord; + mdb->drEmbedExtent.startBlock = desc->drEmbedExtent.startBlock; + mdb->drEmbedExtent.blockCount = desc->drEmbedExtent.blockCount; + + err = ReleaseVolumeBlock( vcb, &block, kForceWriteBlock ); + block.buffer = NULL; + if ( err != noErr ) + goto ExitThisRoutine; + + /* Fix the MDB */ + err = GetVolumeObjectPrimaryMDB( &block ); + if ( err != noErr ) + goto ExitThisRoutine; + mdb = (HFSMasterDirectoryBlock *) block.buffer; + + mdb->drAlBlSt = desc->drAlBlSt; + mdb->drEmbedSigWord = desc->drEmbedSigWord; + mdb->drEmbedExtent.startBlock = desc->drEmbedExtent.startBlock; + mdb->drEmbedExtent.blockCount = desc->drEmbedExtent.blockCount; + err = ReleaseVolumeBlock( vcb, &block, kForceWriteBlock ); + block.buffer = NULL; + +ExitThisRoutine: + if ( block.buffer != NULL ) + err = ReleaseVolumeBlock( vcb, &block, kReleaseBlock ); + + return( err ); +} + + + + +/*------------------------------------------------------------------------------ + +Routine: FixWrapperExtents + +Function: When Norton Disk Doctor 2.0 tries to repair an HFS Plus volume, it + assumes that the first catalog extent must be a fixed number of + allocation blocks after the start of the first extents extent (in the + wrapper). In reality, the first catalog extent should start immediately + after the first extents extent. 
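The repair that follows recomputes where the wrapper's catalog extent ought to begin, namely immediately after the wrapper's extents-file extent, so the corrected start block is simply the start plus the block count of that first extent. A tiny stand-alone illustration of the relationship, not the real MDB layout:

#include <stdio.h>
#include <stdint.h>

struct extent { uint16_t startBlock, blockCount; };

/* The wrapper catalog file should begin immediately after the wrapper
 * extents file, as the repair below enforces for drCTExtRec[0]. */
static uint16_t expected_catalog_start(struct extent extentsFile)
{
    return (uint16_t)(extentsFile.startBlock + extentsFile.blockCount);
}

int main(void)
{
    struct extent xt = { .startBlock = 4,  .blockCount = 12 };
    struct extent ct = { .startBlock = 20, .blockCount = 24 };  /* shifted by a bad repair */

    uint16_t want = expected_catalog_start(xt);
    if (ct.startBlock != want)
        printf("catalog extent starts at %u, moving it to %u\n",
               (unsigned)ct.startBlock, (unsigned)want);
    return 0;
}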
+ +Input: GPtr - pointer to scavenger global area + p - pointer to the repair order + +Output: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static OSErr FixWrapperExtents( SGlobPtr GPtr, RepairOrderPtr p ) +{ +#pragma unused (p) + + OSErr err; + HFSMasterDirectoryBlock *mdb; + SVCB *vcb = GPtr->calculatedVCB; + BlockDescriptor block; + + /* Get the Alternate MDB */ + block.buffer = NULL; + err = GetVolumeObjectAlternateMDB( &block ); + if ( err != noErr ) + goto ExitThisRoutine; + mdb = (HFSMasterDirectoryBlock *) block.buffer; + + /* Fix the wrapper catalog's first (and only) extent */ + mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + + mdb->drXTExtRec[0].blockCount; + + err = ReleaseVolumeBlock(vcb, &block, kForceWriteBlock); + block.buffer = NULL; + if ( err != noErr ) + goto ExitThisRoutine; + + /* Fix the MDB */ + err = GetVolumeObjectPrimaryMDB( &block ); + if ( err != noErr ) + goto ExitThisRoutine; + mdb = (HFSMasterDirectoryBlock *) block.buffer; + + mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + + mdb->drXTExtRec[0].blockCount; + + err = ReleaseVolumeBlock(vcb, &block, kForceWriteBlock); + block.buffer = NULL; + +ExitThisRoutine: + if ( block.buffer != NULL ) + (void) ReleaseVolumeBlock( vcb, &block, kReleaseBlock ); + + return( err ); +} + + +// +// Entries in the extents BTree which do not have a corresponding catalog entry get fixed here +// This routine will run slowly if the extents file is large because we require a Catalog BTree +// search for each extent record. +// +static OSErr FixOrphanedExtent( SGlobPtr GPtr ) +{ +#if 0 + OSErr err; + UInt32 hint; + UInt32 recordSize; + UInt32 maxRecords; + UInt32 numberOfFilesInList; + ExtentKey *extentKeyPtr; + ExtentRecord *extentDataPtr; + ExtentRecord extents; + ExtentRecord zeroExtents; + CatalogKey foundExtentKey; + CatalogRecord catalogData; + CatalogRecord threadData; + HFSCatalogNodeID fileID; + BTScanState scanState; + + HFSCatalogNodeID lastFileID = -1; + UInt32 recordsFound = 0; + Boolean mustRebuildBTree = false; + Boolean isHFSPlus; + SVCB *calculatedVCB = GPtr->calculatedVCB; + UInt32 **dataHandle = GPtr->validFilesList; + SFCB * fcb = GPtr->calculatedExtentsFCB; + + // Set Up + isHFSPlus = VolumeObjectIsHFSPlus( ); + // + // Use the BTree scanner since we use MountCheck to find orphaned extents, and MountCheck uses the scanner + err = BTScanInitialize( fcb, 0, 0, 0, gFSBufferPtr, gFSBufferSize, &scanState ); + if ( err != noErr ) return( badMDBErr ); + + ClearMemory( (Ptr)&zeroExtents, sizeof(ExtentRecord) ); + + if ( isHFSPlus ) + { + maxRecords = fcb->fcbLogicalSize / sizeof(HFSPlusExtentRecord); + } + else + { + maxRecords = fcb->fcbLogicalSize / sizeof(HFSExtentRecord); + numberOfFilesInList = GetHandleSize((Handle) dataHandle) / sizeof(UInt32); + qsort( *dataHandle, numberOfFilesInList, sizeof (UInt32), cmpLongs ); // Sort the list of found file IDs + } + + + while ( recordsFound < maxRecords ) + { + err = BTScanNextRecord( &scanState, false, (void **) &extentKeyPtr, (void **) &extentDataPtr, &recordSize ); + + if ( err != noErr ) + { + if ( err == btNotFound ) + err = noErr; + break; + } + + ++recordsFound; + fileID = (isHFSPlus == true) ? 
extentKeyPtr->hfsPlus.fileID : extentKeyPtr->hfs.fileID; + + if ( (fileID > kHFSBadBlockFileID) && (lastFileID != fileID) ) // Keep us out of reserved file trouble + { + lastFileID = fileID; + + if ( isHFSPlus ) + { + err = LocateCatalogThread( calculatedVCB, fileID, &threadData, (UInt16*)&recordSize, &hint ); // This call returns nodeName as either Str31 or HFSUniStr255, no need to call PrepareInputName() + + if ( err == noErr ) // Thread is found, just verify actual record exists. + { + err = LocateCatalogNode( calculatedVCB, threadData.hfsPlusThread.parentID, (const CatalogName *) &(threadData.hfsPlusThread.nodeName), kNoHint, &foundExtentKey, &catalogData, &hint ); + } + else if ( err == cmNotFound ) + { + err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, extentKeyPtr, kNoHint, &foundExtentKey, &extents, (UInt16*)&recordSize, &hint ); + if ( err == noErr ) + { //¥¥ can't we just delete btree record? + err = ReplaceBTreeRecord( GPtr->calculatedExtentsFCB, &foundExtentKey, hint, &zeroExtents, recordSize, &hint ); + err = DeleteBTreeRecord( GPtr->calculatedExtentsFCB, &foundExtentKey ); // Delete the orphaned extent + } + } + + if ( err != noErr ) + mustRebuildBTree = true; // if we have errors here we should rebuild the extents btree + } + else + { + if ( ! bsearch( &fileID, *dataHandle, numberOfFilesInList, sizeof(UInt32), cmpLongs ) ) + { + err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, extentKeyPtr, kNoHint, &foundExtentKey, &extents, (UInt16*)&recordSize, &hint ); + if ( err == noErr ) + { //¥¥ can't we just delete btree record? + err = ReplaceBTreeRecord( GPtr->calculatedExtentsFCB, &foundExtentKey, hint, &zeroExtents, recordSize, &hint ); + err = DeleteBTreeRecord( GPtr->calculatedExtentsFCB, &foundExtentKey ); // Delete the orphaned extent + } + + if ( err != noErr ) + mustRebuildBTree = true; // if we have errors here we should rebuild the extents btree + } + } + } + } + + if ( mustRebuildBTree == true ) + { + GPtr->EBTStat |= S_RebuildBTree; + err = errRebuildBtree; + } + + return( err ); +#else + return (0); +#endif +} + +/* Function: FixAttrSize + * + * Description: + * Fixes the incorrect block count or attribute size for extended attribute + * detected during verify stage. This is a minor repair order function + * i.e. it depends on previously created repair order to repair the disk. + * + * Input: + * GPtr - global scavenger structure pointer + * p - current repair order + * + * Output: + * result - zero indicates success, non-zero failure. + */ +static OSErr FixAttrSize(SGlobPtr GPtr, RepairOrderPtr p) +{ + OSErr result = noErr; + HFSPlusAttrKey *key; + HFSPlusAttrRecord record; + BTreeIterator iterator; + FSBufferDescriptor btRecord; + u_int16_t recSize; + u_int64_t bytes; + Boolean doReplace = false; + + /* Initialize the iterator, attribute key, and attribute record */ + ClearMemory(&iterator, sizeof(iterator)); + key = (HFSPlusAttrKey *)&iterator.key; + BuildAttributeKey(p->parid, 0, &p->name[1], p->name[0], key); + + btRecord.bufferAddress = &record; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(record); + + /* Search for the attribute record. + * Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). This function only uses recordType + * field from inline attribute record. 
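For plain HFS the (currently disabled) FixOrphanedExtent path above decides whether an extent record is orphaned by sorting the list of file IDs found in the catalog once and then probing it with a binary search per extent. A stand-alone sketch of that sort-then-probe pattern over uint32_t IDs; the comparator compares explicitly instead of subtracting, which is a deliberate difference from cmpLongs:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int cmp_u32(const void *a, const void *b)
{
    uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
    return (x > y) - (x < y);
}

/* Return nonzero if id appears in the sorted list of known file IDs. */
static int id_is_known(uint32_t id, const uint32_t *list, size_t n)
{
    return bsearch(&id, list, n, sizeof(uint32_t), cmp_u32) != NULL;
}

int main(void)
{
    uint32_t known[] = { 16, 22, 19, 300, 45 };
    size_t n = sizeof(known) / sizeof(known[0]);

    qsort(known, n, sizeof(uint32_t), cmp_u32);   /* sort once */

    printf("fileID 22 known:  %d\n", id_is_known(22, known, n));   /* 1 */
    printf("fileID 23 orphan: %d\n", !id_is_known(23, known, n));  /* 1 */
    return 0;
}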
+ */ + result = BTSearchRecord(GPtr->calculatedAttributesFCB, &iterator, + kInvalidMRUCacheKey, &btRecord, &recSize, &iterator); + if (result) { + DPRINTF (d_error|d_xattr, "%s: Cannot find attribute record (err = %d)\n", __FUNCTION__, result); + goto out; + } + + /* We should only get record of type kHFSPlusAttrForkData */ + if (record.recordType != kHFSPlusAttrForkData) { + DPRINTF (d_error|d_xattr, "%s: Record found is not attribute fork data\n", __FUNCTION__); + result = btNotFound; + goto out; + } + + /* Manipulate necessary fields in the attribute record */ + if (p->type == E_PEOAttr) { + if ((u_int32_t)p->incorrect == record.forkData.theFork.totalBlocks) { + record.forkData.theFork.totalBlocks = (u_int32_t)p->correct; + doReplace = true; + + /* Make sure that new block count is large enough to + * cover the current LEOAttr. If not, truncate it. + */ + bytes = p->correct * (u_int64_t)GPtr->calculatedVCB->vcbBlockSize; + if (record.forkData.theFork.logicalSize > bytes) { + record.forkData.theFork.logicalSize = bytes; + } + } + } else if (p->type == E_LEOAttr) { + if (p->incorrect == record.forkData.theFork.logicalSize) { + record.forkData.theFork.logicalSize = p->correct; + doReplace = true; + } + } + + /* Replace the attribute record, if required */ + if (doReplace == true) { + result = BTReplaceRecord(GPtr->calculatedAttributesFCB, &iterator, + &btRecord, recSize); + if (result) { + DPRINTF (d_error|d_xattr, "%s: Cannot replace attribute record (err=%d)\n", __FUNCTION__, result); + goto out; + } + } + +out: + return(result); +} + +/* Function: FixBadExtent + * + * Description: The function repairs bad extent entry by zeroing out the + * bad extent entry and truncating all extent information found after the + * bad extent entry. + * + * 1. The information for repair is retrieved from the repair order passed + * as parameter. + * 2. If the start block of bad extent is zero, bad extent existed in + * catalog record extent information. Lookup the catalog record, zero + * out bad extent entry and all entries after it and update the catalog + * record. + * 3. If the start block of bad extent is non-zero, bad extent existed + * in overflow extent. If the index of bad extent is zero, we want + * to delete the record completely. If the index is non-zero, search + * the extent record, zero out bad extent entry and all entries after it + * and update the extent record. + * 4. Search for any extent record in the overflow extent after the + * the bad extent entry. If found, delete the record. + * 5. If the file was truncated, create symlink in DamagedFiles folder + * and display message to the user. + * + * Input: + * GPtr - global scavenger structure pointer + * p - current repair order + * + * Output: + * result - zero indicates success, non-zero failure. 
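Steps 2 and 3 of the description above reduce to the same in-memory operation: starting at the bad entry's index, clear every remaining descriptor in the extent record before writing it back. A minimal sketch over an HFS Plus style eight-entry extent array, with illustrative types:

#include <stdio.h>
#include <stdint.h>

#define kNumExtents 8   /* entries per HFS Plus extent record */

struct extent_desc { uint32_t startBlock, blockCount; };

/* Zero the bad entry and everything after it, as FixBadExtent does for
 * both catalog-resident and overflow extent records. */
static void truncate_extents(struct extent_desc ext[kNumExtents], int badIndex)
{
    for (int i = badIndex; i < kNumExtents; i++) {
        ext[i].startBlock = 0;
        ext[i].blockCount = 0;
    }
}

int main(void)
{
    struct extent_desc ext[kNumExtents] = {
        { 100, 8 }, { 200, 4 }, { 999999, 4 }, { 300, 2 },
    };
    truncate_extents(ext, 2);   /* entry 2 was reported bad */

    for (int i = 0; i < 4; i++)
        printf("extent[%d] = { %u, %u }\n", i,
               (unsigned)ext[i].startBlock, (unsigned)ext[i].blockCount);
    return 0;
}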
+ */ +static OSErr FixBadExtent(SGlobPtr GPtr, RepairOrderPtr p) +{ + OSErr err = noErr; + UInt32 badExtentIndex; + UInt32 extentStartBlock, foundStartBlock; + UInt32 fileID; + int i; + UInt8 forkType; + + UInt16 recordSize; + ExtentRecord extentRecord; + ExtentKey extentKey; + UInt32 hint; + Boolean isHFSPlus; + Boolean didRepair; + + fileID = p->parid; + badExtentIndex = p->correct; + extentStartBlock = p->hint; + forkType = p->forkType; + + isHFSPlus = VolumeObjectIsHFSPlus(); + didRepair = false; + + assert (forkType != kEAData); + + /* extentStartBlock = 0, the bad extent exists in catalog record */ + if (extentStartBlock == 0) { + + CatalogRecord catRecord; + CatalogKey catKey; + HFSPlusExtentDescriptor *hfsPlusExtent; + HFSExtentDescriptor *hfsExtent; + + /* Lookup record in catalog BTree */ + err = GetCatalogRecord(GPtr, fileID, isHFSPlus, &catKey, &catRecord, &recordSize); + if (err) { + plog("%s: Could not get catalog record for fileID %u\n", __FUNCTION__, fileID); + goto out; + } + + /* Check record type */ + assert ((catRecord.recordType == kHFSPlusFileRecord) || + (catRecord.recordType == kHFSFileRecord)); + + /* Zero out the bad extent entry and all entries after it */ + if (isHFSPlus) { + if (forkType == kDataFork) { + hfsPlusExtent = catRecord.hfsPlusFile.dataFork.extents; + } else { + hfsPlusExtent = catRecord.hfsPlusFile.resourceFork.extents; + } + + for (i = badExtentIndex; i < GPtr->numExtents; i++) { + hfsPlusExtent[i].startBlock = 0; + hfsPlusExtent[i].blockCount = 0; + } + } else { + if (forkType == kDataFork) { + hfsExtent = catRecord.hfsFile.dataExtents; + } else { + hfsExtent = catRecord.hfsFile.rsrcExtents; + } + for (i = badExtentIndex; i < GPtr->numExtents; i++) { + hfsExtent[i].startBlock = 0; + hfsExtent[i].blockCount = 0; + } + } + + /* Write the catalog record back */ + err = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &catKey, kNoHint, + &catRecord, recordSize, &hint); + if (err) { + plog("%s: Could not replace catalog record for fileID %u\n", __FUNCTION__, fileID); + goto out; + } + didRepair = true; + + } else { /* bad extent exists in overflow extent record */ + + /* First entry in overflow extent record is bad, delete entire record */ + if (badExtentIndex == 0) { + goto del_overflow_extents; + } + + /* Lookup record in extents overflow BTree */ + BuildExtentKey (isHFSPlus, forkType, fileID, extentStartBlock, &extentKey); + err = SearchBTreeRecord (GPtr->calculatedExtentsFCB, &extentKey, kNoHint, + &extentKey, &extentRecord, &recordSize, &hint); + if (err) { + plog("%s: Could not get overflow extents record for fileID %u, fork %u, start block %u\n", __FUNCTION__, fileID, forkType, extentStartBlock); + goto out; + } + + /* Zero out the bad extent entry and all entries after it */ + if (isHFSPlus) { + for (i = badExtentIndex; i < GPtr->numExtents; i++) { + extentRecord.hfsPlus[i].startBlock = 0; + extentRecord.hfsPlus[i].blockCount = 0; + } + } else { + for (i = badExtentIndex; i < GPtr->numExtents; i++) { + extentRecord.hfs[i].startBlock = 0; + extentRecord.hfs[i].blockCount = 0; + } + } + + /* Write the extent record back */ + err = ReplaceBTreeRecord(GPtr->calculatedExtentsFCB, &extentKey, hint, + &extentRecord, recordSize, &hint); + if (err) { + plog("%s: Could not replace overflow extents record for fileID %u, fork %u, start block %u\n", __FUNCTION__, fileID, forkType, extentStartBlock); + goto out; + } + didRepair = true; + + /* The startBlock for extent record with bad extent entry is updated + * because we use this startBlock later to lookup 
next extent record + * for this file and forktype in overflow extent btree which should + * be deleted. By incrementing the startBlock by one, we ensure that + * we find the next record, if any, that should be deleted instead of + * finding the same record that was updated above. + */ + extentStartBlock++; + } + +del_overflow_extents: + /* Search for overflow extent records. We should get a valid record only + * if the bad extent entry was the first entry in the extent overflow + * record. For all other cases, the search record will return an error + */ + BuildExtentKey (isHFSPlus, forkType, fileID, extentStartBlock, &extentKey); + err = SearchBTreeRecord (GPtr->calculatedExtentsFCB, &extentKey, kNoHint, + &extentKey, &extentRecord, &recordSize, &hint); + if ((err != noErr) && (err != btNotFound)) { + goto create_symlink; + } + + /* If we got error, check the next record */ + if (err == btNotFound) { + err = GetBTreeRecord(GPtr->calculatedExtentsFCB, 1, &extentKey, &extentRecord, + &recordSize, &hint); + } + + while (err == noErr) { + /* Check if the record has correct fileID, forkType */ + if (isHFSPlus) { + if ((fileID != extentKey.hfsPlus.fileID) || + (forkType != extentKey.hfsPlus.forkType)) { + break; + } + foundStartBlock = extentKey.hfsPlus.startBlock; + } else { + if ((fileID != extentKey.hfs.fileID) || + (forkType != extentKey.hfs.forkType)) { + break; + } + foundStartBlock = extentKey.hfs.startBlock; + } + + /* Delete the extent record */ + err = DeleteBTreeRecord(GPtr->calculatedExtentsFCB, &extentKey); + DPRINTF (d_info, "%s: Deleting extent overflow for fileID=%u, forkType=%u, startBlock=%u\n", __FUNCTION__, fileID, forkType, foundStartBlock); + if (err) { + goto create_symlink; + } + didRepair = true; + + /* Get the next extent record */ + err = GetBTreeRecord(GPtr->calculatedExtentsFCB, 1, &extentKey, &extentRecord, + &recordSize, &hint); + } + + if (err == btNotFound) { + err = noErr; + } + + UpdateBTreeHeader(GPtr->calculatedExtentsFCB); + +create_symlink: + /* Create symlink for repaired files in damaged files folder */ + if (didRepair == true) { + /* Create symlink for damaged files */ + (void) CreateCorruptFileSymlink(GPtr, fileID); + } + +out: + return err; +} + +/* Function: FixOrphanedFiles + * + * Description: + * Incorrect number of thread records get fixed in this function. + * + * The function traverses the entire catalog Btree. + * + * For a file/folder record, it tries to lookup its corresponding thread + * record. If the thread record does not exist, or is not correct, a new + * thread record is created. The parent ID, record type, and the name of + * the file/folder are compared for correctness. + * For plain HFS, a thread record is only looked-up if kHFSThreadExistsMask is set. + * + * For a thread record, it tries to lookup its corresponding file/folder + * record. If its does not exist or is not correct, the thread record + * is deleted. The file/folder ID is compared for correctness. + * + * Input: 1. 
GPtr - pointer to global scavenger area + * + * Return value: + * zero means success + * non-zero means failure + */ +static OSErr FixOrphanedFiles ( SGlobPtr GPtr ) +{ + CatalogKey key; + CatalogKey foundKey; + CatalogKey tempKey; + CatalogRecord record; + CatalogRecord threadRecord; + CatalogRecord record2; + HFSCatalogNodeID parentID; + HFSCatalogNodeID cNodeID = 0; + BTreeIterator savedIterator; + UInt32 hint; + UInt32 hint2; + UInt32 threadHint; + OSErr err; + UInt16 recordSize; + UInt16 threadRecordSize; + SInt16 recordType; + SInt16 foundRecType; + SInt16 selCode = 0x8001; /* Get first record */ + Boolean isHFSPlus; + BTreeControlBlock *btcb = GetBTreeControlBlock( kCalculatedCatalogRefNum ); + Boolean isDirectory; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + CopyMemory( &btcb->lastIterator, &savedIterator, sizeof(BTreeIterator) ); + + do + { + // Save/Restore Iterator around calls to GetBTreeRecord + CopyMemory( &savedIterator, &btcb->lastIterator, sizeof(BTreeIterator) ); + err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recordSize, &hint ); + if ( err != noErr ) break; + CopyMemory( &btcb->lastIterator, &savedIterator, sizeof(BTreeIterator) ); + + selCode = 1; // kNextRecord + recordType = record.recordType; + + isDirectory = false; + + switch( recordType ) + { + case kHFSFileRecord: + // If the small file is not supposed to have a thread, just break + if ( ( record.hfsFile.flags & kHFSThreadExistsMask ) == 0 ) + break; + + case kHFSFolderRecord: + case kHFSPlusFolderRecord: + case kHFSPlusFileRecord: + + // Locate the thread associated with this record + + (void) CheckForStop( GPtr ); // rotate cursor + + parentID = isHFSPlus == true ? foundKey.hfsPlus.parentID : foundKey.hfs.parentID; + threadHint = hint; + + switch( recordType ) + { + case kHFSFolderRecord: + cNodeID = record.hfsFolder.folderID; + isDirectory = true; + break; + case kHFSFileRecord: + cNodeID = record.hfsFile.fileID; + break; + case kHFSPlusFolderRecord: + cNodeID = record.hfsPlusFolder.folderID; + isDirectory = true; + break; + case kHFSPlusFileRecord: + cNodeID = record.hfsPlusFile.fileID; + break; + } + + //-- Build the key for the file thread + BuildCatalogKey( cNodeID, nil, isHFSPlus, &key ); + + err = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, + &tempKey, &threadRecord, &threadRecordSize, &hint2 ); + + /* We found a thread record for this file/folder record. */ + if (err == noErr) { + /* Check if the parent ID and nodeName are same, and recordType is as + * expected. If not, we are missing a correct thread record. Force + * btNotFound in such case. 
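The HFS Plus comparison that follows treats the file/folder key's name and the thread record's name as equal only when the lengths match and the UTF-16 code units are identical, which is why the byte count handed to bcmp is the character count times two. A self-contained version of that comparison with a simplified string type:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct uni_str { uint16_t length; uint16_t unicode[255]; };

/* Equal only if the lengths match and all UTF-16 units match; note the
 * byte count passed to memcmp is length * 2. */
static int uni_str_equal(const struct uni_str *a, const struct uni_str *b)
{
    return a->length == b->length &&
           memcmp(a->unicode, b->unicode, (size_t)a->length * 2) == 0;
}

int main(void)
{
    struct uni_str file   = { 3, { 'f', 'o', 'o' } };
    struct uni_str thread = { 3, { 'f', 'o', 'o' } };
    struct uni_str other  = { 3, { 'f', 'o', 'O' } };

    printf("file vs thread: %s\n", uni_str_equal(&file, &thread) ? "match" : "mismatch");
    printf("file vs other:  %s\n", uni_str_equal(&file, &other)  ? "match" : "mismatch");
    return 0;
}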
+ */ + if (isHFSPlus) { + /* Check thread's recordType */ + foundRecType = threadRecord.hfsPlusThread.recordType; + if (isDirectory == true) { + if (foundRecType != kHFSPlusFolderThreadRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Folder thread recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } else { + if (foundRecType != kHFSPlusFileThreadRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: File thread recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } + + /* Compare parent ID */ + if (parentID != threadRecord.hfsPlusThread.parentID) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: parentID for id=%u do not match (fileKey=%u threadRecord=%u)\n", __FUNCTION__, cNodeID, parentID, threadRecord.hfsPlusThread.parentID); + } + } + + /* Compare nodeName from file/folder key and thread reocrd */ + if (!((foundKey.hfsPlus.nodeName.length == threadRecord.hfsPlusThread.nodeName.length) + && (!bcmp(foundKey.hfsPlus.nodeName.unicode, + threadRecord.hfsPlusThread.nodeName.unicode, + foundKey.hfsPlus.nodeName.length * 2)))) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + unsigned maxLength = foundKey.hfsPlus.nodeName.length; + if (maxLength < threadRecord.hfsPlusThread.nodeName.length) + maxLength = threadRecord.hfsPlusThread.nodeName.length; + + plog ("\t%s: nodeName for id=%u do not match\n", __FUNCTION__, cNodeID); + if (cur_debug_level & d_dump_record) + { + plog("\tFile/Folder record:\n"); + HexDump(&foundKey, foundKey.hfsPlus.keyLength + 2, FALSE); + plog("--\n"); + HexDump(&record, recordSize, FALSE); + plog("\n"); + plog("\tThread record:\n"); + HexDump(&tempKey, tempKey.hfsPlus.keyLength + 2, FALSE); + plog("--\n"); + HexDump(&threadRecord, threadRecordSize, FALSE); + plog("\n"); + } + } + } + + /* If any of the above checks failed, delete the bad thread record */ + if (err == btNotFound) { + (void) DeleteBTreeRecord(GPtr->calculatedCatalogFCB, &tempKey); + } + } else { /* plain HFS */ + /* Check thread's recordType */ + foundRecType = threadRecord.hfsThread.recordType; + if (isDirectory == true) { + if (foundRecType != kHFSFolderThreadRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Folder thread recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } else { + if (foundRecType != kHFSFileThreadRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: File thread recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } + + /* Compare parent ID */ + if (parentID != threadRecord.hfsThread.parentID) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: parentID for id=%u do not match (fileKey=%u threadRecord=%u)\n", __FUNCTION__, cNodeID, parentID, threadRecord.hfsThread.parentID); + } + } + + /* Compare nodeName from file/folder key and thread reocrd */ + if (!((foundKey.hfs.nodeName[0] == threadRecord.hfsThread.nodeName[0]) + && (!bcmp(&foundKey.hfs.nodeName[1], + &threadRecord.hfsThread.nodeName[1], + foundKey.hfs.nodeName[0])))) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: nodeName for id=%u do not match\n", __FUNCTION__, cNodeID); + } + } + + /* If any of the above checks failed, delete the 
bad thread record */ + if (err == btNotFound) { + (void) DeleteBTreeRecord(GPtr->calculatedCatalogFCB, &tempKey); + } + } + } /* err == noErr */ + + // For missing thread records, just create the thread + if ( err == btNotFound ) + { + // Create the missing thread record. + + isDirectory = false; + switch( recordType ) + { + case kHFSFolderRecord: + case kHFSPlusFolderRecord: + isDirectory = true; + break; + } + + //-- Fill out the data for the new file thread from the key + // of catalog file/folder record + recordSize = BuildThreadRec( &foundKey, &threadRecord, isHFSPlus, + isDirectory ); + err = InsertBTreeRecord( GPtr->calculatedCatalogFCB, &key, + &threadRecord, recordSize, &threadHint ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Created thread record for id=%u (err=%u)\n", __FUNCTION__, cNodeID, err); + } + } + + break; + + + case kHFSFolderThreadRecord: + case kHFSPlusFolderThreadRecord: + isDirectory = true; + + case kHFSFileThreadRecord: + case kHFSPlusFileThreadRecord: + + // Find the catalog record, if it does not exist, delete the existing thread. + if ( isHFSPlus ) + BuildCatalogKey( record.hfsPlusThread.parentID, (const CatalogName *)&record.hfsPlusThread.nodeName, isHFSPlus, &key ); + else + BuildCatalogKey( record.hfsThread.parentID, (const CatalogName *)&record.hfsThread.nodeName, isHFSPlus, &key ); + + err = SearchBTreeRecord ( GPtr->calculatedCatalogFCB, &key, kNoHint, &tempKey, &record2, &recordSize, &hint2 ); + + /* We found a file/folder record for this thread record. */ + if (err == noErr) { + /* Check if the file/folder ID are same and if the recordType is as + * expected. If not, we are missing a correct file/folder record. + * Delete the extra thread record + */ + if (isHFSPlus) { + /* Check recordType */ + foundRecType = record2.hfsPlusFile.recordType; + if (isDirectory == true) { + if (foundRecType != kHFSPlusFolderRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Folder recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } else { + if (foundRecType != kHFSPlusFileRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: File recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } + + /* Compare file/folder ID */ + if (foundKey.hfsPlus.parentID != record2.hfsPlusFile.fileID) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: fileID do not match (threadKey=%u fileRecord=%u), parentID=%u\n", __FUNCTION__, foundKey.hfsPlus.parentID, record2.hfsPlusFile.fileID, record.hfsPlusThread.parentID); + } + } + } else { /* plain HFS */ + /* Check recordType */ + foundRecType = record2.hfsFile.recordType; + if (isDirectory == true) { + if (foundRecType != kHFSFolderRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Folder recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } else { + if (foundRecType != kHFSFileRecord) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: File recordType mismatch for id=%u (found=%u)\n", __FUNCTION__, cNodeID, foundRecType); + } + } + } + + /* Compare file/folder ID */ + if (foundKey.hfs.parentID != record2.hfsFile.fileID) { + err = btNotFound; + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + if (recordType == kHFSFolderThreadRecord) { + plog ("\t%s: fileID do not match 
(threadKey=%u fileRecord=%u), parentID=%u\n", __FUNCTION__, foundKey.hfs.parentID, record2.hfsFolder.folderID, record.hfsThread.parentID); + } else { + plog ("\t%s: fileID do not match (threadKey=%u fileRecord=%u), parentID=%u\n", __FUNCTION__, foundKey.hfs.parentID, record2.hfsFile.fileID, record.hfsThread.parentID); + } + } + } + } + } /* if (err == noErr) */ + + if ( err != noErr ) + { + err = DeleteBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + if (isHFSPlus) { + plog ("\t%s: Deleted thread record for id=%d (err=%d)\n", __FUNCTION__, foundKey.hfsPlus.parentID, err); + } else { + plog ("\t%s: Deleted thread record for id=%d (err=%d)\n", __FUNCTION__, foundKey.hfs.parentID, err); + } + } + } + + break; + + default: + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Unknown record type.\n", __FUNCTION__); + } + break; + + } + } while ( err == noErr ); + + if ( err == btNotFound ) + err = noErr; // all done, no more catalog records + +// if (err == noErr) +// err = BTFlushPath( GPtr->calculatedCatalogFCB ); + + return( err ); +} + + +static OSErr RepairReservedBTreeFields ( SGlobPtr GPtr ) +{ + CatalogRecord record; + CatalogKey foundKey; + UInt16 recordSize; + SInt16 selCode; + UInt32 hint; + UInt32 *reserved; + OSErr err; + + selCode = 0x8001; // start with 1st record + + err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recordSize, &hint ); + if ( err != noErr ) goto EXIT; + + selCode = 1; // get next record from now on + + do + { + switch( record.recordType ) + { + case kHFSPlusFolderRecord: + /* XXX -- this should not always be cleared out (but doesn't appear to being called) */ + if ( record.hfsPlusFolder.flags != 0 ) + { + record.hfsPlusFolder.flags = 0; + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint ); + } + break; + + case kHFSPlusFileRecord: + // Note: bit 7 (mask 0x80) of flags is unused in HFS or HFS Plus. However, Inside Macintosh: Files + // describes it as meaning the file record is in use. Some non-Apple implementations end up setting + // this bit, so we just ignore it. + if ( ( record.hfsPlusFile.flags & (UInt16) ~(0X83) ) != 0 ) + { + record.hfsPlusFile.flags &= 0X83; + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint ); + } + break; + + case kHFSFolderRecord: + if ( record.hfsFolder.flags != 0 ) + { + record.hfsFolder.flags = 0; + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint ); + } + break; + + case kHFSFolderThreadRecord: + case kHFSFileThreadRecord: + reserved = (UInt32*) &(record.hfsThread.reserved); + if ( reserved[0] || reserved[1] ) + { + reserved[0] = 0; + reserved[1] = 0; + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint ); + } + break; + + case kHFSFileRecord: + // Note: bit 7 (mask 0x80) of flags is unused in HFS or HFS Plus. However, Inside Macintosh: Files + // describes it as meaning the file record is in use. Some non-Apple implementations end up setting + // this bit, so we just ignore it. 
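The file-record case below keeps only the defined flag bits plus the historically misused bit 7 (mask 0x83) and clears the obsolete start-block and reserved fields whenever any of them is nonzero. A small sketch of that test-and-scrub step, using an illustrative record structure:

#include <stdio.h>
#include <stdint.h>

#define kAllowedHFSFileFlags 0x83u   /* defined bits plus ignored bit 7 */

struct hfs_file_rec {
    uint8_t  flags;
    uint16_t dataStartBlock, rsrcStartBlock;
    uint32_t reserved;
};

/* Return 1 if the record had to be scrubbed, 0 if it was already clean. */
static int scrub_reserved_fields(struct hfs_file_rec *r)
{
    if ((r->flags & (uint8_t)~kAllowedHFSFileFlags) == 0 &&
        r->dataStartBlock == 0 && r->rsrcStartBlock == 0 && r->reserved == 0)
        return 0;

    r->flags &= kAllowedHFSFileFlags;
    r->dataStartBlock = 0;
    r->rsrcStartBlock = 0;
    r->reserved = 0;
    return 1;
}

int main(void)
{
    struct hfs_file_rec rec = { .flags = 0xC1, .reserved = 7 };
    printf("scrubbed=%d flags=0x%02x reserved=%u\n",
           scrub_reserved_fields(&rec), rec.flags, (unsigned)rec.reserved);
    return 0;
}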
+ if ( ( ( record.hfsFile.flags & (UInt8) ~(0X83) ) != 0 ) + || ( record.hfsFile.dataStartBlock != 0 ) + || ( record.hfsFile.rsrcStartBlock != 0 ) + || ( record.hfsFile.reserved != 0 ) ) + { + record.hfsFile.flags &= 0X83; + record.hfsFile.dataStartBlock = 0; + record.hfsFile.rsrcStartBlock = 0; + record.hfsFile.reserved = 0; + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint ); + } + break; + + default: + break; + } + + if ( err != noErr ) goto EXIT; + + err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recordSize, &hint ); + + } while ( err == noErr ); + + if ( err == btNotFound ) + err = noErr; // all done, no more catalog records + +EXIT: + return( err ); +} + + +/* Function: FixOrphanAttrRecord + * + * Description: + * The function traverses the attribute BTree completely and for every + * leaf record, calls CheckAttributeRecord. CheckAttributeRecord function + * is common function for verify and repair stage. CheckAttributeRecord + * deletes invalid/orphaned extended attribute records under following + * conditions - + * 1. record is overflow extents with no valid fork data or overflow extents + * preceeding it. + * 2. record type is unknown. + * + * Input: + * GPtr - global scavenger structure pointer + * + * Output: + * error code - zero on success, non-zero on failure. + */ +static OSErr FixOrphanAttrRecord(SGlobPtr GPtr) +{ + OSErr err = noErr; + UInt16 selCode; + UInt32 hint; + + HFSPlusAttrRecord record; + HFSPlusAttrKey key; + UInt16 recordSize; + + /* Zero out last attribute information from global scavenger structure */ + bzero (&(GPtr->lastAttrInfo), sizeof(GPtr->lastAttrInfo)); + + /* Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). CheckAttributeRecord only uses recordType + * field from inline attribute record. + */ + selCode = 0x8001; + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &key, + &record, &recordSize, &hint); + if (err != noErr) { + goto out; + } + + selCode = 1; + do { + err = CheckAttributeRecord(GPtr, &key, &record, recordSize); + if (err) { + break; + } + + /* Access the next record. + * Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). CheckAttributeRecord only uses recordType + * field from inline attribute record. + */ + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &key, + &record, &recordSize, &hint); + } while (err == noErr); + + if (err == btNotFound) { + err = noErr; + } + +out: + return(err); +} + +/* Function: GetCatalogRecord + * + * Description: + * This function returns a catalog file/folder record for a given + * file/folder ID from the catalog BTree. + * + * Input: 1. GPtr - pointer to global scavenger area + * 2. fileID - file ID to search the file/folder record + * 3. isHFSPlus - boolean value to indicate if volume is HFSPlus + * + * Output: 1. catKey - catalog key + * 2. catRecord - catalog record for given ID + * 3. 
recordSize - size of catalog record return back + * + * Return value: + * zero means success + * non-zero means failure + */ +static OSErr GetCatalogRecord(SGlobPtr GPtr, UInt32 fileID, Boolean isHFSPlus, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize) +{ + OSErr err = noErr; + CatalogKey catThreadKey; + CatalogName catalogName; + UInt32 hint; + uint32_t thread_key_parentID; + + /* Look up for catalog thread record for the file that owns attribute */ + BuildCatalogKey(fileID, NULL, isHFSPlus, &catThreadKey); + err = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &catThreadKey, kNoHint, catKey, catRecord, recordSize, &hint); + if (err) { + plog ("%s: No matching catalog thread record found\n", __FUNCTION__); + goto out; + } + +#if DEBUG_XATTR + plog ("%s(%s,%d):1 recordType=%x, flags=%x\n", __FUNCTION__, __FILE__, __LINE__, + catRecord->hfsPlusFile.recordType, + catRecord->hfsPlusFile.flags); +#endif + + /* We were expecting a thread record. The recordType says it is a file + * record or folder record. Return error. + */ + if ((catRecord->hfsPlusFile.recordType == kHFSPlusFolderRecord) || + (catRecord->hfsPlusFile.recordType == kHFSPlusFileRecord)) { + err = fsBTRecordNotFoundErr; + goto out; + } + thread_key_parentID = catKey->hfsPlus.parentID; + + /* It is either a file thread record or folder thread record. + * Look up for catalog record for the file that owns attribute */ + CopyCatalogName((CatalogName *)&(catRecord->hfsPlusThread.nodeName), &catalogName, isHFSPlus); + BuildCatalogKey(catRecord->hfsPlusThread.parentID, &catalogName, isHFSPlus, catKey); + err = SearchBTreeRecord(GPtr->calculatedCatalogFCB, catKey, kNoHint, catKey, catRecord, recordSize, &hint); + if (err) { + plog ("%s: No matching catalog record found\n", __FUNCTION__); + if (cur_debug_level & d_dump_record) + { + plog ("Searching for key:\n"); + HexDump(catKey, CalcKeySize(GPtr->calculatedCatalogBTCB, (BTreeKey *)catKey), FALSE); + } + goto out; + } + +#if DEBUG_XATTR + plog ("%s(%s,%d):2 recordType=%x, flags=%x\n", __FUNCTION__, __FILE__, __LINE__, + catRecord->hfsPlusFile.recordType, + catRecord->hfsPlusFile.flags); +#endif + + /* For catalog file or folder record, the parentID in the thread + * record's key should be equal to the fileID in the file/folder + * record --- which is equal to the ID of the file/folder record + * that is being looked up. If not, mark the volume for repair. + */ + if (thread_key_parentID != catRecord->hfsPlusFile.fileID) { + RcdError(GPtr, E_IncorrectNumThdRcd); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog("\t%s: fileID=%u, thread.key.parentID=%u, record.fileID=%u\n", + __FUNCTION__, fileID, thread_key_parentID, catRecord->hfsPlusFile.fileID); + } + GPtr->CBTStat |= S_Orphan; + } +out: + return err; +} + +/* Function: RepairAttributesCheckABT + * + * Description: + * This function is called from RepairAttributes (to repair extended + * attributes) during repair stage of fsck_hfs. + * + * 1. Make full pass through attribute BTree. + * 2. For every unique fileID, lookup its catalog record in Catalog BTree. + * 3. If found, check the attributes/security bit in catalog record. + * If not set correctly, set it and replace the catalog record. + * 4. If not found, return error + * + * Input: 1. GPtr - pointer to global scavenger area + * 2. 
isHFSPlus - boolean value to indicate if volume is HFSPlus + * + * Output: err - Function result + * zero means success + * non-zero means failure + */ +static OSErr RepairAttributesCheckABT(SGlobPtr GPtr, Boolean isHFSPlus) +{ + OSErr err = noErr; + UInt16 selCode; /* select access pattern for BTree */ + UInt32 hint; + + HFSPlusAttrRecord attrRecord; + HFSPlusAttrKey attrKey; + UInt16 attrRecordSize; + CatalogRecord catRecord; + CatalogKey catKey; + UInt16 catRecordSize; + + attributeInfo lastID; /* fileID for the last attribute searched */ + Boolean didRecordChange = false; /* whether catalog record was changed after checks */ + +#if DEBUG_XATTR + char attrName[XATTR_MAXNAMELEN]; + size_t len; +#endif + + lastID.fileID = 0; + lastID.hasSecurity = false; + + selCode = 0x8001; /* Get first record from BTree */ + /* Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). This function only uses recordType + * field from inline attribute record. + */ + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &attrRecordSize, &hint); + if (err == btNotFound) { + /* The attributes B-tree is empty, which is OK; nothing to do. */ + err = noErr; + goto out; + } + if (err != noErr) goto out; + + selCode = 1; /* Get next record */ + do { +#if DEBUG_XATTR + /* Convert unicode attribute name to char for ACL check */ + (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, attrName, &len, sizeof(attrName)); + attrName[len] = '\0'; + plog ("%s(%s,%d): Found attrName=%s for fileID=%d\n", __FUNCTION__, __FILE__, __LINE__, attrName, attrKey.fileID); +#endif + + if (attrKey.fileID != lastID.fileID) { + /* We found an attribute with new file ID */ + + /* Replace the previous catalog record only if we updated the flags */ + if (didRecordChange == true) { + err = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &catKey , kNoHint, &catRecord, catRecordSize, &hint); + if (err) { + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Error in replacing catalog record for id=%u\n", __FUNCTION__, lastID.fileID); + } + goto out; + } + } + + didRecordChange = false; /* reset to indicate new record has not changed */ + + /* Get the catalog record for the new fileID */ + err = GetCatalogRecord(GPtr, attrKey.fileID, isHFSPlus, &catKey, &catRecord, &catRecordSize); + if (err) { + /* No catalog record was found for this fileID. */ + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: No matching catalog record found for id=%u\n", __FUNCTION__, attrKey.fileID); + } + + /* 3984119 - Do not delete extended attributes for file IDs less + * kHFSFirstUserCatalogNodeID but not equal to kHFSRootFolderID + * in prime modulus checksum. 
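The forward pass here sets the has-attributes bit on every file that owns at least one extended attribute and additionally the security bit when one of those attributes is the ACL attribute, writing the catalog record back only if the flags actually changed. A compact stand-alone model of that bookkeeping; the types and the ACL flag are hypothetical stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define kHasAttributesBit 0x01u
#define kHasSecurityBit   0x02u

struct attr  { uint32_t fileID; int isACL; };
struct inode { uint32_t fileID; unsigned flags; };

/* Compute the flag bits this inode should carry given the attribute list
 * and report whether the stored flags had to change. */
static int fix_attribute_bits(struct inode *ino, const struct attr *attrs, size_t nattrs)
{
    unsigned want = ino->flags;
    for (size_t i = 0; i < nattrs; i++) {
        if (attrs[i].fileID != ino->fileID)
            continue;
        want |= kHasAttributesBit;
        if (attrs[i].isACL)
            want |= kHasSecurityBit;
    }
    if (want == ino->flags)
        return 0;
    ino->flags = want;
    return 1;
}

int main(void)
{
    struct attr attrs[] = { { 25, 0 }, { 25, 1 }, { 40, 0 } };
    struct inode a = { 25, 0 }, b = { 40, kHasAttributesBit };

    printf("file 25 updated: %d (flags=0x%x)\n", fix_attribute_bits(&a, attrs, 3), a.flags);
    printf("file 40 updated: %d (flags=0x%x)\n", fix_attribute_bits(&b, attrs, 3), b.flags);
    return 0;
}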
These file IDs do not have + * any catalog record + */ + if ((attrKey.fileID < kHFSFirstUserCatalogNodeID) && + (attrKey.fileID != kHFSRootFolderID)) { +#if DEBUG_XATTR + plog ("%s: Ignore catalog check for fileID=%d for attribute=%s\n", __FUNCTION__, attrKey.fileID, attrName); +#endif + goto getnext; + } + + /* Delete this orphan extended attribute */ + err = delete_attr_record(GPtr, &attrKey, &attrRecord); + if (err) { + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\t%s: Error in deleting attribute record for id=%u\n", __FUNCTION__, attrKey.fileID); + } + goto out; + } + goto getnext; + } + + lastID.fileID = attrKey.fileID; /* set last fileID to the new ID */ + lastID.hasSecurity = false; /* reset to indicate new fileID does not have security */ + + /* Check the Attribute bit */ + if (!(catRecord.hfsPlusFile.flags & kHFSHasAttributesMask)) { + /* kHFSHasAttributeBit should be set */ + catRecord.hfsPlusFile.flags |= kHFSHasAttributesMask; + didRecordChange = true; + } + + /* Check if attribute is ACL */ + if (!bcmp(attrKey.attrName, GPtr->securityAttrName, GPtr->securityAttrLen)) { + lastID.hasSecurity = true; + /* Check the security bit */ + if (!(catRecord.hfsPlusFile.flags & kHFSHasSecurityMask)) { + /* kHFSHasSecurityBit should be set */ + catRecord.hfsPlusFile.flags |= kHFSHasSecurityMask; + didRecordChange = true; + } + } + } else { + /* We have seen attribute for fileID in past */ + + /* If last time we saw this fileID did not have an ACL and this + * extended attribute is an ACL, ONLY check consistency of + * security bit from Catalog record + */ + if ((lastID.hasSecurity == false) && !bcmp(attrKey.attrName, GPtr->securityAttrName, GPtr->securityAttrLen)) { + lastID.hasSecurity = true; + /* Check the security bit */ + if (!(catRecord.hfsPlusFile.flags & kHFSHasSecurityMask)) { + /* kHFSHasSecurityBit should be set */ + catRecord.hfsPlusFile.flags |= kHFSHasSecurityMask; + didRecordChange = true; + } + } + } + +getnext: + /* Access the next record + * Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). This function only uses recordType + * field from inline attribute record. + */ + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &attrRecordSize, &hint); + } while (err == noErr); + + err = noErr; + + /* Replace the catalog record for last extended attribute in the attributes BTree + * only if we updated the flags + */ + if (didRecordChange == true) { + err = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &catKey , kNoHint, &catRecord, catRecordSize, &hint); + if (err) { +#if DEBUG_XATTR + plog ("%s: Error in replacing Catalog Record\n", __FUNCTION__); +#endif + goto out; + } + } + +out: + return err; +} + +/* Function: RepairAttributesCheckCBT + * + * Description: + * This function is called from RepairAttributes (to repair extended + * attributes) during repair stage of fsck_hfs. + * + * NOTE: The case where attribute exists and bit is not set is being taken care in + * RepairAttributesCheckABT. This function determines relationship from catalog + * Btree to attribute Btree, and not vice-versa. + + * 1. Make full pass through catalog BTree. + * 2. For every fileID, if the attributes/security bit is set, + * lookup all the extended attributes in the attributes BTree. + * 3. If found, check that if bits are set correctly. + * 4. If not found, clear the bits. + * + * Input: 1. GPtr - pointer to global scavenger area + * 2. 
isHFSPlus - boolean value to indicate if volume is HFSPlus + * + * Output: err - Function result + * zero means success + * non-zero means failure + */ +static OSErr RepairAttributesCheckCBT(SGlobPtr GPtr, Boolean isHFSPlus) +{ + OSErr err = noErr; + UInt16 selCode; /* select access pattern for BTree */ + UInt16 recordSize; + UInt32 hint; + + HFSPlusAttrKey *attrKey; + CatalogRecord catRecord; + CatalogKey catKey; + + Boolean didRecordChange = false; /* whether catalog record was changed after checks */ + + BTreeIterator iterator; + + UInt32 curFileID; + Boolean curRecordHasAttributes = false; + Boolean curRecordHasSecurity = false; + + selCode = 0x8001; /* Get first record from BTree */ + err = GetBTreeRecord(GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint); + if ( err != noErr ) goto out; + + selCode = 1; /* Get next record */ + do { + /* Check only file record and folder record, else skip to next record */ + if ( (catRecord.hfsPlusFile.recordType != kHFSPlusFileRecord) && + (catRecord.hfsPlusFile.recordType != kHFSPlusFolderRecord)) { + goto getnext; + } + + /* Check if catalog record has attribute and/or security bit set, else + * skip to next record + */ + if ( ((catRecord.hfsPlusFile.flags & kHFSHasAttributesMask) == 0) && + ((catRecord.hfsPlusFile.flags & kHFSHasSecurityMask) == 0) ) { + goto getnext; + } + + /* Initialize some flags */ + didRecordChange = false; + curRecordHasSecurity = false; + curRecordHasAttributes = false; + + /* Access all extended attributes for this fileID */ + curFileID = catRecord.hfsPlusFile.fileID; + + /* Initialize the iterator and attribute key */ + ClearMemory(&iterator, sizeof(BTreeIterator)); + attrKey = (HFSPlusAttrKey *)&iterator.key; + attrKey->keyLength = kHFSPlusAttrKeyMinimumLength; + attrKey->fileID = curFileID; + + /* Search for attribute with NULL name. 
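Seeding the iterator with a zero-length attribute name is simply a way of landing on the smallest possible key for the file ID, so that stepping forward enumerates exactly that file's attributes until the ID changes. The same idea over a sorted in-memory key list, with made-up attribute names and a linear walk standing in for the B-tree search:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct attr_key { uint32_t fileID; const char *name; };

/* Keys are ordered by (fileID, name); find the first index whose key is
 * >= (fileID, ""), i.e. the first attribute belonging to fileID. */
static size_t seek_first_attr(const struct attr_key *keys, size_t n, uint32_t fileID)
{
    size_t i = 0;
    while (i < n && keys[i].fileID < fileID)
        i++;
    return i;
}

int main(void)
{
    const struct attr_key keys[] = {
        { 20, "user.note" },
        { 25, "user.acl-example" },
        { 25, "user.comment" },
        { 31, "user.tag" },
    };
    size_t n = sizeof(keys) / sizeof(keys[0]);

    for (size_t i = seek_first_attr(keys, n, 25); i < n && keys[i].fileID == 25; i++)
        printf("fileID 25 owns attribute \"%s\"\n", keys[i].name);
    return 0;
}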
This will place the iterator at correct fileID location in BTree */ + err = BTSearchRecord(GPtr->calculatedAttributesFCB, &iterator, kInvalidMRUCacheKey, NULL, NULL, &iterator); + if (err && (err != btNotFound)) { +#if DEBUG_XATTR + plog ("%s: No matching attribute record found\n", __FUNCTION__); +#endif + goto out; + } + + /* Iterate over to all extended attributes for given fileID */ + err = BTIterateRecord(GPtr->calculatedAttributesFCB, kBTreeNextRecord, &iterator, NULL, NULL); + + /* Check only if we did _find_ an attribute record for the current fileID */ + while ((err == noErr) && (attrKey->fileID == curFileID)) { + /* Current record should have attribute bit set */ + curRecordHasAttributes = true; + + /* Current record should have security bit set */ + if (!bcmp(attrKey->attrName, GPtr->securityAttrName, GPtr->securityAttrLen)) { + curRecordHasSecurity = true; + } + + /* Get the next record */ + err = BTIterateRecord(GPtr->calculatedAttributesFCB, kBTreeNextRecord, &iterator, NULL, NULL); + } + + /* Determine if we need to update the catalog record */ + if ((curRecordHasAttributes == false) && (catRecord.hfsPlusFile.flags & kHFSHasAttributesMask)) { + /* If no attribute exists and attributes bit is set, clear it */ + catRecord.hfsPlusFile.flags &= ~kHFSHasAttributesMask; + didRecordChange = true; + } + + if ((curRecordHasSecurity == false) && (catRecord.hfsPlusFile.flags & kHFSHasSecurityMask)) { + /* If no security attribute exists and security bit is set, clear it */ + catRecord.hfsPlusFile.flags &= ~kHFSHasSecurityMask; + didRecordChange = true; + } + + /* If there was any change in catalog record, write it back to disk */ + if (didRecordChange == true) { + err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &catKey , kNoHint, &catRecord, recordSize, &hint ); + if (err) { +#if DEBUG_XATTR + plog ("%s: Error writing catalog record\n", __FUNCTION__); +#endif + goto out; + } + } + +getnext: + /* Access the next record */ + err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint ); + } while (err == noErr); + + err = noErr; + +out: + return err; +} + +/* Function: RepairAttributes + * + * Description: + * This function fixes the extended attributes consistency by + * calling two functions: + * 1. RepairAttributesCheckABT: Traverses attributes Btree and + * checks if each attribute has correct bits set in its corresponding + * catalog record. + * 2. RepairAttributesCheckCBT: Traverses catalog Btree and checks + * if each catalog record that has attribute/security bit set have + * corresponding extended attributes. + * + * Input: 1. GPtr - pointer to global scavenger area + * + * Output: err - Function result + * zero means success + * non-zero means failure + */ +static OSErr RepairAttributes(SGlobPtr GPtr) +{ + OSErr err = noErr; + Boolean isHFSPlus; + + /* Check if the volume is HFS Plus volume */ + isHFSPlus = VolumeObjectIsHFSPlus(); + if (!isHFSPlus) { + goto out; + } + + /* Traverse Attributes BTree and access required records in Catalog BTree */ + err = RepairAttributesCheckABT(GPtr, isHFSPlus); + if (err) { + goto out; + } + + /* Traverse Catalog BTree and access required records in Attributes BTree */ + err = RepairAttributesCheckCBT(GPtr, isHFSPlus); + if (err) { + goto out; + } + +out: + return err; +} + +/*------------------------------------------------------------------------------ + +Function: cmpLongs + +Function: compares two longs. 
+
+Input: *a: pointer to first number
+ *b: pointer to second number
+
+Output: <0 if *a < *b
+ 0 if *a == *b
+ >0 if *a > *b
+------------------------------------------------------------------------------*/
+
+int cmpLongs ( const void *a, const void *b )
+{
+ return( *(long*)a - *(long*)b );
+}
+
+/* Function: FixOverlappingExtents
+ *
+ * Description: Fix the overlapping extents problem. The implementation copies
+ * every extent involved in an overlap to a new location and updates the
+ * extent record to point to the new extent. At the end of repair, symlinks
+ * named "fileID filename" are created to point to the files involved in the
+ * overlapping extents problem. Note that currently only HFS Plus volumes are
+ * repaired.
+ *
+ * PARTIAL SUCCESS: This function handles partial success in the following
+ * two ways:
+ * a. The function pre-allocates space for all the extents. If an
+ * allocation fails, it proceeds to allocate space for the other extents
+ * instead of returning an error.
+ * b. If moving an extent succeeds and symlink creation fails, the function
+ * proceeds to the next extent.
+ * If the repair encounters either condition a or b, an appropriate message is
+ * printed at the end of the function.
+ * If even a single repair operation succeeds (moving an extent), the function
+ * returns zero.
+ *
+ * Current limitations:
+ * 1. A regular file instead of a symlink is created under the following conditions:
+ * a. The volume is a plain HFS volume. HFS does not support symlink
+ * creation.
+ * b. The path the new symlink points to is greater than PATH_MAX bytes.
+ * c. The path the new symlink points to has some intermediate component
+ * greater than NAME_MAX bytes.
+ * 2. Contiguous disk space for every new extent is expected. The extent is
+ * not broken into multiple extents if contiguous space is not available on the
+ * disk.
+ * 3. The current fix for overlapping extents only works for HFS Plus volumes.
+ * Plain HFS volumes have problems accessing the catalog record by fileID.
+ * 4. Plain HFS volumes might have encoding issues with the newly created
+ * symlink and its data.
+ *
+ * Input:
+ * GPtr - global scavenger pointer
+ *
+ * Output:
+ * returns zero on success/partial success (moving of one extent succeeds),
+ * non-zero on failure.
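+ *
+ * Illustrative example (hypothetical file): if fileID 0x1234, a file named
+ * "report.doc" under "/Users/a", owns an overlapping extent, its data is
+ * copied to freshly allocated blocks and a symlink named "00001234 report.doc"
+ * is created in the DamagedFiles folder, pointing to "../Users/a/report.doc".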
+ */ +static OSErr FixOverlappingExtents(SGlobPtr GPtr) +{ + OSErr err = noErr; + Boolean isHFSPlus; + unsigned int i; + unsigned int numOverlapExtents = 0; + ExtentInfo *extentInfo; + ExtentsTable **extentsTableH = GPtr->overlappedExtents; + + unsigned int status = 0; +#define S_DISKFULL 0x01 /* error due to disk full */ +#define S_MOVEEXTENT 0x02 /* moving extent succeeded */ + + isHFSPlus = VolumeObjectIsHFSPlus(); + if (isHFSPlus == false) { + /* Do not repair plain HFS volumes */ + err = R_RFail; + goto out; + } + + if (extentsTableH == NULL) { + /* No overlapping extents exist */ + goto out; + } + + numOverlapExtents = (**extentsTableH).count; + + /* Optimization - sort the overlap extent list based on blockCount to + * start allocating contiguous space for largest number of blocks first + */ + qsort((**extentsTableH).extentInfo, numOverlapExtents, sizeof(ExtentInfo), + CompareExtentBlockCount); + +#if DEBUG_OVERLAP + /* Print all overlapping extents structure */ + for (i=0; i<numOverlapExtents; i++) { + extentInfo = &((**extentsTableH).extentInfo[i]); + plog ("%d: fileID = %d, startBlock = %d, blockCount = %d\n", i, extentInfo->fileID, extentInfo->startBlock, extentInfo->blockCount); + } +#endif + + /* Pre-allocate free space for all overlapping extents */ + for (i=0; i<numOverlapExtents; i++) { + extentInfo = &((**extentsTableH).extentInfo[i]); + err = AllocateContigBitmapBits (GPtr->calculatedVCB, extentInfo->blockCount, &(extentInfo->newStartBlock)); + if ((err != noErr)) { + /* Not enough disk space */ + status |= S_DISKFULL; +#if DEBUG_OVERLAP + plog ("%s: Not enough disk space to allocate extent for fileID = %d (start=%d, count=%d)\n", __FUNCTION__, extentInfo->fileID, extentInfo->startBlock, extentInfo->blockCount); +#endif + } + } + + /* For every extent info, copy the extent into new location and create symlink */ + for (i=0; i<numOverlapExtents; i++) { + extentInfo = &((**extentsTableH).extentInfo[i]); + + /* Do not repair this extent as no new extent was allocated */ + if (extentInfo->newStartBlock == 0) { + continue; + } + + /* Move extent data to new location */ + err = MoveExtent(GPtr, extentInfo); + if (err != noErr) { + extentInfo->didRepair = false; +#if DEBUG_OVERLAP + plog ("%s: Extent move failed for extent for fileID = %u (old=%u, new=%u, count=%u) (err=%d)\n", __FUNCTION__, extentInfo->fileID, extentInfo->startBlock, extentInfo->newStartBlock, extentInfo->blockCount, err); +#endif + } else { + /* Mark the overlapping extent as repaired */ + extentInfo->didRepair = true; + status |= S_MOVEEXTENT; +#if DEBUG_OVERLAP + plog ("%s: Extent move success for extent for fileID = %u (old=%u, new=%u, count=%u)\n", __FUNCTION__, extentInfo->fileID, extentInfo->startBlock, extentInfo->newStartBlock, extentInfo->blockCount); +#endif + } + + /* Create symlink for the corrupt file */ + err = CreateCorruptFileSymlink(GPtr, extentInfo->fileID); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in creating symlink for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); +#endif + } else { +#if DEBUG_OVERLAP + plog ("%s: Created symlink for fileID = %u (old=%u, new=%u, count=%u)\n", __FUNCTION__, extentInfo->fileID, extentInfo->startBlock, extentInfo->newStartBlock, extentInfo->blockCount); +#endif + } + } + +out: + /* Release all blocks used by overlap extents that are repaired */ + for (i=0; i<numOverlapExtents; i++) { + extentInfo = &((**extentsTableH).extentInfo[i]); + if (extentInfo->didRepair == true) { + ReleaseBitmapBits (extentInfo->startBlock, 
extentInfo->blockCount);
+ }
+ }
+
+ /* For all un-repaired extents,
+ * 1. Release all blocks allocated for the new extent.
+ * 2. Mark all blocks used for the old extent (since the overlapping region
+ * has been marked free in the for loop above).
+ */
+ for (i=0; i<numOverlapExtents; i++) {
+ extentInfo = &((**extentsTableH).extentInfo[i]);
+ if (extentInfo->didRepair == false) {
+ CaptureBitmapBits (extentInfo->startBlock, extentInfo->blockCount);
+
+ if (extentInfo->newStartBlock != 0) {
+ ReleaseBitmapBits (extentInfo->newStartBlock, extentInfo->blockCount);
+ }
+ }
+ }
+
+ /* Update the volume free block count since the release and alloc above might
+ * have worked on the same bit multiple times.
+ */
+ UpdateFreeBlockCount (GPtr);
+
+ /* Print correct status messages */
+ if (status & S_DISKFULL) {
+ fsckPrint(GPtr->context, E_DiskFull);
+ }
+
+ /* If moving of even one extent succeeds, return success */
+ if (status & S_MOVEEXTENT) {
+ err = noErr;
+ }
+
+ return err;
+} /* FixOverlappingExtents */
+
+/* Function: CompareExtentBlockCount
+ *
+ * Description: Compares the blockCount from two ExtentInfo structures and
+ * returns the comparison result (inverted, since we have to arrange the
+ * list in descending order).
+ *
+ * Input:
+ * first and second - void pointers to ExtentInfo structure.
+ *
+ * Output:
+ * <0 if first > second
+ * =0 if first == second
+ * >0 if first < second
+ */
+static int CompareExtentBlockCount(const void *first, const void *second)
+{
+ return (((ExtentInfo *)second)->blockCount -
+ ((ExtentInfo *)first)->blockCount);
+} /* CompareExtentBlockCount */
+
+/* Function: MoveExtent
+ *
+ * Description: Move data from the old extent to the new extent and update the
+ * corresponding records.
+ * 1. Search the extent record for the overlapping extent.
+ * If the fileID < kHFSFirstUserCatalogNodeID,
+ * Ignore repair for BadBlock, RepairCatalog, BogusExtent files.
+ * Search for the extent record in the volume header.
+ * Else,
+ * Search for the extent record in the catalog BTree. If the extent list
+ * does not end in the catalog record and the extent record is not found
+ * in the catalog record, search in the extents BTree.
+ * 2. If found, copy disk blocks from the old extent to the new extent.
+ * 3. If that succeeds, update the extent record with the new start block and
+ * write it back to disk.
+ * This function does not deallocate the blocks at the old start block.
+ *
+ * Input:
+ * GPtr - Global Scavenger structure pointer
+ * extentInfo - Current overlapping extent details.
+ *
+ * Output:
+ * err: zero on success, non-zero on failure
+ * paramErr - Invalid parameter, e.g. file ID is less than
+ * kHFSFirstUserCatalogNodeID.
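+ *
+ * Note: the caller (FixOverlappingExtents) releases the bitmap bits of the
+ * old extent after a successful move, and deallocates the pre-allocated new
+ * blocks if the move fails.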
+ */ +static OSErr MoveExtent(SGlobPtr GPtr, ExtentInfo *extentInfo) +{ + OSErr err = noErr; + Boolean isHFSPlus; + + CatalogRecord catRecord; + CatalogKey catKey; + HFSPlusExtentKey extentKey; + HFSPlusExtentRecord extentData; + HFSPlusAttrKey attrKey; + HFSPlusAttrRecord attrRecord; + UInt16 recordSize; + + enum locationTypes {volumeHeader, catalogBTree, extentsBTree, attributeBTree} foundLocation; + + UInt32 foundExtentIndex = 0; + Boolean noMoreExtents = true; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Find correct location of this overlapping extent */ + if (extentInfo->forkType == kEAData) { + assert(isHFSPlus == true); + + /* Search extent in attribute btree */ + err = SearchExtentInAttributeBT (GPtr, extentInfo, &attrKey, &attrRecord, + &recordSize, &foundExtentIndex); + if (err != noErr) { + goto out; + } + foundLocation = attributeBTree; + } else { /* kDataFork or kRsrcFork */ + if (extentInfo->fileID < kHFSFirstUserCatalogNodeID) { + /* Ignore these fileIDs in repair. Bad block file blocks should + * never be moved. kHFSRepairCatalogFileID and kHFSBogusExtentFileID + * are temporary runtime files. We need to return error to the caller + * to deallocate disk blocks preallocated during preflight + * to move the overlapping extents. Any other extent that overlaps + * with these extents might have moved successfully, thus repairing + * the problem. + */ + if ((extentInfo->fileID == kHFSBadBlockFileID) || + (extentInfo->fileID == kHFSBogusExtentFileID) || + (extentInfo->fileID == kHFSRepairCatalogFileID)) { + err = paramErr; + goto out; + } + + /* Search for extent record in the volume header */ + err = SearchExtentInVH (GPtr, extentInfo, &foundExtentIndex, &noMoreExtents); + foundLocation = volumeHeader; + } else { + /* Search the extent record from the catalog btree */ + err = SearchExtentInCatalogBT (GPtr, extentInfo, &catKey, &catRecord, + &recordSize, &foundExtentIndex, &noMoreExtents); + foundLocation = catalogBTree; + } + if (err != noErr) { + if (noMoreExtents == false) { + /* search extent in extents overflow btree */ + err = SearchExtentInExtentBT (GPtr, extentInfo, &extentKey, + &extentData, &recordSize, &foundExtentIndex); + foundLocation = extentsBTree; + if (err != noErr) { + DPRINTF (d_error|d_overlap, "%s: No matching extent record found in extents btree for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); + goto out; + } + } else { + /* No more extents exist for this file */ + DPRINTF (d_error|d_overlap, "%s: No matching extent record found for fileID = %d\n", __FUNCTION__, extentInfo->fileID); + goto out; + } + } + } + /* Copy disk blocks from old extent to new extent */ + err = CopyDiskBlocks(GPtr, extentInfo->startBlock, extentInfo->blockCount, + extentInfo->newStartBlock); + if (err != noErr) { + DPRINTF (d_error|d_overlap, "%s: Error in copying disk blocks for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); + goto out; + } + + /* Replace the old start block in extent record with new start block */ + if (foundLocation == catalogBTree) { + err = UpdateExtentInCatalogBT(GPtr, extentInfo, &catKey, &catRecord, + &recordSize, foundExtentIndex); + } else if (foundLocation == volumeHeader) { + err = UpdateExtentInVH(GPtr, extentInfo, foundExtentIndex); + } else if (foundLocation == extentsBTree) { + extentData[foundExtentIndex].startBlock = extentInfo->newStartBlock; + err = UpdateExtentRecord(GPtr->calculatedVCB, NULL, &extentKey, extentData, kNoHint); + } else if (foundLocation == attributeBTree) { + err = 
UpdateExtentInAttributeBT(GPtr, extentInfo, &attrKey, &attrRecord, + &recordSize, foundExtentIndex); + + } + if (err != noErr) { + DPRINTF (d_error|d_overlap, "%s: Error in updating extent record for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); + goto out; + } + +out: + return err; +} /* MoveExtent */ + +/* Function: CreateCorruptFileSymlink + * + * Description: Create symlink to point to the corrupt files that might + * have data loss due to repair (overlapping extents, bad extents) + * + * The function looks up directory with name "DamagedFiles" in the + * root of the file system being repaired. If it does not exists, it + * creates the directory. The symlink to damaged file is created in this + * directory. + * + * If fileID >= kHFSFirstUserCatalogNodeID, + * Lookup the filename and path to the file based on file ID. Create the + * new file name as "fileID filename" and data as the relative path of the file + * from the root of the volume. + * If either + * the volume is plain HFS, or + * the length of the path pointed by data is greater than PATH_MAX, or + * the length of any intermediate path component is greater than NAME_MAX, + * Create a plain file with given data. + * Else + * Create a symlink. + * Else + * Find the name of file based on ID (ie. Catalog BTree, etc), and create plain + * regular file with name "fileID filename" and data as "System File: + * filename". + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. fileID - fileID of the source for creating symlink + * + * Output: + * returns zero on success, non-zero on failure. + * memFullErr - Not enough memory + */ +static OSErr CreateCorruptFileSymlink(SGlobPtr GPtr, UInt32 fileID) +{ + OSErr err = noErr; + Boolean isHFSPlus; + char *filename = NULL; + unsigned int filenamelen; + char *data = NULL; + unsigned int datalen; + unsigned int filenameoffset; + unsigned int dataoffset; + UInt32 damagedDirID; + UInt16 status; + UInt16 fileType; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Lookup and create, if required, the DamagedFiles folder */ + damagedDirID = CreateDirByName(GPtr, (u_char *)"DamagedFiles", kHFSRootFolderID); + if (damagedDirID == 0) { + goto out; + } + + /* Allocate (kHFSPlusMaxFileNameChars * 4) for unicode - utf8 conversion */ + filenamelen = kHFSPlusMaxFileNameChars * 4; + filename = malloc(filenamelen); + if (!filename) { + err = memFullErr; + goto out; + } + + /* Allocate (PATH_MAX * 4) instead of PATH_MAX for unicode - utf8 conversion */ + datalen = PATH_MAX * 4; + data = malloc(datalen); + if (!data) { + err = memFullErr; + goto out; + } + + /* Find filename, path for fileID >= 16 and determine new fileType */ + if (fileID >= kHFSFirstUserCatalogNodeID) { + char *name; + char *path; + + /* construct symlink data with .. 
prefix */ + dataoffset = sprintf (data, ".."); + path = data + dataoffset; + datalen -= dataoffset; + + /* construct new filename prefix with fileID<space> */ + filenameoffset = sprintf (filename, "%08x ", fileID); + name = filename + filenameoffset; + filenamelen -= filenameoffset; + + /* find file name and path (data for symlink) for fileID */ + err = GetFileNamePathByID(GPtr, fileID, path, &datalen, + name, &filenamelen, &status); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in getting name/path for fileID = %d (err=%d)\n", __FUNCTION__, fileID, err); +#endif + goto out; + } + + /* update length of path and filename */ + filenamelen += filenameoffset; + datalen += dataoffset; + + /* If + * (1) the volume is plain HFS volume, or + * (2) any intermediate component in path was more than NAME_MAX bytes, or + * (3) the entire path was greater than PATH_MAX bytes + * then create regular file + * else create symlink. + */ + if (!isHFSPlus || (status & FPATH_BIGNAME) || (datalen > PATH_MAX)) { + /* create file */ + fileType = S_IFREG; + } else { + /* create symlink */ + fileType = S_IFLNK; + } + } else { + /* for fileID < 16, create regular file */ + fileType = S_IFREG; + + /* construct the name of the file */ + filenameoffset = sprintf (filename, "%08x ", fileID); + filenamelen -= filenameoffset; + err = GetSystemFileName (fileID, (filename + filenameoffset), &filenamelen); + filenamelen += filenameoffset; + + /* construct the data of the file */ + dataoffset = sprintf (data, "System File: "); + datalen -= dataoffset; + err = GetSystemFileName (fileID, (data + dataoffset), &datalen); + datalen += dataoffset; + } + + /* Create new file */ + err = CreateFileByName (GPtr, damagedDirID, fileType, (u_char *)filename, + filenamelen, (u_char *)data, datalen); + /* Mask error if file already exists */ + if (err == EEXIST) { + err = noErr; + } + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in creating fileType = %d for fileID = %d (err=%d)\n", __FUNCTION__, fileType, fileID, err); +#endif + goto out; + } + +out: + if (err) { + if ((GPtr->PrintStat & S_SymlinkCreate) == 0) { + fsckPrint(GPtr->context, E_SymlinkCreate); + GPtr->PrintStat|= S_SymlinkCreate; + } + } else { + if ((GPtr->PrintStat & S_DamagedDir) == 0) { + fsckPrint(GPtr->context, fsckCorruptFilesDirectory, "DamagedFiles"); + GPtr->PrintStat|= S_DamagedDir; + } + } + + if (data) { + free (data); + } + if (filename) { + free (filename); + } + + return err; +} /* CreateCorruptFileSymlink */ + +/* Function: SearchExtentInAttributeBT + * + * Description: Search extent with given information (fileID, attribute name, + * startBlock, blockCount) in the attribute BTree. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * + * Output: + * Returns zero on success, fnfErr on failure. + * 1. *attrKey - Attribute key for given fileID and attribute name, if found. + * 2. *attrRecord - Attribute record for given fileID and attribute name, if found. + * 3. *recordSize - Size of the record being returned. + * 4. *foundExtentIndex - Index in extent record which matches the input data. 
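+ *
+ * Note: the matching extent may live either in the attribute's fork-data
+ * record (kHFSPlusAttrForkData) or in one of its overflow extent records
+ * (kHFSPlusAttrExtents); both record types are checked.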
+ */
+static OSErr SearchExtentInAttributeBT(SGlobPtr GPtr, ExtentInfo *extentInfo,
+ HFSPlusAttrKey *attrKey, HFSPlusAttrRecord *attrRecord,
+ UInt16 *recordSize, UInt32 *foundExtentIndex)
+{
+ OSErr result = fnfErr;
+ BTreeIterator iterator;
+ FSBufferDescriptor btRecord;
+ HFSPlusAttrKey *key;
+ Boolean noMoreExtents;
+ unsigned char *attrname = NULL;
+ size_t attrnamelen;
+
+ assert((extentInfo->attrname != NULL));
+
+ attrname = malloc (XATTR_MAXNAMELEN + 1);
+ if (!attrname) {
+ result = memFullErr;
+ goto out;
+ }
+
+ /* Initialize the iterator, attribute record buffer, and attribute key */
+ ClearMemory(&iterator, sizeof(BTreeIterator));
+ key = (HFSPlusAttrKey *)&iterator.key;
+ attrnamelen = strlen(extentInfo->attrname);
+ BuildAttributeKey(extentInfo->fileID, 0, (unsigned char *)extentInfo->attrname, attrnamelen, key);
+
+ btRecord.bufferAddress = attrRecord;
+ btRecord.itemCount = 1;
+ btRecord.itemSize = sizeof(HFSPlusAttrRecord);
+
+ /* Search for the attribute record
+ * Warning: Attribute record of type kHFSPlusAttrInlineData may be
+ * truncated on read! (4425232). This function only uses recordType
+ * field from inline attribute record.
+ */
+ result = BTSearchRecord(GPtr->calculatedAttributesFCB, &iterator,
+ kInvalidMRUCacheKey, &btRecord, recordSize, &iterator);
+ if (result) {
+ DPRINTF (d_error|d_overlap, "%s: Error finding attribute record (err=%d) for fileID = %d, attrname = %s\n", __FUNCTION__, result, extentInfo->fileID, extentInfo->attrname);
+ goto out;
+ }
+
+ /* Search the attribute record for the overlapping extent. If found, return
+ * success. If not, iterate to the next record. If it is a valid
+ * attribute extent record belonging to the same attribute, search
+ * for the desired extent.
+ */
+ while (1) {
+ if (attrRecord->recordType == kHFSPlusAttrForkData) {
+ result = FindExtentInExtentRec(true, extentInfo->startBlock,
+ extentInfo->blockCount, attrRecord->forkData.theFork.extents,
+ foundExtentIndex, &noMoreExtents);
+ if ((result == noErr) || (noMoreExtents == true)) {
+ goto out;
+ }
+ } else if (attrRecord->recordType == kHFSPlusAttrExtents) {
+ result = FindExtentInExtentRec(true, extentInfo->startBlock,
+ extentInfo->blockCount, attrRecord->overflowExtents.extents,
+ foundExtentIndex, &noMoreExtents);
+ if ((result == noErr) || (noMoreExtents == true)) {
+ goto out;
+ }
+ } else {
+ /* Invalid attribute record. This function should not find any
+ * attribute record except forkData and AttrExtents.
+ */
+ result = fnfErr;
+ goto out;
+ }
+
+ /* Iterate to the next record
+ * Warning: Attribute record of type kHFSPlusAttrInlineData may be
+ * truncated on read! (4425232). This function only uses recordType
+ * field from inline attribute record.
+ */ + result = BTIterateRecord(GPtr->calculatedAttributesFCB, kBTreeNextRecord, + &iterator, &btRecord, recordSize); + if (result) { + goto out; + } + + (void) utf_encodestr(key->attrName, key->attrNameLen * 2, attrname, &attrnamelen, XATTR_MAXNAMELEN + 1); + attrname[attrnamelen] = '\0'; + + /* Check if the attribute record belongs to the same attribute */ + if ((key->fileID != extentInfo->fileID) || + (strcmp((char *)attrname, extentInfo->attrname))) { + /* The attribute record belongs to another attribute */ + result = fnfErr; + goto out; + } + } + +out: + /* Copy the correct key to the caller */ + if (result == noErr) { + CopyMemory(key, attrKey, sizeof(HFSPlusAttrKey)); + } + + if (attrname != NULL) { + free (attrname); + } + + return (result); +} + +/* Function: UpdateExtentInAttributeBT + * + * Description: Update extent record with given information (fileID, startBlock, + * blockCount) in attribute BTree. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * 3. *attrKey - Attribute key for record to update. + * 4. *attrRecord - Attribute record to update. + * 5. *recordSize - Size of the record. + * 6. foundExtentIndex - Index in extent record to update. + * + * Output: + * Returns zero on success, non-zero on failure. + */ +static OSErr UpdateExtentInAttributeBT (SGlobPtr GPtr, ExtentInfo *extentInfo, + HFSPlusAttrKey *attrKey, HFSPlusAttrRecord *attrRecord, + UInt16 *recordSize, UInt32 foundInExtentIndex) +{ + OSErr err; + UInt32 foundHint; + + assert ((attrRecord->recordType == kHFSPlusAttrForkData) || + (attrRecord->recordType == kHFSPlusAttrExtents)); + + /* Update the new start block count in the extent */ + if (attrRecord->recordType == kHFSPlusAttrForkData) { + attrRecord->forkData.theFork.extents[foundInExtentIndex].startBlock = + extentInfo->newStartBlock; + } else if (attrRecord->recordType == kHFSPlusAttrExtents) { + attrRecord->overflowExtents.extents[foundInExtentIndex].startBlock = + extentInfo->newStartBlock; + } + + /* Replace the attribute record. + * Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). + */ + err = ReplaceBTreeRecord (GPtr->calculatedAttributesFCB, attrKey, kNoHint, + attrRecord, *recordSize, &foundHint); + + return (err); +} + +/* Function: SearchExtentInVH + * + * Description: Search extent with given information (fileID, startBlock, + * blockCount) in volume header. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * + * Output: + * Returns zero on success, fnfErr on failure. + * 1. *foundExtentIndex - Index in extent record which matches the input data. + * 2. *noMoreExtents - Indicates that no more extents will exist for this + * fileID in extents BTree. 
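+ *
+ * Note: only the five special files whose extents live in the volume header
+ * are handled here (extents, catalog, allocation, startup and attributes
+ * files); for any other fileID no FCB is matched and fnfErr is returned.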
+ */ +static OSErr SearchExtentInVH(SGlobPtr GPtr, ExtentInfo *extentInfo, UInt32 *foundExtentIndex, Boolean *noMoreExtents) +{ + OSErr err = fnfErr; + Boolean isHFSPlus; + SFCB *fcb = NULL; + + isHFSPlus = VolumeObjectIsHFSPlus(); + *noMoreExtents = true; + + /* Find correct FCB structure */ + switch (extentInfo->fileID) { + case kHFSExtentsFileID: + fcb = GPtr->calculatedVCB->vcbExtentsFile; + break; + + case kHFSCatalogFileID: + fcb = GPtr->calculatedVCB->vcbCatalogFile; + break; + + case kHFSAllocationFileID: + fcb = GPtr->calculatedVCB->vcbAllocationFile; + break; + + case kHFSStartupFileID: + fcb = GPtr->calculatedVCB->vcbStartupFile; + break; + + case kHFSAttributesFileID: + fcb = GPtr->calculatedVCB->vcbAttributesFile; + break; + }; + + /* If extent found, find correct extent index */ + if (fcb != NULL) { + if (isHFSPlus) { + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, fcb->fcbExtents32, + foundExtentIndex, noMoreExtents); + } else { + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, + (*(HFSPlusExtentRecord *)fcb->fcbExtents16), + foundExtentIndex, noMoreExtents); + } + } + return err; +} /* SearchExtentInVH */ + +/* Function: UpdateExtentInVH + * + * Description: Update the extent record for given fileID and index in the + * volume header with new start block. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * 3. foundExtentIndex - Index in extent record to update. + * + * Output: + * Returns zero on success, fnfErr on failure. This function will fail an + * incorrect fileID is passed. + */ +static OSErr UpdateExtentInVH (SGlobPtr GPtr, ExtentInfo *extentInfo, UInt32 foundExtentIndex) +{ + OSErr err = fnfErr; + Boolean isHFSPlus; + SFCB *fcb = NULL; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Find correct FCB structure */ + switch (extentInfo->fileID) { + case kHFSExtentsFileID: + fcb = GPtr->calculatedVCB->vcbExtentsFile; + break; + + case kHFSCatalogFileID: + fcb = GPtr->calculatedVCB->vcbCatalogFile; + break; + + case kHFSAllocationFileID: + fcb = GPtr->calculatedVCB->vcbAllocationFile; + break; + + case kHFSStartupFileID: + fcb = GPtr->calculatedVCB->vcbStartupFile; + break; + + case kHFSAttributesFileID: + fcb = GPtr->calculatedVCB->vcbAttributesFile; + break; + }; + + /* If extent found, find correct extent index */ + if (fcb != NULL) { + if (isHFSPlus) { + fcb->fcbExtents32[foundExtentIndex].startBlock = extentInfo->newStartBlock; + } else { + fcb->fcbExtents16[foundExtentIndex].startBlock = extentInfo->newStartBlock; + } + MarkVCBDirty(GPtr->calculatedVCB); + err = noErr; + } + return err; +} /* UpdateExtentInVH */ + +/* Function: SearchExtentInCatalogBT + * + * Description: Search extent with given information (fileID, startBlock, + * blockCount) in catalog BTree. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * + * Output: + * Returns zero on success, non-zero on failure. + * 1. *catKey - Catalog key for given fileID, if found. + * 2. *catRecord - Catalog record for given fileID, if found. + * 3. *recordSize - Size of the record being returned. + * 4. *foundExtentIndex - Index in extent record which matches the input data. + * 5. *noMoreExtents - Indicates that no more extents will exist for this + * fileID in extents BTree. 
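+ *
+ * Note: extentInfo->forkType selects whether the data fork or the resource
+ * fork extents of the catalog record are searched.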
+ */ +static OSErr SearchExtentInCatalogBT(SGlobPtr GPtr, ExtentInfo *extentInfo, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize, UInt32 *foundExtentIndex, Boolean *noMoreExtents) +{ + OSErr err; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Search catalog btree for this file ID */ + err = GetCatalogRecord(GPtr, extentInfo->fileID, isHFSPlus, catKey, catRecord, + recordSize); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: No matching catalog record found for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); +#endif + goto out; + } + + if (isHFSPlus) { + /* HFS Plus */ + if (extentInfo->forkType == kDataFork) { + /* data fork */ + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, + catRecord->hfsPlusFile.dataFork.extents, + foundExtentIndex, noMoreExtents); + } else { + /* resource fork */ + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, + catRecord->hfsPlusFile.resourceFork.extents, + foundExtentIndex, noMoreExtents); + } + } else { + /* HFS */ + if (extentInfo->forkType == kDataFork) { + /* data fork */ + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, + (*(HFSPlusExtentRecord *)catRecord->hfsFile.dataExtents), + foundExtentIndex, noMoreExtents); + } else { + /* resource fork */ + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, + (*(HFSPlusExtentRecord *)catRecord->hfsFile.rsrcExtents), + foundExtentIndex, noMoreExtents); + } + } + +out: + return err; +} /* SearchExtentInCatalogBT */ + +/* Function: UpdateExtentInCatalogBT + * + * Description: Update extent record with given information (fileID, startBlock, + * blockCount) in catalog BTree. + * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * 3. *catKey - Catalog key for record to update. + * 4. *catRecord - Catalog record to update. + * 5. *recordSize - Size of the record. + * 6. foundExtentIndex - Index in extent record to update. + * + * Output: + * Returns zero on success, non-zero on failure. + */ +static OSErr UpdateExtentInCatalogBT (SGlobPtr GPtr, ExtentInfo *extentInfo, CatalogKey *catKey, CatalogRecord *catRecord, UInt16 *recordSize, UInt32 foundInExtentIndex) +{ + OSErr err; + Boolean isHFSPlus; + UInt32 foundHint; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Modify the catalog record */ + if (isHFSPlus) { + if (extentInfo->forkType == kDataFork) { + catRecord->hfsPlusFile.dataFork.extents[foundInExtentIndex].startBlock = extentInfo->newStartBlock; + } else { + catRecord->hfsPlusFile.resourceFork.extents[foundInExtentIndex].startBlock = extentInfo->newStartBlock; + } + } else { + if (extentInfo->forkType == kDataFork) { + catRecord->hfsFile.dataExtents[foundInExtentIndex].startBlock = extentInfo->newStartBlock; + } else { + catRecord->hfsFile.rsrcExtents[foundInExtentIndex].startBlock = extentInfo->newStartBlock; + } + } + + /* Replace the catalog record */ + err = ReplaceBTreeRecord (GPtr->calculatedCatalogFCB, catKey, kNoHint, + catRecord, *recordSize, &foundHint); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in replacing catalog record for fileID = %d (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); +#endif + } + return err; +} /* UpdateExtentInCatalogBT */ + +/* Function: SearchExtentInExtentBT + * + * Description: Search extent with given information (fileID, startBlock, + * blockCount) in Extent BTree. 
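+ *
+ * The search positions at the first overflow record for the fork (key built
+ * with startBlock 0) and then walks forward until the fileID or forkType in
+ * the key changes.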
+ * + * Input: + * 1. GPtr - global scavenger structure pointer. + * 2. extentInfo - Information about extent to be searched. + * + * Output: + * Returns zero on success, non-zero on failure. + * fnfErr - desired extent record was not found. + * 1. *extentKey - Extent key, if found. + * 2. *extentRecord - Extent record, if found. + * 3. *recordSize - Size of the record being returned. + * 4. *foundExtentIndex - Index in extent record which matches the input data. + */ +static OSErr SearchExtentInExtentBT(SGlobPtr GPtr, ExtentInfo *extentInfo, HFSPlusExtentKey *extentKey, HFSPlusExtentRecord *extentRecord, UInt16 *recordSize, UInt32 *foundExtentIndex) +{ + OSErr err = noErr; + Boolean isHFSPlus; + Boolean noMoreExtents = true; + UInt32 hint; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* set up extent key */ + BuildExtentKey (isHFSPlus, extentInfo->forkType, extentInfo->fileID, 0, extentKey); + err = SearchBTreeRecord (GPtr->calculatedExtentsFCB, extentKey, kNoHint, + extentKey, extentRecord, recordSize, &hint); + if ((err != noErr) && (err != btNotFound)) { +#if DEBUG_OVERLAP + plog ("%s: Error on searching first record for fileID = %d in Extents Btree (err=%d)\n", __FUNCTION__, extentInfo->fileID, err); +#endif + goto out; + } + + if (err == btNotFound) + { + /* Position to the first record for the given fileID */ + err = GetBTreeRecord (GPtr->calculatedExtentsFCB, 1, extentKey, + extentRecord, recordSize, &hint); + } + + while (err == noErr) + { + /* Break out if we see different fileID, forkType in the BTree */ + if (isHFSPlus) { + if ((extentKey->fileID != extentInfo->fileID) || + (extentKey->forkType != extentInfo->forkType)) { + err = fnfErr; + break; + } + } else { + if ((((HFSExtentKey *)extentKey)->fileID != extentInfo->fileID) || + (((HFSExtentKey *)extentKey)->forkType != extentInfo->forkType)) { + err = fnfErr; + break; + } + } + + /* Check the extents record for corresponding startBlock, blockCount */ + err = FindExtentInExtentRec(isHFSPlus, extentInfo->startBlock, + extentInfo->blockCount, *extentRecord, + foundExtentIndex, &noMoreExtents); + if (err == noErr) { + break; + } + if (noMoreExtents == true) { + err = fnfErr; + break; + } + + /* Try next record for this fileID and forkType */ + err = GetBTreeRecord (GPtr->calculatedExtentsFCB, 1, extentKey, + extentRecord, recordSize, &hint); + } + +out: + return err; +} /* SearchExtentInExtentBT */ + +/* Function: FindExtentInExtentRec + * + * Description: Traverse the given extent record (size based on if the volume is + * HFS or HFSPlus volume) and find the index location if the given startBlock + * and blockCount match. + * + * Input: + * 1. isHFSPlus - If the volume is plain HFS or HFS Plus. + * 2. startBlock - startBlock to be searched in extent record. + * 3. blockCount - blockCount to be searched in extent record. + * 4. extentData - Extent Record to be searched. + * + * Output: + * Returns zero if the match is found, else fnfErr on failure. + * 1. *foundExtentIndex - Index in extent record which matches the input data. + * 2. *noMoreExtents - Indicates that no more extents exist after this extent + * record. 
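+ *
+ * For example (hypothetical record): searching {(100,8), (250,4), (0,0), ...}
+ * for startBlock 250 and blockCount 4 returns index 1, while startBlock 250
+ * with blockCount 2 returns fnfErr because both fields must match exactly.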
+ */ +static OSErr FindExtentInExtentRec (Boolean isHFSPlus, UInt32 startBlock, UInt32 blockCount, const HFSPlusExtentRecord extentData, UInt32 *foundExtentIndex, Boolean *noMoreExtents) +{ + OSErr err = noErr; + UInt32 numOfExtents; + Boolean foundExtent; + int i; + + foundExtent = false; + *noMoreExtents = false; + *foundExtentIndex = 0; + + if (isHFSPlus) { + numOfExtents = kHFSPlusExtentDensity; + } else { + numOfExtents = kHFSExtentDensity; + } + + for (i=0; i<numOfExtents; i++) { + if (extentData[i].blockCount == 0) { + /* no more extents left to check */ + *noMoreExtents = true; + break; + } + if ((startBlock == extentData[i].startBlock) && + (blockCount == extentData[i].blockCount)) { + foundExtent = true; + *foundExtentIndex = i; + break; + } + } + + if (foundExtent == false) { + err = fnfErr; + } + + return err; +} /* FindExtentInExtentRec */ + +/* Function: GetSystemFileName + * + * Description: Return the name of the system file based on fileID + * + * Input: + * 1. fileID - fileID whose name is to be returned. + * 2. *filenamelen - length of filename buffer. + * + * Output: + * 1. *filename - filename, is limited by the length of filename buffer passed + * in *filenamelen. + * 2. *filenamelen - length of the filename + * Always returns zero. + */ +OSErr GetSystemFileName(UInt32 fileID, char *filename, unsigned int *filenamelen) +{ + OSErr err = noErr; + unsigned int len; + + if (filename) { + len = *filenamelen - 1; + switch (fileID) { + case kHFSExtentsFileID: + strncpy (filename, "Extents Overflow BTree", len); + break; + + case kHFSCatalogFileID: + strncpy (filename, "Catalog BTree", len); + break; + + case kHFSAllocationFileID: + strncpy (filename, "Allocation File", len); + break; + + case kHFSStartupFileID: + strncpy (filename, "Startup File", len); + break; + + case kHFSAttributesFileID: + strncpy (filename, "Attributes BTree", len); + break; + + case kHFSBadBlockFileID: + strncpy (filename, "Bad Allocation File", len); + break; + + case kHFSRepairCatalogFileID: + strncpy (filename, "Repair Catalog File", len); + break; + + case kHFSBogusExtentFileID: + strncpy (filename, "Bogus Extents File", len); + break; + + default: + strncpy (filename, "Unknown File", len); + break; + }; + filename[len] = '\0'; + *filenamelen = strlen (filename); + } + return err; +} + +/* structure to store the intermediate path components during BTree traversal. + * This is used as a LIFO linked list + */ +struct fsPathString +{ + char *name; + unsigned int namelen; + struct fsPathString *childPtr; +}; + +/* Function: GetFileNamePathByID + * + * Description: Return the file/directory name and/or full path by ID. The + * length of the strings returned is limited by string lengths passed as + * parameters. + * The function lookups catalog thread record for given fileID and its parents + * until it reaches the Root Folder. + * + * Note: + * 1. The path returned currently does not return mangled names. + * 2. Either one or both of fullPath and fileName can be NULL. + * 3. fullPath and fileName are returned as NULL-terminated UTF8 strings. + * 4. Returns error if fileID < kHFSFirstUserCatalogID. + * + * Input: + * 1. GPtr - global scavenger structure pointer + * 2. fileID - fileID for the target file/directory for finding the path + * 3. fullPathLen - size of array to return full path + * 4. 
fileNameLen - size of array to return file name + * + * Output: + * Return value: zero on success, non-zero on failure + * memFullErr - Not enough memory + * paramErr - Invalid paramter + * + * The data in *fileNameLen and *fullPathLen is undefined on error. + * + * 1. fullPath - If fullPath is non-NULL, full path of file/directory is + * returned (size limited by fullPathLen) + * 2. *fullPathLen - length of fullPath returned. + * 3. fileName - If fileName is non-NULL, file name of fileID is returned (size + * limited by fileNameLen). + * 4. *fileNameLen - length of fileName returned. + * 5. *status - status of operation, any of the following bits can be set + * (defined in dfalib/Scavenger.h). + * FNAME_BUF2SMALL - filename buffer is too small. + * FNAME_BIGNAME - filename is more than NAME_MAX bytes. + * FPATH_BUF2SMALL - path buffer is too small. + * FPATH_BIGNAME - one or more intermediate path component is greater + * than NAME_MAX bytes. + * F_RESERVE_FILEID- fileID is less than kHFSFirstUserCatalogNodeID. + */ +OSErr GetFileNamePathByID(SGlobPtr GPtr, UInt32 fileID, char *fullPath, unsigned int *fullPathLen, char *fileName, unsigned int *fileNameLen, u_int16_t *status) +{ + OSErr err = noErr; + Boolean isHFSPlus; + UInt16 recordSize; + UInt16 curStatus = 0; + UInt32 hint; + CatalogKey catKey; + CatalogRecord catRecord; + struct fsPathString *listHeadPtr = NULL; + struct fsPathString *listTailPtr = NULL; + struct fsPathString *curPtr = NULL; + u_char *filename = NULL; + size_t namelen; + + if (!fullPath && !fileName) { + goto out; + } + + if (fileID < kHFSFirstUserCatalogNodeID) { + curStatus = F_RESERVE_FILEID; + err = paramErr; + goto out; + } + + isHFSPlus = VolumeObjectIsHFSPlus(); + + if (isHFSPlus) { + filename = malloc(kHFSPlusMaxFileNameChars * 3 + 1); + } else { + filename = malloc(kHFSMaxFileNameChars + 1); + } + if (!filename) { + err = memFullErr; +#if DEBUG_OVERLAP + plog ("%s: Not enough memory (err=%d)\n", __FUNCTION__, err); +#endif + goto out; + } + + while (fileID != kHFSRootFolderID) { + /* lookup for thread record for this fileID */ + BuildCatalogKey(fileID, NULL, isHFSPlus, &catKey); + err = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &catKey, kNoHint, + &catKey, &catRecord, &recordSize, &hint); + if (err) { +#if DEBUG_OVERLAP + plog ("%s: Error finding thread record for fileID = %d (err=%d)\n", __FUNCTION__, fileID, err); +#endif + goto out; + } + + /* Check if this is indeed a thread record */ + if ((catRecord.hfsPlusThread.recordType != kHFSPlusFileThreadRecord) && + (catRecord.hfsPlusThread.recordType != kHFSPlusFolderThreadRecord) && + (catRecord.hfsThread.recordType != kHFSFileThreadRecord) && + (catRecord.hfsThread.recordType != kHFSFolderThreadRecord)) { + err = paramErr; +#if DEBUG_OVERLAP + plog ("%s: Error finding valid thread record for fileID = %d\n", __FUNCTION__, fileID); +#endif + goto out; + } + + /* Convert the name string to utf8 */ + if (isHFSPlus) { + (void) utf_encodestr(catRecord.hfsPlusThread.nodeName.unicode, + catRecord.hfsPlusThread.nodeName.length * 2, + filename, &namelen, kHFSPlusMaxFileNameChars * 3 + 1); + } else { + namelen = catRecord.hfsThread.nodeName[0]; + memcpy (filename, catKey.hfs.nodeName, namelen); + } + + /* Store the path name in LIFO linked list */ + curPtr = malloc(sizeof(struct fsPathString)); + if (!curPtr) { + err = memFullErr; +#if DEBUG_OVERLAP + plog ("%s: Not enough memory (err=%d)\n", __FUNCTION__, err); +#endif + goto out; + } + + /* Do not NULL terminate the string */ + curPtr->namelen = namelen; + 
curPtr->name = malloc(namelen);
+ if (!curPtr->name) {
+ err = memFullErr;
+#if DEBUG_OVERLAP
+ plog ("%s: Not enough memory (err=%d)\n", __FUNCTION__, err);
+#endif
+ free(curPtr);
+ goto out;
+ }
+ memcpy (curPtr->name, filename, namelen);
+ curPtr->childPtr = listHeadPtr;
+ listHeadPtr = curPtr;
+ if (listTailPtr == NULL) {
+ listTailPtr = curPtr;
+ }
+
+ /* look up the parentID */
+ if (isHFSPlus) {
+ fileID = catRecord.hfsPlusThread.parentID;
+ } else {
+ fileID = catRecord.hfsThread.parentID;
+ }
+
+ /* no need to find the entire path, bail out */
+ if (fullPath == NULL) {
+ break;
+ }
+ }
+
+ /* return the name of the file/directory */
+ if (fileName) {
+ /* Do not overflow the buffer length passed */
+ if (*fileNameLen < (listTailPtr->namelen + 1)) {
+ *fileNameLen = *fileNameLen - 1;
+ curStatus |= FNAME_BUF2SMALL;
+ } else {
+ *fileNameLen = listTailPtr->namelen;
+ }
+ if (*fileNameLen > NAME_MAX) {
+ curStatus |= FNAME_BIGNAME;
+ }
+ memcpy (fileName, listTailPtr->name, *fileNameLen);
+ fileName[*fileNameLen] = '\0';
+ }
+
+ /* return the full path of the file/directory */
+ if (fullPath) {
+ /* Do not overflow the buffer length passed and reserve the last byte for NULL termination */
+ unsigned int bytesRemain = *fullPathLen - 1;
+
+ *fullPathLen = 0;
+ while (listHeadPtr != NULL) {
+ if (bytesRemain == 0) {
+ break;
+ }
+ memcpy ((fullPath + *fullPathLen), "/", 1);
+ *fullPathLen += 1;
+ bytesRemain--;
+
+ if (bytesRemain == 0) {
+ break;
+ }
+ if (bytesRemain < listHeadPtr->namelen) {
+ namelen = bytesRemain;
+ curStatus |= FPATH_BUF2SMALL;
+ } else {
+ namelen = listHeadPtr->namelen;
+ }
+ if (namelen > NAME_MAX) {
+ curStatus |= FPATH_BIGNAME;
+ }
+ memcpy ((fullPath + *fullPathLen), listHeadPtr->name, namelen);
+ *fullPathLen += namelen;
+ bytesRemain -= namelen;
+
+ curPtr = listHeadPtr;
+ listHeadPtr = listHeadPtr->childPtr;
+ free(curPtr->name);
+ free(curPtr);
+ }
+
+ fullPath[*fullPathLen] = '\0';
+ }
+
+ err = noErr;
+
+out:
+ if (status) {
+ *status = curStatus;
+ }
+
+ /* free any remaining allocated memory */
+ while (listHeadPtr != NULL) {
+ curPtr = listHeadPtr;
+ listHeadPtr = listHeadPtr->childPtr;
+ if (curPtr->name) {
+ free (curPtr->name);
+ }
+ free (curPtr);
+ }
+ if (filename) {
+ free (filename);
+ }
+
+ return err;
+} /* GetFileNamePathByID */
+
+/* Function: CopyDiskBlocks
+ *
+ * Description: Copy data from the source extent to the destination extent
+ * for blockCount blocks on the disk.
+ *
+ * Input:
+ * 1. GPtr - pointer to global scavenger structure.
+ * 2. startAllocationBlock - startBlock for old extent
+ * 3. blockCount - total blocks to copy
+ * 4. newStartAllocationBlock - startBlock for new extent
+ *
+ * Output:
+ * err, zero on success, non-zero on failure.
+ */
+OSErr CopyDiskBlocks(SGlobPtr GPtr, const UInt32 startAllocationBlock, const UInt32 blockCount, const UInt32 newStartAllocationBlock )
+{
+ OSErr err = noErr;
+ SVCB *vcb;
+ uint64_t old_offset;
+ uint64_t new_offset;
+ uint32_t sectorsPerBlock;
+
+ vcb = GPtr->calculatedVCB;
+ sectorsPerBlock = vcb->vcbBlockSize / Blk_Size;
+
+ old_offset = (vcb->vcbAlBlSt + (sectorsPerBlock * startAllocationBlock)) << Log2BlkLo;
+ new_offset = (vcb->vcbAlBlSt + (sectorsPerBlock * newStartAllocationBlock)) << Log2BlkLo;
+
+ err = CacheCopyDiskBlocks (vcb->vcbBlockCache, old_offset, new_offset,
+ blockCount * vcb->vcbBlockSize);
+ return err;
+} /* CopyDiskBlocks */
+
+/* Function: WriteBufferToDisk
+ *
+ * Description: Write the given buffer data to disk blocks.
+ * If the length of the buffer is not a multiple of the allocation block size,
+ * the disk is filled with zeros from the end of the buffer up to the
+ * end of the allocation blocks (specified by blockCount).
+ *
+ * Input:
+ * 1. GPtr - global scavenger structure pointer
+ * 2. startBlock - starting block number for writing data.
+ * 3. blockCount - total number of contiguous blocks to be written
+ * 4. buffer - data buffer to be written to disk
+ * 5. bufLen - length of data buffer to be written to disk.
+ *
+ * Output:
+ * returns zero on success, non-zero on failure.
+ */
+OSErr WriteBufferToDisk(SGlobPtr GPtr, UInt32 startBlock, UInt32 blockCount, u_char *buffer, int bufLen)
+{
+ OSErr err = noErr;
+ SVCB *vcb;
+ uint64_t offset;
+ uint32_t write_len;
+
+ vcb = GPtr->calculatedVCB;
+
+ /* Calculate offset and length */
+ offset = (vcb->vcbAlBlSt + ((vcb->vcbBlockSize / Blk_Size) * startBlock)) << Log2BlkLo;
+ write_len = blockCount * vcb->vcbBlockSize;
+
+ /* Write buffer to disk */
+ err = CacheWriteBufferToDisk (vcb->vcbBlockCache, offset, write_len, buffer, bufLen);
+
+ return err;
+} /* WriteBufferToDisk */
+
+// 2210409, in System 8.1, moving a file or folder would cause HFS+ thread records to be
+// 520 bytes in size. We only shrink the threads if other repairs are needed.
+static OSErr FixBloatedThreadRecords( SGlob *GPtr )
+{
+ CatalogRecord record;
+ CatalogKey foundKey;
+ UInt32 hint;
+ UInt16 recordSize;
+ SInt16 i = 0;
+ OSErr err;
+ SInt16 selCode = 0x8001; // Start with 1st record
+
+ err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recordSize, &hint );
+ ReturnIfError( err );
+
+ selCode = 1; // Get next record from now on
+
+ do
+ {
+ if ( ++i > 10 ) { (void) CheckForStop( GPtr ); i = 0; } // Spin the cursor every 10 entries
+
+ if ( (recordSize == sizeof(HFSPlusCatalogThread)) && ((record.recordType == kHFSPlusFolderThreadRecord) || (record.recordType == kHFSPlusFileThreadRecord)) )
+ {
+ // HFS Plus has variable sized threads so adjust to actual length
+ recordSize -= ( sizeof(record.hfsPlusThread.nodeName.unicode) - (record.hfsPlusThread.nodeName.length * sizeof(UniChar)) );
+
+ err = ReplaceBTreeRecord( GPtr->calculatedCatalogFCB, &foundKey, hint, &record, recordSize, &hint );
+ ReturnIfError( err );
+ }
+
+ err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recordSize, &hint );
+ } while ( err == noErr );
+
+ if ( err == btNotFound )
+ err = noErr;
+
+ return( err );
+}
+
+
+static OSErr
+FixMissingThreadRecords( SGlob *GPtr )
+{
+ struct MissingThread * mtp;
+ FSBufferDescriptor btRecord;
+ BTreeIterator iterator;
+ OSStatus result;
+ UInt16 dataSize;
+ Boolean headsUp;
+ UInt32 lostAndFoundDirID;
+
+ lostAndFoundDirID = 0;
+ headsUp = false;
+ for (mtp = GPtr->missingThreadList; mtp != NULL; mtp = mtp->link) {
+ if ( mtp->threadID == 0 )
+ continue;
+
+ // if the thread record information in the MissingThread struct is not there
+ // then we have a missing directory in addition to a missing thread record
+ // for that directory. We will recreate the missing directory in our
+ // lost+found directory.
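+ // (A parentID of zero in the saved thread record is the marker for this
+ // missing-directory case.)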
+ if ( mtp->thread.parentID == 0 ) { + if (embedded == 1 && debug == 0) { + return( R_RFail ); + } + if ( lostAndFoundDirID == 0 ) + lostAndFoundDirID = CreateDirByName( GPtr , (u_char *)"lost+found", kHFSRootFolderID); + if ( lostAndFoundDirID == 0 ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tCould not create lost+found directory \n" ); + return( R_RFail ); + } + fsckPrint(GPtr->context, E_NoDir, mtp->threadID); + result = FixMissingDirectory( GPtr, mtp->threadID, lostAndFoundDirID ); + if ( result != 0 ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tCould not recreate a missing directory (error %d)\n", result ); + return( R_RFail ); + } + else + headsUp = true; + continue; + } + + dataSize = 10 + (mtp->thread.nodeName.length * 2); + btRecord.bufferAddress = (void *)&mtp->thread; + btRecord.itemSize = dataSize; + btRecord.itemCount = 1; + iterator.hint.nodeNum = 0; + BuildCatalogKey(mtp->threadID, NULL, true, (CatalogKey*)&iterator.key); + + result = BTInsertRecord(GPtr->calculatedCatalogFCB, &iterator, &btRecord, dataSize); + if (result) + return (IntError(GPtr, R_IntErr)); + mtp->threadID = 0; + } + if ( headsUp ) + fsckPrint(GPtr->context, fsckLostFoundDirectory, "lost+found"); + + return (0); +} + + +static OSErr +FixMissingDirectory( SGlob *GPtr, UInt32 theObjID, UInt32 theParID ) +{ + Boolean isHFSPlus; + UInt16 recSize; + OSErr result; + int nameLen; + UInt32 hint; + char myString[ 32 ]; + CatalogName myName; + CatalogRecord catRec; + CatalogKey myKey, myThreadKey; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + // we will use the object ID of the missing directory as the name since we have + // no way to find the original name and this should make it unique within our + // lost+found directory. + sprintf( myString, "%ld", (long)theObjID ); + nameLen = strlen( myString ); + + if ( isHFSPlus ) + { + int i; + myName.ustr.length = nameLen; + for ( i = 0; i < myName.ustr.length; i++ ) + myName.ustr.unicode[ i ] = (u_int16_t) myString[ i ]; + } + else + { + myName.pstr[0] = nameLen; + memcpy( &myName.pstr[1], &myString[0], nameLen ); + } + + // make sure the name is not already used + BuildCatalogKey( theParID, &myName, isHFSPlus, &myKey ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &myKey, kNoHint, + NULL, &catRec, &recSize, &hint ); + if ( result == noErr ) + return( R_IntErr ); + + // insert new directory and thread record into the catalog + recSize = BuildThreadRec( &myKey, &catRec, isHFSPlus, true ); + BuildCatalogKey( theObjID, NULL, isHFSPlus, &myThreadKey ); + result = InsertBTreeRecord( GPtr->calculatedCatalogFCB, &myThreadKey, &catRec, recSize, &hint ); + if ( result != noErr ) + return( result ); + + recSize = BuildFolderRec( GPtr, 01777, theObjID, isHFSPlus, &catRec ); + + result = InsertBTreeRecord( GPtr->calculatedCatalogFCB, &myKey, &catRec, recSize, &hint ); + if ( result != noErr ) + return( result ); + + /* update parent directory to reflect addition of new directory */ + result = UpdateFolderCount( GPtr->calculatedVCB, theParID, NULL, + ((isHFSPlus) ? 
kHFSPlusFolderRecord : kHFSFolderRecord), + kNoHint, 1 ); + + /* update our header node on disk from our BTreeControlBlock */ + UpdateBTreeHeader( GPtr->calculatedCatalogFCB ); + + return( result ); + +} /* FixMissingDirectory */ + + +static HFSCatalogNodeID +GetObjectID( CatalogRecord * theRecPtr ) +{ + HFSCatalogNodeID myObjID; + + switch ( theRecPtr->recordType ) { + case kHFSPlusFolderRecord: + myObjID = theRecPtr->hfsPlusFolder.folderID; + break; + case kHFSPlusFileRecord: + myObjID = theRecPtr->hfsPlusFile.fileID; + break; + case kHFSFolderRecord: + myObjID = theRecPtr->hfsFolder.folderID; + break; + case kHFSFileRecord: + myObjID = theRecPtr->hfsFile.fileID; + break; + default: + myObjID = 0; + } + + return( myObjID ); + +} /* GetObjectID */ + +/* Function: CreateFileByName + * + * Description: Create a file with given fileName of type fileType containing + * data of length dataLen. This function assumes that the name of symlink + * to be created is passed as UTF8 + * + * Input: + * 1. GPtr - pointer to global scavenger structure + * 2. parentID - ID of parent directory to create the new file. + * 3. fileName - name of the file to create in UTF8 format. + * 4. fileNameLen - length of the filename to be created. + * If the volume is HFS Plus, the filename is delimited to + * kHFSPlusMaxFileNameChars characters. + * If the volume is plain HFS, the filename is delimited to + * kHFSMaxFileNameChars characters. + * 5. fileType - file type (currently supported S_IFREG, S_IFLNK). + * 6. data - data content of first data fork of the file + * 7. dataLen - length of data to be written + * + * Output: + * returns zero on success, non-zero on failure. + * memFullErr - Not enough memory + * paramErr - Invalid paramter + */ +OSErr CreateFileByName(SGlobPtr GPtr, UInt32 parentID, UInt16 fileType, u_char *fileName, unsigned int filenameLen, u_char *data, unsigned int dataLen) +{ + OSErr err = noErr; + Boolean isHFSPlus; + Boolean isCatUpdated = false; + + CatalogName fName; + CatalogRecord catRecord; + CatalogKey catKey; + CatalogKey threadKey; + UInt32 hint; + UInt16 recordSize; + + UInt32 startBlock = 0; + UInt32 blockCount = 0; + UInt32 nextCNID; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Construct unicode name for file name to construct catalog key */ + if (isHFSPlus) { + /* Convert utf8 filename to Unicode filename */ + size_t namelen; + + if (filenameLen < kHFSPlusMaxFileNameChars) { + (void) utf_decodestr (fileName, filenameLen, fName.ustr.unicode, &namelen, sizeof(fName.ustr.unicode)); + namelen /= 2; + fName.ustr.length = namelen; + } else { + /* The resulting may result in more than kHFSPlusMaxFileNameChars chars */ + UInt16 *unicodename; + + /* Allocate a large array to convert the utf-8 to utf-16 */ + unicodename = malloc (filenameLen * 4); + if (unicodename == NULL) { + err = memFullErr; + goto out; + } + + (void) utf_decodestr (fileName, filenameLen, unicodename, &namelen, filenameLen * 4); + namelen /= 2; + + /* Chopping unicode string based on length might affect unicode + * chars that take more than one UInt16 - very rare possiblity. 
+ */ + if (namelen > kHFSPlusMaxFileNameChars) { + namelen = kHFSPlusMaxFileNameChars; + } + + memcpy (fName.ustr.unicode, unicodename, (namelen * 2)); + free (unicodename); + fName.ustr.length = namelen; + } + } else { + if (filenameLen > kHFSMaxFileNameChars) { + filenameLen = kHFSMaxFileNameChars; + } + fName.pstr[0] = filenameLen; + memcpy(&fName.pstr[1], fileName, filenameLen); + } + + /* Make sure that a file with same name does not exist in parent dir */ + BuildCatalogKey(parentID, &fName, isHFSPlus, &catKey); + err = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &catKey, kNoHint, NULL, + &catRecord, &recordSize, &hint); + if (err != fsBTRecordNotFoundErr) { +#if DEBUG_OVERLAP + plog ("%s: %s probably exists in dirID = %d (err=%d)\n", __FUNCTION__, fileName, parentID, err); +#endif + err = EEXIST; + goto out; + } + + if (data) { + /* Calculate correct number of blocks required for data */ + if (dataLen % (GPtr->calculatedVCB->vcbBlockSize)) { + blockCount = (dataLen / (GPtr->calculatedVCB->vcbBlockSize)) + 1; + } else { + blockCount = dataLen / (GPtr->calculatedVCB->vcbBlockSize); + } + + if (blockCount) { + /* Allocate disk space for the data */ + err = AllocateContigBitmapBits (GPtr->calculatedVCB, blockCount, &startBlock); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Not enough disk space (err=%d)\n", __FUNCTION__, err); +#endif + goto out; + } + + /* Write the data to the blocks */ + err = WriteBufferToDisk(GPtr, startBlock, blockCount, data, dataLen); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in writing data of %s to disk (err=%d)\n", __FUNCTION__, fileName, err); +#endif + goto out; + } + } + } + + /* Build and insert thread record */ + nextCNID = GPtr->calculatedVCB->vcbNextCatalogID; + if (!isHFSPlus && nextCNID == 0xffffFFFF) { + goto out; + } + recordSize = BuildThreadRec(&catKey, &catRecord, isHFSPlus, false); + for (;;) { + BuildCatalogKey(nextCNID, NULL, isHFSPlus, &threadKey); + err = InsertBTreeRecord(GPtr->calculatedCatalogFCB, &threadKey, &catRecord, + recordSize, &hint ); + if (err == fsBTDuplicateRecordErr && isHFSPlus) { + /* Allow CNIDs on HFS Plus volumes to wrap around */ + ++nextCNID; + if (nextCNID < kHFSFirstUserCatalogNodeID) { + GPtr->calculatedVCB->vcbAttributes |= kHFSCatalogNodeIDsReusedMask; + MarkVCBDirty(GPtr->calculatedVCB); + nextCNID = kHFSFirstUserCatalogNodeID; + } + continue; + } + break; + } + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error inserting thread record for file = %s (err=%d)\n", __FUNCTION__, fileName, err); +#endif + goto out; + } + + /* Build file record */ + recordSize = BuildFileRec(fileType, 0666, nextCNID, isHFSPlus, &catRecord); + if (recordSize == 0) { +#if DEBUG_OVERLAP + plog ("%s: Incorrect fileType\n", __FUNCTION__); +#endif + + /* Remove the thread record inserted above */ + err = DeleteBTreeRecord (GPtr->calculatedCatalogFCB, &threadKey); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in removing thread record\n", __FUNCTION__); +#endif + } + err = paramErr; + goto out; + } + + /* Update startBlock, blockCount, etc */ + if (isHFSPlus) { + catRecord.hfsPlusFile.dataFork.logicalSize = dataLen; + catRecord.hfsPlusFile.dataFork.totalBlocks = blockCount; + catRecord.hfsPlusFile.dataFork.extents[0].startBlock = startBlock; + catRecord.hfsPlusFile.dataFork.extents[0].blockCount = blockCount; + } else { + catRecord.hfsFile.dataLogicalSize = dataLen; + catRecord.hfsFile.dataPhysicalSize = blockCount * GPtr->calculatedVCB->vcbBlockSize; + 
catRecord.hfsFile.dataExtents[0].startBlock = startBlock; + catRecord.hfsFile.dataExtents[0].blockCount = blockCount; + } + + /* Insert catalog file record */ + err = InsertBTreeRecord(GPtr->calculatedCatalogFCB, &catKey, &catRecord, recordSize, &hint ); + if (err == noErr) { + isCatUpdated = true; + +#if DEBUG_OVERLAP + plog ("Created \"%s\" with ID = %d startBlock = %d, blockCount = %d, dataLen = %d\n", fileName, nextCNID, startBlock, blockCount, dataLen); +#endif + } else { +#if DEBUG_OVERLAP + plog ("%s: Error in inserting file record for file = %s (err=%d)\n", __FUNCTION__, fileName, err); +#endif + + /* remove the thread record inserted above */ + err = DeleteBTreeRecord (GPtr->calculatedCatalogFCB, &threadKey); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in removing thread record\n", __FUNCTION__); +#endif + } + err = paramErr; + goto out; + } + + /* Update volume header */ + GPtr->calculatedVCB->vcbNextCatalogID = nextCNID + 1; + if (GPtr->calculatedVCB->vcbNextCatalogID < kHFSFirstUserCatalogNodeID) { + GPtr->calculatedVCB->vcbAttributes |= kHFSCatalogNodeIDsReusedMask; + GPtr->calculatedVCB->vcbNextCatalogID = kHFSFirstUserCatalogNodeID; + } + MarkVCBDirty( GPtr->calculatedVCB ); + + /* update our header node on disk from our BTreeControlBlock */ + UpdateBTreeHeader(GPtr->calculatedCatalogFCB); + + /* update parent directory to reflect addition of new file */ + err = UpdateFolderCount(GPtr->calculatedVCB, parentID, NULL, kHFSPlusFileRecord, kNoHint, 1); + if (err != noErr) { +#if DEBUG_OVERLAP + plog ("%s: Error in updating parent folder count (err=%d)\n", __FUNCTION__, err); +#endif + goto out; + } + +out: + /* On error, if catalog record was not inserted and disk block were allocated, + * deallocate the blocks + */ + if (err && (isCatUpdated == false) && startBlock) { + ReleaseBitmapBits (startBlock, blockCount); + } + + return err; +} /* CreateFileByName */ + +/* Function: CreateDirByName + * + * Description: Create directory with name dirName in a directory with ID as + * parentID. The function assumes that the dirName passed is ASCII. + * + * Input: + * GPtr - global scavenger structure pointer + * dirName - name of directory to be created + * parentID - dirID of the parent directory for new directory + * + * Output: + * on success, ID of the new directory created. + * on failure, zero. 
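+ *		If a catalog object with the same name already exists inside
+ *		parentID, the existing directory's ID is returned instead (or
+ *		zero if the existing object is not a directory).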
+ * + */ +UInt32 CreateDirByName(SGlob *GPtr , const u_char *dirName, const UInt32 parentID) +{ + Boolean isHFSPlus; + UInt16 recSize; + UInt16 myMode; + int result; + int nameLen; + UInt32 hint; + UInt32 nextCNID; + SFCB * fcbPtr; + CatalogKey myKey; + CatalogName myName; + CatalogRecord catRec; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + fcbPtr = GPtr->calculatedCatalogFCB; + nameLen = strlen( (char *)dirName ); + + if ( isHFSPlus ) + { + int i; + myName.ustr.length = nameLen; + for ( i = 0; i < myName.ustr.length; i++ ) + myName.ustr.unicode[ i ] = (u_int16_t) dirName[ i ]; + } + else + { + myName.pstr[0] = nameLen; + memcpy( &myName.pstr[1], &dirName[0], nameLen ); + } + + // see if we already have a lost and found directory + BuildCatalogKey( parentID, &myName, isHFSPlus, &myKey ); + result = SearchBTreeRecord( fcbPtr, &myKey, kNoHint, NULL, &catRec, &recSize, &hint ); + if ( result == noErr ) { + if ( isHFSPlus ) { + if ( catRec.recordType == kHFSPlusFolderRecord ) + return( catRec.hfsPlusFolder.folderID ); + } + else if ( catRec.recordType == kHFSFolderRecord ) + return( catRec.hfsFolder.folderID ); + return( 0 ); // something already there but not a directory + } + + // insert new directory and thread record into the catalog + nextCNID = GPtr->calculatedVCB->vcbNextCatalogID; + if ( !isHFSPlus && nextCNID == 0xFFFFFFFF ) + return( 0 ); + + recSize = BuildThreadRec( &myKey, &catRec, isHFSPlus, true ); + for (;;) { + CatalogKey key; + + BuildCatalogKey( nextCNID, NULL, isHFSPlus, &key ); + result = InsertBTreeRecord( fcbPtr, &key, &catRec, recSize, &hint ); + if ( result == fsBTDuplicateRecordErr && isHFSPlus ) { + /* + * Allow CNIDs on HFS Plus volumes to wrap around + */ + ++nextCNID; + if ( nextCNID < kHFSFirstUserCatalogNodeID ) { + GPtr->calculatedVCB->vcbAttributes |= kHFSCatalogNodeIDsReusedMask; + MarkVCBDirty( GPtr->calculatedVCB ); + nextCNID = kHFSFirstUserCatalogNodeID; + } + continue; + } + break; + } + if ( result != 0 ) + return( 0 ); + + myMode = ( GPtr->lostAndFoundMode == 0 ) ? 
01777 : GPtr->lostAndFoundMode; + recSize = BuildFolderRec( GPtr, myMode, nextCNID, isHFSPlus, &catRec ); + result = InsertBTreeRecord( fcbPtr, &myKey, &catRec, recSize, &hint ); + if ( result != 0 ) + return( 0 ); + + /* Update volume header */ + GPtr->calculatedVCB->vcbNextCatalogID = nextCNID + 1; + if ( GPtr->calculatedVCB->vcbNextCatalogID < kHFSFirstUserCatalogNodeID ) { + GPtr->calculatedVCB->vcbAttributes |= kHFSCatalogNodeIDsReusedMask; + GPtr->calculatedVCB->vcbNextCatalogID = kHFSFirstUserCatalogNodeID; + } + MarkVCBDirty( GPtr->calculatedVCB ); + + /* update parent directory to reflect addition of new directory */ + result = UpdateFolderCount( GPtr->calculatedVCB, parentID, NULL, kHFSPlusFolderRecord, kNoHint, 1 ); + + /* update our header node on disk from our BTreeControlBlock */ + UpdateBTreeHeader( GPtr->calculatedCatalogFCB ); + + return( nextCNID ); + +} /* CreateDirByName */ + +static void +CountFolderItems(SGlobPtr GPtr, UInt32 folderID, Boolean isHFSPlus, UInt32 *itemCount, UInt32 *folderCount) +{ + SFCB *fcb = GPtr->calculatedCatalogFCB; + OSErr err = 0; + BTreeIterator iterator; + FSBufferDescriptor btRecord; + union { + HFSPlusCatalogFolder catRecord; + HFSPlusCatalogFile catFile; + } catRecord; + HFSPlusCatalogKey *key; + UInt16 recordSize = 0; + int fCount = 0, iCount = 0; + + ClearMemory(&iterator, sizeof(iterator)); + key = (HFSPlusCatalogKey*)&iterator.key; + BuildCatalogKey(folderID, NULL, isHFSPlus, (CatalogKey*)key); + btRecord.bufferAddress = &catRecord; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(catRecord); + + for (err = BTSearchRecord(fcb, &iterator, kNoHint, &btRecord, &recordSize, &iterator); + err == 0; + err = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, &btRecord, &recordSize)) { + if (catRecord.catRecord.recordType == kHFSPlusFolderThreadRecord || + catRecord.catRecord.recordType == kHFSPlusFileThreadRecord || + catRecord.catRecord.recordType == kHFSFolderThreadRecord || + catRecord.catRecord.recordType == kHFSFileThreadRecord) + continue; + if (key->parentID != folderID) + break; + if (isHFSPlus && + (catRecord.catRecord.recordType == kHFSPlusFileRecord) && + (catRecord.catFile.flags & kHFSHasLinkChainMask) && + (catRecord.catFile.userInfo.fdType == kHFSAliasType) && + (catRecord.catFile.userInfo.fdCreator == kHFSAliasCreator) && + (key->parentID != GPtr->filelink_priv_dir_id)) { + // It's a directory hard link, which counts as a directory here + fCount++; + } + if (catRecord.catRecord.recordType == kHFSPlusFolderRecord) + fCount++; + iCount++; + } + if (itemCount) + *itemCount = iCount; + if (folderCount) + *folderCount = fCount; + return; +} +/* + * Build a catalog node folder record with the given input. 
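+ * The valence (and, on HFSX volumes, the folderCount field) is computed
+ * by scanning any children already recorded under theObjID, using
+ * CountFolderItems above.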
+ */ +static int +BuildFolderRec( SGlob *GPtr, u_int16_t theMode, UInt32 theObjID, Boolean isHFSPlus, CatalogRecord * theRecPtr ) +{ + UInt16 recSize; + UInt32 createTime; + UInt32 vCount = 0, fCount = 0; + + ClearMemory( (Ptr)theRecPtr, sizeof(*theRecPtr) ); + + CountFolderItems(GPtr, theObjID, isHFSPlus, &vCount, &fCount); + if ( isHFSPlus ) { + createTime = GetTimeUTC(); + theRecPtr->hfsPlusFolder.recordType = kHFSPlusFolderRecord; + theRecPtr->hfsPlusFolder.folderID = theObjID; + theRecPtr->hfsPlusFolder.createDate = createTime; + theRecPtr->hfsPlusFolder.contentModDate = createTime; + theRecPtr->hfsPlusFolder.attributeModDate = createTime; + theRecPtr->hfsPlusFolder.bsdInfo.ownerID = getuid( ); + theRecPtr->hfsPlusFolder.bsdInfo.groupID = getgid( ); + theRecPtr->hfsPlusFolder.bsdInfo.fileMode = S_IFDIR; + theRecPtr->hfsPlusFolder.bsdInfo.fileMode |= theMode; + theRecPtr->hfsPlusFolder.valence = vCount; + recSize= sizeof(HFSPlusCatalogFolder); + if (VolumeObjectIsHFSX(GPtr)) { + theRecPtr->hfsPlusFolder.flags |= kHFSHasFolderCountMask; + theRecPtr->hfsPlusFolder.folderCount = fCount; + } + } + else { + createTime = GetTimeLocal( true ); + theRecPtr->hfsFolder.recordType = kHFSFolderRecord; + theRecPtr->hfsFolder.folderID = theObjID; + theRecPtr->hfsFolder.createDate = createTime; + theRecPtr->hfsFolder.modifyDate = createTime; + theRecPtr->hfsFolder.valence = vCount; + recSize= sizeof(HFSCatalogFolder); + } + + return( recSize ); + +} /* BuildFolderRec */ + + +/* + * Build a catalog node thread record from a catalog key + * and return the size of the record. + */ +static int +BuildThreadRec( CatalogKey * theKeyPtr, CatalogRecord * theRecPtr, + Boolean isHFSPlus, Boolean isDirectory ) +{ + int size = 0; + + if ( isHFSPlus ) { + HFSPlusCatalogKey *key = (HFSPlusCatalogKey *)theKeyPtr; + HFSPlusCatalogThread *rec = (HFSPlusCatalogThread *)theRecPtr; + + size = sizeof(HFSPlusCatalogThread); + if ( isDirectory ) + rec->recordType = kHFSPlusFolderThreadRecord; + else + rec->recordType = kHFSPlusFileThreadRecord; + rec->reserved = 0; + rec->parentID = key->parentID; + bcopy(&key->nodeName, &rec->nodeName, + sizeof(UniChar) * (key->nodeName.length + 1)); + + /* HFS Plus has varaible sized thread records */ + size -= (sizeof(rec->nodeName.unicode) - + (rec->nodeName.length * sizeof(UniChar))); + } + else /* HFS standard */ { + HFSCatalogKey *key = (HFSCatalogKey *)theKeyPtr; + HFSCatalogThread *rec = (HFSCatalogThread *)theRecPtr; + + size = sizeof(HFSCatalogThread); + bzero(rec, size); + if ( isDirectory ) + rec->recordType = kHFSFolderThreadRecord; + else + rec->recordType = kHFSFileThreadRecord; + rec->parentID = key->parentID; + bcopy(key->nodeName, rec->nodeName, key->nodeName[0]+1); + } + + return (size); + +} /* BuildThreadRec */ + +/* Function: BuildFileRec + * + * Description: Build a catalog file record with given fileID, fileType + * and fileMode. + * + * Input: + * 1. fileType - currently supports S_IFREG, S_IFLNK + * 2. fileMode - file mode desired. + * 3. fileID - file ID + * 4. isHFSPlus - indicates whether the record is being created for + * HFSPlus volume or plain HFS volume. + * 5. catRecord - pointer to catalog record + * + * Output: + * returns size of the catalog record. + * on success, non-zero value; on failure, zero. 
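+ *
+ * Note: the fileID passed in is simply stored in the new record; the
+ * caller is responsible for allocating the ID and for inserting the
+ * matching thread record, as CreateFileByName does above.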
+ */ +static int BuildFileRec(UInt16 fileType, UInt16 fileMode, UInt32 fileID, Boolean isHFSPlus, CatalogRecord *catRecord) +{ + UInt16 recordSize = 0; + UInt32 createTime; + + /* We only support creating S_IFREG and S_IFLNK and S_IFLNK is not supported + * on plain HFS + */ + if (((fileType != S_IFREG) && (fileType != S_IFLNK)) || + ((isHFSPlus == false) && (fileType == S_IFLNK))) { + goto out; + } + + ClearMemory((Ptr)catRecord, sizeof(*catRecord)); + + if ( isHFSPlus ) { + createTime = GetTimeUTC(); + catRecord->hfsPlusFile.recordType = kHFSPlusFileRecord; + catRecord->hfsPlusFile.fileID = fileID; + catRecord->hfsPlusFile.createDate = createTime; + catRecord->hfsPlusFile.contentModDate = createTime; + catRecord->hfsPlusFile.attributeModDate = createTime; + catRecord->hfsPlusFile.bsdInfo.ownerID = getuid(); + catRecord->hfsPlusFile.bsdInfo.groupID = getgid(); + catRecord->hfsPlusFile.bsdInfo.fileMode = fileType; + catRecord->hfsPlusFile.bsdInfo.fileMode |= fileMode; + if (fileType == S_IFLNK) { + catRecord->hfsPlusFile.userInfo.fdType = kSymLinkFileType; + catRecord->hfsPlusFile.userInfo.fdCreator = kSymLinkCreator; + } else { + catRecord->hfsPlusFile.userInfo.fdType = kTextFileType; + catRecord->hfsPlusFile.userInfo.fdCreator = kTextFileCreator; + } + recordSize= sizeof(HFSPlusCatalogFile); + } + else { + createTime = GetTimeLocal(true); + catRecord->hfsFile.recordType = kHFSFileRecord; + catRecord->hfsFile.fileID = fileID; + catRecord->hfsFile.createDate = createTime; + catRecord->hfsFile.modifyDate = createTime; + catRecord->hfsFile.userInfo.fdType = kTextFileType; + catRecord->hfsFile.userInfo.fdCreator = kTextFileCreator; + recordSize= sizeof(HFSCatalogFile); + } + +out: + return(recordSize); +} /* BuildFileRec */ + +/* Function: BuildAttributeKey + * + * Build attribute key based on given information like - + * fileID, startBlock, attribute name and attribute name length. + * + * Note that the attribute name is the UTF-8 format string. + */ +static void BuildAttributeKey(u_int32_t fileID, u_int32_t startBlock, + unsigned char *attrName, u_int16_t attrNameLen, HFSPlusAttrKey *key) +{ + size_t attrNameLenBytes; + + assert(VolumeObjectIsHFSPlus() == true); + + key->pad = 0; + key->fileID = fileID; + key->startBlock = startBlock; + + /* Convert UTF-8 attribute name to unicode */ + (void) utf_decodestr(attrName, attrNameLen, key->attrName, &attrNameLenBytes, sizeof(key->attrName)); + key->attrNameLen = attrNameLenBytes / 2; + + key->keyLength = kHFSPlusAttrKeyMinimumLength + attrNameLenBytes; +} + +/* Delete catalog record and thread record for given ID. On successful + * deletion, this function also updates the valence and folder count for + * the parent directory and the file/folder count in the volume header. + * + * Returns - zero on success, non-zero on failure. 
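+ *
+ * When the deletion is not part of a rename and the volume is HFS+, all
+ * extended attributes recorded for the given ID are deleted as well (see
+ * DeleteAllAttrsByID below).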
+ */ +static int DeleteCatalogRecordByID(SGlobPtr GPtr, uint32_t id, Boolean for_rename) +{ + int retval; + CatalogRecord rec; + CatalogKey key; + UInt16 recsize; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Lookup the catalog record to move */ + retval = GetCatalogRecordByID(GPtr, id, isHFSPlus, &key, &rec, &recsize); + if (retval) { + return retval; + } + + /* Delete the record */ + if (isHFSPlus) { + retval = DeleteCatalogNode(GPtr->calculatedVCB, + key.hfsPlus.parentID, + (const CatalogName *)&key.hfsPlus.nodeName, + kNoHint, for_rename); + } else { + retval = DeleteCatalogNode(GPtr->calculatedVCB, + key.hfs.parentID, + (const CatalogName *)&key.hfs.nodeName, + kNoHint, for_rename); + } + + /* If deletion of record succeeded, and the operation was not + * being performed for rename, and the volume is HFS+, try + * deleting all extended attributes for this file/folder + */ + if ((retval == 0) && (for_rename == false) && (isHFSPlus == true)) { + /* Delete all attributes associated with this ID */ + retval = DeleteAllAttrsByID(GPtr, id); + } + + return retval; +} + +/* Move a catalog record with given ID to a new parent directory with given + * parentID. This function should only be called for HFS+ volumes. + * This function removes the catalog record from old parent and inserts + * it back with the new parent ID. It also takes care of updating the + * parent directory counts. Note that this function clears kHFSHasLinkChainBit + * from the catalog record flags. + * + * On success, returns zero. On failure, returns non-zero. + */ +static int MoveCatalogRecordByID(SGlobPtr GPtr, uint32_t id, uint32_t new_parentid) +{ + int retval; + CatalogRecord rec; + CatalogKey key; + UInt32 hint; + UInt16 recsize; + Boolean isFolder = false; + BTreeIterator iterator; + + assert (VolumeObjectIsHFSPlus() == true); + + /* Lookup the catalog record to move */ + retval = GetCatalogRecordByID(GPtr, id, true, &key, &rec, &recsize); + if (retval) { + goto out; + } + + /* Delete the record and its thread from original location. + * For file records, do not deallocate original extents. + */ + retval = DeleteCatalogRecordByID(GPtr, id, true); + if (retval) { + goto out; + } + + key.hfsPlus.parentID = new_parentid; + /* The record being moved should not have linkChainMask set */ + if (rec.recordType == kHFSPlusFolderRecord) { + rec.hfsPlusFolder.flags &= ~kHFSHasLinkChainMask; + isFolder = true; + } else if (rec.recordType == kHFSPlusFileRecord) { + rec.hfsPlusFile.flags &= ~kHFSHasLinkChainMask; + isFolder = false; + } + + /* Insert the catalog record with new parent */ + retval = InsertBTreeRecord(GPtr->calculatedCatalogFCB, &key, &rec, + recsize, &hint); + if (retval) { + goto out; + } + + /* Insert the new thread record */ + recsize = BuildThreadRec(&key, &rec, true, isFolder); + BuildCatalogKey(id, NULL, true, &key); + retval = InsertBTreeRecord(GPtr->calculatedCatalogFCB, &key, &rec, + recsize, &hint); + if (retval) { + goto out; + } + + /* Update the counts in the new parent directory and volume header */ + ClearMemory(&iterator, sizeof(iterator)); + retval = GetCatalogRecordByID(GPtr, new_parentid, true, &key, &rec, &recsize); + if (retval) { + if ((retval == btNotFound) && (GPtr->CBTStat & S_Orphan)) { + /* No need for re-repair minor repair order because + * we are failing on updating the parent directory. 
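+			 * (The record and its thread have already been reinserted
+			 * under the new parent; only the parent's own counts could not
+			 * be updated because the parent was not found, a case the
+			 * already-flagged orphan check is expected to pick up.)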
+ */ + retval = 0; + } + goto out; + } + if (rec.recordType != kHFSPlusFolderRecord) { + goto out; + } + + rec.hfsPlusFolder.valence++; + if ((isFolder == true) && + (rec.hfsPlusFolder.flags & kHFSHasFolderCountMask)) { + rec.hfsPlusFolder.folderCount++; + } + + retval = ReplaceBTreeRecord(GPtr->calculatedCatalogFCB, &key, + kNoHint, &rec, recsize, &hint); + if (retval) { + goto out; + } + + if (isFolder == true) { + GPtr->calculatedVCB->vcbFolderCount++; + } else { + GPtr->calculatedVCB->vcbFileCount++; + } + GPtr->VIStat |= S_MDB; + GPtr->CBTStat |= S_BTH; /* leaf record count changed */ + +out: + return retval; +} + +/* The function deletes all extended attributes associated with a given + * file/folder ID. The function takes care of deallocating allocation blocks + * associated with extent based attributes. + * + * Note: This function deletes *all* attributes for a given file/folder. + * To delete a single attribute record using a key, use delete_attr_record(). + * + * On success, returns zero. On failure, returns non-zero. + */ +static int DeleteAllAttrsByID(SGlobPtr GPtr, uint32_t id) +{ + int retval; + BTreeIterator iterator; + FSBufferDescriptor btrec; + HFSPlusAttrKey *attr_key; + HFSPlusAttrRecord attr_record; + UInt16 record_size; + + /* Initialize the iterator, attribute key, and attribute record */ + ClearMemory(&iterator, sizeof(BTreeIterator)); + attr_key = (HFSPlusAttrKey *)&iterator.key; + attr_key->keyLength = kHFSPlusAttrKeyMinimumLength; + attr_key->fileID = id; + + ClearMemory(&btrec, sizeof(FSBufferDescriptor)); + btrec.bufferAddress = &attr_record; + btrec.itemCount = 1; + btrec.itemSize = sizeof(HFSPlusAttrRecord); + + /* Search for attribute with NULL name which will place the + * iterator just before the first record for given id. + */ + retval = BTSearchRecord(GPtr->calculatedAttributesFCB, &iterator, + kInvalidMRUCacheKey, &btrec, &record_size, &iterator); + if ((retval != 0) && (retval != btNotFound)) { + goto out; + } + + retval = BTIterateRecord(GPtr->calculatedAttributesFCB, kBTreeNextRecord, + &iterator, &btrec, &record_size); + while ((retval == 0) && (attr_key->fileID == id)) { + /* Delete attribute record and deallocate extents, if any */ + retval = delete_attr_record(GPtr, attr_key, &attr_record); + if (retval) { + break; + } + + retval = BTIterateRecord(GPtr->calculatedAttributesFCB, + kBTreeNextRecord, &iterator, &btrec, &record_size); + } + + if (retval == btNotFound) { + retval = 0; + } + +out: + return retval; +} + +/* The function deletes an extented attribute record when the corresponding + * record and key are provided. If the record is an extent-based attribute, + * it also takes care to deallocate all allocation blocks associated with + * the record. + * + * Note: This function does not delete all attribute records associated + * with the file/folder ID in the attribute key. To delete all attributes + * for given file/folder ID, use DeleteAllAttrsByID(). + * + * On success, returns zero. On failure, returns non-zero. 
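+ *
+ * Inline-data attributes carry no extents, so only kHFSPlusAttrForkData
+ * and kHFSPlusAttrExtents records cause allocation blocks to be released.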
+ */ +static int delete_attr_record(SGlobPtr GPtr, HFSPlusAttrKey *attr_key, HFSPlusAttrRecord *attr_record) +{ + int retval; + UInt32 num_blocks_freed; + Boolean last_extent; + + retval = DeleteBTreeRecord(GPtr->calculatedAttributesFCB, attr_key); + if (retval == 0) { + /* Set bits to write back attribute btree header and map */ + GPtr->ABTStat |= S_BTH + S_BTM; + + if (attr_record->recordType == kHFSPlusAttrForkData) { + retval = ReleaseExtents(GPtr->calculatedVCB, + attr_record->forkData.theFork.extents, + &num_blocks_freed, &last_extent); + } else if (attr_record->recordType == kHFSPlusAttrExtents) { + retval = ReleaseExtents(GPtr->calculatedVCB, + attr_record->overflowExtents.extents, + &num_blocks_freed, &last_extent); + } + } + + return retval; +} + +/*------------------------------------------------------------------------------ + +Routine: ZeroFillUnusedNodes + +Function: Write zeroes to all unused nodes of a given B-tree. + +Input: GPtr - pointer to scavenger global area + fileRefNum - refnum of BTree file + +Output: ZeroFillUnusedNodes - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +static int ZeroFillUnusedNodes(SGlobPtr GPtr, short fileRefNum) +{ + BTreeControlBlock *btcb = GetBTreeControlBlock(fileRefNum); + unsigned char *bitmap = (unsigned char *) ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr; + unsigned char mask = 0x80; + OSErr err; + UInt32 nodeNum; + BlockDescriptor node; + + node.buffer = NULL; + + for (nodeNum = 0; nodeNum < btcb->totalNodes; ++nodeNum) + { + if ((*bitmap & mask) == 0) + { + /* Read the raw node, without going through hfs_swap_BTNode. */ + err = btcb->getBlockProc(btcb->fcbPtr, nodeNum, kGetBlock|kGetEmptyBlock, &node); + if (err) + { + if (debug) plog("Couldn't read node #%u\n", nodeNum); + return err; + } + + /* Fill the node with zeroes. */ + bzero(node.buffer, node.blockSize); + + /* Release and write the node without going through hfs_swap_BTNode. */ + (void) btcb->releaseBlockProc(btcb->fcbPtr, &node, kReleaseBlock|kMarkBlockDirty); + node.buffer = NULL; + } + + /* Move to the next bit in the bitmap. */ + mask >>= 1; + if (mask == 0) + { + mask = 0x80; + ++bitmap; + } + } + + return 0; +} /* end ZeroFillUnusedNodes */ diff --git a/fsck_hfs/dfalib/SRuntime.h b/fsck_hfs/dfalib/SRuntime.h new file mode 100644 index 0000000..005a4da --- /dev/null +++ b/fsck_hfs/dfalib/SRuntime.h @@ -0,0 +1,420 @@ +/* + * Copyright (c) 1999, 2005-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* SRuntime.h */ + +#ifndef __SRUNTIME__ +#define __SRUNTIME__ + +#if BSD + +#include <sys/types.h> +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include <stdarg.h> + +#include <hfs/hfs_format.h> +#else + +#include <MacTypes.h> +#include <MacMemory.h> +#include <HFSVolumes.h> +#include <Errors.h> + +#endif + +#if BSD +/* Classic Mac OS Types */ +typedef int8_t SInt8; +typedef int16_t SInt16; +typedef int32_t SInt32; +typedef int64_t SInt64; + +typedef u_int8_t UInt8; +typedef u_int16_t UInt16; +typedef u_int32_t UInt32; +typedef u_int64_t UInt64; + +typedef void * LogicalAddress; +typedef char * Ptr; +typedef Ptr * Handle; +typedef u_int8_t Byte; +typedef size_t Size; +typedef unsigned char Boolean; +typedef u_int32_t ItemCount; +typedef u_int32_t ByteCount; +typedef u_int32_t OptionBits; + +typedef int16_t OSErr; +typedef int32_t OSStatus; + +typedef u_int32_t OSType; +typedef u_int32_t ResType; + +typedef u_int16_t UniChar; +typedef u_int32_t UniCharCount; +typedef UniChar * UniCharArrayPtr; +typedef const UniChar * ConstUniCharArrayPtr; +typedef u_int32_t TextEncoding; + +typedef unsigned char * StringPtr; +typedef unsigned char Str27[28]; +typedef unsigned char Str31[32]; +typedef unsigned char Str63[64]; +typedef unsigned char Str255[256]; + +typedef const unsigned char * ConstStr31Param; +typedef const unsigned char * ConstStr63Param; +typedef const unsigned char * ConstStr255Param; + +typedef u_int32_t HFSCatalogNodeID; + +enum { + false = 0, + true = 1 +}; + +/* OS error codes */ +enum { + noErr = 0, + dskFulErr = -34, + nsvErr = -35, + ioErr = -36, + eofErr = -39, + fnfErr = -43, + fBsyErr = -47, + paramErr = -50, + noMacDskErr = -57, + badMDBErr = -60, + memFullErr = -108, + notBTree = -410, + fileBoundsErr = -1309, +}; + +/* Finder Flags */ +enum { + kIsOnDesk = 0x0001, + kColor = 0x000E, + kIsShared = 0x0040, + kHasBeenInited = 0x0100, + kHasCustomIcon = 0x0400, + kIsStationery = 0x0800, + kNameLocked = 0x1000, + kHasBundle = 0x2000, + kIsInvisible = 0x4000, + kIsAlias = 0x8000 +}; + +#define EXTERN_API(_type) extern _type +#define EXTERN_API_C(_type) extern _type + +#define nil NULL + +EXTERN_API( void ) +DebugStr(ConstStr255Param debuggerMsg); + +typedef void * QElemPtr; +typedef void * DrvQElPtr; + + +#endif + + + +/* vcbFlags bits */ +enum { + kVCBFlagsIdleFlushBit = 3, /* Set if volume should be flushed at idle time */ + kVCBFlagsIdleFlushMask = 0x0008, + kVCBFlagsHFSPlusAPIsBit = 4, /* Set if volume implements HFS Plus APIs itself (not via emu\ + lation) */ + kVCBFlagsHFSPlusAPIsMask = 0x0010, + kVCBFlagsHardwareGoneBit = 5, /* Set if disk driver returned a hardwareGoneErr to Read or W\ + rite */ + kVCBFlagsHardwareGoneMask = 0x0020, + kVCBFlagsVolumeDirtyBit = 15, /* Set if volume information has changed since the last Flush\ + Vol */ + kVCBFlagsVolumeDirtyMask = 0x8000 +}; + + +/* Disk Cache constants */ +/* + * UTGetBlock options + */ +enum { + gbDefault = 0, /* default value - read if not found */ + /* bits and masks */ + gbReadBit = 0, /* read block from disk (forced read) */ + gbReadMask = 0x0001, + gbExistBit = 1, /* get existing cache block */ + gbExistMask = 0x0002, + gbNoReadBit = 2, /* don't read block from disk if not found in cache */ + gbNoReadMask = 0x0004, + gbReleaseBit = 3, /* release block immediately after GetBlock */ + gbReleaseMask = 0x0008 +}; + + +/* + * UTReleaseBlock options + */ +enum { + rbDefault = 0, /* default value - just mark the buffer not in-use */ + /* bits and masks */ + 
rbWriteBit = 0, /* force write buffer to disk */ + rbWriteMask = 0x0001, + rbTrashBit = 1, /* trash buffer contents after release */ + rbTrashMask = 0x0002, + rbDirtyBit = 2, /* mark buffer dirty */ + rbDirtyMask = 0x0004, + rbFreeBit = 3, /* free the buffer (save in the hash) */ + rbFreeMask = 0x000A /* rbFreeMask (rbFreeBit + rbTrashBit) works as rbTrash on < System 7.0 RamCache; on >= System 7.0, rbfreeMask overrides rbTrash */ +}; + + +/* + * UTFlushCache options + */ +enum { + fcDefault = 0, /* default value - pass this fcOption to just flush any dirty buffers */ + /* bits and masks */ + fcTrashBit = 0, /* (don't pass this as fcOption, use only for testing bit) */ + fcTrashMask = 0x0001, /* pass this fcOption value to flush and trash cache blocks */ + fcFreeBit = 1, /* (don't pass this as fcOption, use only for testing bit) */ + fcFreeMask = 0x0003 /* pass this fcOption to flush and free cache blocks (Note: both fcTrash and fcFree bits are set) */ +}; + + +/* + * UTCacheReadIP and UTCacheWriteIP cacheOption bits and masks are the ioPosMode + * bits and masks in Files.x + */ + +/* + * Cache routine internal error codes + */ +enum { + chNoBuf = 1, /* no free cache buffers (all in use) */ + chInUse = 2, /* requested block in use */ + chnotfound = 3, /* requested block not found */ + chNotInUse = 4 /* block being released was not in use */ +}; + + +/* + * FCBRec.fcbFlags bits + */ +enum { + fcbWriteBit = 0, /* Data can be written to this file */ + fcbWriteMask = 0x01, + fcbResourceBit = 1, /* This file is a resource fork */ + fcbResourceMask = 0x02, + fcbWriteLockedBit = 2, /* File has a locked byte range */ + fcbWriteLockedMask = 0x04, + fcbSharedWriteBit = 4, /* File is open for shared write access */ + fcbSharedWriteMask = 0x10, + fcbFileLockedBit = 5, /* File is locked (write-protected) */ + fcbFileLockedMask = 0x20, + fcbOwnClumpBit = 6, /* File has clump size specified in FCB */ + fcbOwnClumpMask = 0x40, + fcbModifiedBit = 7, /* File has changed since it was last flushed */ + fcbModifiedMask = 0x80 +}; + +enum { + fcbLargeFileBit = 3, /* File may grow beyond 2GB; cache uses file blocks, not bytes */ + fcbLargeFileMask = 0x08 +}; + +#define kSectorShift 9 /* log2(kSectorSize); used for bit shifts */ + +/* + Fork Level Access Method Block get options +*/ +enum { + kGetBlock = 0x00000000, + kForceReadBlock = 0x00000002, + kGetEmptyBlock = 0x00000008, + kSkipEndianSwap = 0x00000010 +}; +typedef OptionBits GetBlockOptions; + +/* + Fork Level Access Method Block release options +*/ +enum { + kReleaseBlock = 0x00000000, + kForceWriteBlock = 0x00000001, + kMarkBlockDirty = 0x00000002, + kTrashBlock = 0x00000004 +}; +typedef OptionBits ReleaseBlockOptions; + +struct BlockDescriptor{ + void *buffer; + void *blockHeader; + UInt64 blockNum; + UInt32 blockSize; + Boolean blockReadFromDisk; + Boolean fragmented; +}; +typedef struct BlockDescriptor BlockDescriptor; +typedef BlockDescriptor *BlockDescPtr; + + + +struct SFCB; + +struct SVCB { + UInt16 vcbSignature; + UInt16 vcbVersion; + UInt32 vcbAttributes; + UInt32 vcbLastMountedVersion; + UInt32 vcbReserved1; + UInt32 vcbCreateDate; + UInt32 vcbModifyDate; + UInt32 vcbBackupDate; + UInt32 vcbCheckedDate; + UInt32 vcbFileCount; + UInt32 vcbFolderCount; + UInt32 vcbBlockSize; + UInt32 vcbTotalBlocks; + UInt32 vcbFreeBlocks; + UInt32 vcbNextAllocation; + UInt32 vcbRsrcClumpSize; + UInt32 vcbDataClumpSize; + UInt32 vcbNextCatalogID; + UInt32 vcbWriteCount; + UInt64 vcbEncodingsBitmap; + UInt8 vcbFinderInfo[32]; + + /* MDB-specific fields... 
*/ + SInt16 vcbNmFls; /* number of files in root folder */ + SInt16 vcbNmRtDirs; /* number of directories in root folder */ + UInt16 vcbVBMSt; /* first sector of HFS volume bitmap */ + UInt16 vcbAlBlSt; /* first allocation block in HFS volume */ + UInt16 vcbVSeqNum; /* volume backup sequence number */ + UInt16 vcbReserved2; + Str27 vcbVN; /* HFS volume name */ + + /* runtime fields... */ + struct SFCB * vcbAllocationFile; + struct SFCB * vcbExtentsFile; + struct SFCB * vcbCatalogFile; + struct SFCB * vcbAttributesFile; + struct SFCB * vcbStartupFile; + + UInt32 vcbEmbeddedOffset; /* Byte offset where HFS+ starts */ + UInt16 vcbFlags; + SInt16 vcbDriveNumber; + SInt16 vcbDriverReadRef; + SInt16 vcbDriverWriteRef; + + void * vcbBlockCache; + + struct SGlob * vcbGPtr; + + /* deprecated fields... */ + SInt16 vcbVRefNum; +}; +typedef struct SVCB SVCB; + + +struct SFCB { + UInt32 fcbFileID; + UInt32 fcbFlags; + struct SVCB * fcbVolume; + void * fcbBtree; + HFSExtentRecord fcbExtents16; + HFSPlusExtentRecord fcbExtents32; + UInt32 fcbCatalogHint; + UInt32 fcbClumpSize; + UInt64 fcbLogicalSize; + UInt64 fcbPhysicalSize; + UInt32 fcbBlockSize; +}; +typedef struct SFCB SFCB; + + +extern OSErr GetDeviceSize(int driveRefNum, UInt64 *numBlocks, UInt32 *blockSize); + +extern OSErr DeviceRead(int device, int drive, void* buffer, SInt64 offset, UInt32 reqBytes, UInt32 *actBytes); + +extern OSErr DeviceWrite(int device, int drive, void* buffer, SInt64 offset, UInt32 reqBytes, UInt32 *actBytes); + + +/* + * Block Cache Interface + */ +extern void InitBlockCache(SVCB *volume); + +extern OSStatus GetVolumeBlock (SVCB *volume, UInt64 blockNum, GetBlockOptions options, + BlockDescriptor *block); + +extern OSStatus ReleaseVolumeBlock (SVCB *volume, BlockDescriptor *block, + ReleaseBlockOptions options); + +extern OSStatus GetFileBlock (SFCB *file, UInt32 blockNum, GetBlockOptions options, + BlockDescriptor *block); + +extern OSStatus ReleaseFileBlock (SFCB *file, BlockDescriptor *block, + ReleaseBlockOptions options); + +extern OSStatus SetFileBlockSize (SFCB *file, ByteCount blockSize); + + + +#if BSD + +#define AllocateMemory(size) malloc((size_t)(size)) +#define AllocateClearMemory(size) calloc(1,(size_t)(size)) +#define ReallocateMemory(ptr,newSize) SetPtrSize((void*)(ptr),(size_t)(newSize)) +#define MemorySize(ptr) malloc_size((void*)(ptr)) +#define DisposeMemory(ptr) free((void *)(ptr)) +#define CopyMemory(src,dst,len) bcopy((void*)(src),(void*)(dst),(size_t)(len)) +#define ClearMemory(start,len) bzero((void*)(start),(size_t)(len)) + +extern UInt32 TickCount(); +extern OSErr MemError(void); +extern Handle NewHandleClear(Size byteCount); +extern Handle NewHandle(Size byteCount); +extern void DisposeHandle(Handle h); +extern Size GetHandleSize(Handle h); +extern void SetHandleSize(Handle h, Size newSize); +extern OSErr PtrAndHand(const void *ptr1, Handle hand2, long size); + +#else + +#define AllocateMemory(size) NewPtr((Size)(size)) +#define AllocateClearMemory(size) NewPtrClear((Size)(size)) +#define ReallocateMemory(ptr,newSize) SetPtrSize((Ptr)(ptr),(Size)(newSize)) +#define MemorySize(ptr) GetPtrSize((Ptr)(ptr)) +#define DisposeMemory(ptr) DisposePtr((Ptr)(ptr)) +#define CopyMemory(src,dst,len) BlockMoveData((void *)(src),(void *)(dst),(Size)(len)) +void ClearMemory(void* start, long len); +#endif + + +#endif /* __SRUNTIME__ */ + + diff --git a/fsck_hfs/dfalib/SStubs.c b/fsck_hfs/dfalib/SStubs.c new file mode 100644 index 0000000..23b4ea7 --- /dev/null +++ b/fsck_hfs/dfalib/SStubs.c @@ -0,0 +1,203 @@ 
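/*
 * Illustrative usage sketch, not part of the hfs-226.1.1 patch: how the
 * Handle emulation declared in SRuntime.h above and implemented in
 * SStubs.c below is intended to be used.  Assumes SRuntime.h is included,
 * <string.h> is in scope, and the file is linked with SStubs.c; the name
 * demo_handle_usage is purely illustrative.
 */
static void demo_handle_usage(void)
{
	Handle h = NewHandle(4);             /* master pointer plus 4 zeroed bytes */

	if (h == NULL)
		return;
	memcpy(*h, "abcd", 4);               /* the data block is reached through *h */
	(void) PtrAndHand("efgh", h, 4);     /* grow the block and append 4 more bytes */
	/* GetHandleSize(h) now reports 8 */
	DisposeHandle(h);                    /* frees both the data block and the handle */
}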
+/* + * Copyright (c) 1999, 2002-2003, 2005-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* SStubs.c */ + + +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <sys/time.h> + +#include "Scavenger.h" +#include "../fsck_messages.h" + + +/* + * This is the straight GMT conversion constant: + * 00:00:00 January 1, 1970 - 00:00:00 January 1, 1904 + * (3600 * 24 * ((365 * (1970 - 1904)) + (((1970 - 1904) / 4) + 1))) + */ +#define MAC_GMT_FACTOR 2082844800UL + +/* + * GetTimeUTC - get the GMT Mac OS time (in seconds since 1/1/1904) + * + */ +UInt32 GetTimeUTC(void) +{ + struct timeval time; + struct timezone zone; + + (void) gettimeofday(&time, &zone); + + return time.tv_sec + MAC_GMT_FACTOR; +} + +/* + * GetTimeLocal - get the local Mac OS time (in seconds since 1/1/1904) + * + */ +UInt32 GetTimeLocal(Boolean forHFS) +{ + struct timeval time; + struct timezone zone; + time_t localTime; + + (void) gettimeofday(&time, &zone); + localTime = time.tv_sec + MAC_GMT_FACTOR - (zone.tz_minuteswest * 60); + + if (forHFS && zone.tz_dsttime) + localTime += 3600; + + return (UInt32)localTime; +} + + +OSErr FlushVol(ConstStr63Param volName, short vRefNum) +{ + sync(); + + return (0); +} + + +OSErr MemError() +{ + return (0); +} + +void DebugStr(ConstStr255Param debuggerMsg) +{ + /* DebugStr is only called when built with DEBUG_BUILD set */ + plog ("\t%.*s\n", debuggerMsg[0], &debuggerMsg[1]); +} + + +UInt32 TickCount() +{ + return (0); +} + + +OSErr GetVolumeFeatures( SGlobPtr GPtr ) +{ + GPtr->volumeFeatures = supportsTrashVolumeCacheFeatureMask + supportsHFSPlusVolsFeatureMask; + + return( noErr ); +} + + +Handle NewHandleClear(Size byteCount) +{ + return NewHandle(byteCount); +} + +Handle NewHandle(Size byteCount) +{ + Handle h; + Ptr p = NULL; + + if (!(h = malloc(sizeof(Ptr) + sizeof(Size)))) + return NULL; + + if (byteCount) + if (!(p = calloc(1, byteCount))) + { + free(h); + return NULL; + } + + *h = p; + + *((Size *)(h + 1)) = byteCount; + + return h; +} + +void DisposeHandle(Handle h) +{ + if (h) + { + if (*h) + free(*h); + free(h); + } +} + +Size GetHandleSize(Handle h) +{ + return h ? 
*((Size *)(h + 1)) : 0; +} + +void SetHandleSize(Handle h, Size newSize) +{ + Ptr p = NULL; + + if (!h) + return; + + if ((p = realloc(*h, newSize))) + { + *h = p; + *((Size *)(h + 1)) = newSize; + } +} + + +OSErr PtrAndHand(const void *ptr1, Handle hand2, long size) +{ + Ptr p = NULL; + Size old_size = 0; + + if (!hand2) + return -109; + + if (!ptr1 || size < 1) + return 0; + + old_size = *((Size *)(hand2 + 1)); + + if (!(p = realloc(*hand2, size + old_size))) + return -108; + + *hand2 = p; + *((Size *)(hand2 + 1)) = size + old_size; + + memcpy(*hand2 + old_size, ptr1, size); + + return 0; +} + + +/* deprecated call, use fsckPrint() instead */ +void WriteError( SGlobPtr GPtr, short msgID, UInt32 tarID, UInt64 tarBlock ) +{ + fsckPrint(GPtr->context, msgID); + + if ((fsckGetVerbosity(GPtr->context) > 0) && + (fsckGetOutputStyle(GPtr->context) == fsckOutputTraditional) && + (tarID | tarBlock) != 0) { + plog("(%ld, %qd)\n", (long)tarID, tarBlock); + } +} diff --git a/fsck_hfs/dfalib/SUtils.c b/fsck_hfs/dfalib/SUtils.c new file mode 100644 index 0000000..6404891 --- /dev/null +++ b/fsck_hfs/dfalib/SUtils.c @@ -0,0 +1,2735 @@ +/* + * Copyright (c) 1999-2003, 2005-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SUtils.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved. 
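+
+	(General-purpose scavenger utilities: UTF-8/Unicode string conversion,
+	repair-order list management, B-tree and catalog-key helpers, and
+	volume-object block access.)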
+*/ + +#include "Scavenger.h" + +static void CompareVolHeaderBTreeSizes( SGlobPtr GPtr, + VolumeObjectPtr theVOPtr, + HFSPlusVolumeHeader * thePriVHPtr, + HFSPlusVolumeHeader * theAltVHPtr ); +static void GetEmbeddedVolumeHeaders( SGlobPtr GPtr, + HFSMasterDirectoryBlock * myMDBPtr, + Boolean isPrimaryMDB ); +static OSErr GetVolumeObjectBlock( VolumeObjectPtr theVOPtr, + UInt64 theBlockNum, + BlockDescriptor * theBlockDescPtr ); +static OSErr VolumeObjectFixPrimaryBlock( void ); + +/* + * utf_encodestr + * + * Encode a UCS-2 (Unicode) string to UTF-8 + */ +int utf_encodestr(ucsp, ucslen, utf8p, utf8len, utf8plen) + const u_int16_t * ucsp; + size_t ucslen; + unsigned char * utf8p; + size_t * utf8len; + size_t utf8plen; +{ + unsigned char * bufstart; + u_int16_t ucs_ch; + size_t charcnt; + + bufstart = utf8p; + charcnt = ucslen / 2; + + while (charcnt-- > 0 && utf8plen > 0) { + ucs_ch = *ucsp++; + + if (ucs_ch < 0x0080) { + if (ucs_ch == '\0') + continue; /* skip over embedded NULLs */ + + *utf8p++ = ucs_ch; + utf8plen--; + } else if (ucs_ch < 0x800) { + if (utf8plen < 2) /* We're about to over-flow the buffer */ + break; + utf8plen -= 2; + *utf8p++ = (ucs_ch >> 6) | 0xc0; + *utf8p++ = (ucs_ch & 0x3f) | 0x80; + } else { + if (utf8plen < 3) /* We're about to over-flow the buffer */ + break; + utf8plen -= 3; + *utf8p++ = (ucs_ch >> 12) | 0xe0; + *utf8p++ = ((ucs_ch >> 6) & 0x3f) | 0x80; + *utf8p++ = ((ucs_ch) & 0x3f) | 0x80; + } + } + + *utf8len = utf8p - bufstart; + + return (0); +} + + +/* + * utf_decodestr + * + * Decode a UTF-8 string back to UCS-2 (Unicode) + * + * N.B.: ucslen on input describes the length of the buffer; + * on return, it describes how many bytes were used. + */ +int +utf_decodestr(utf8p, utf8len, ucsp, ucslen, ucsplen) + const unsigned char * utf8p; + size_t utf8len; + u_int16_t* ucsp; + size_t *ucslen; + size_t ucsplen; +{ + u_int16_t* bufstart; + u_int16_t ucs_ch; + u_int8_t byte; + + bufstart = ucsp; + + while (utf8len-- > 0 && (byte = *utf8p++) != '\0' && ucsplen > 0) { + /* check for ascii */ + if (byte < 0x80) { + *ucsp++ = byte; + ucsplen--; + continue; + } + + switch (byte & 0xf0) { + /* 2 byte sequence*/ + case 0xc0: + case 0xd0: + /* extract bits 6 - 10 from first byte */ + ucs_ch = (byte & 0x1F) << 6; + if (ucs_ch < 0x0080) + return (-1); /* seq not minimal */ + break; + /* 3 byte sequence*/ + case 0xe0: + /* extract bits 12 - 15 from first byte */ + ucs_ch = (byte & 0x0F) << 6; + + /* extract bits 6 - 11 from second byte */ + if (((byte = *utf8p++) & 0xc0) != 0x80) + return (-1); + + utf8len--; + ucs_ch += (byte & 0x3F); + ucs_ch <<= 6; + if (ucs_ch < 0x0800) + return (-1); /* seq not minimal */ + break; + default: + return (-1); + } + + /* extract bits 0 - 5 from final byte */ + if (((byte = *utf8p++) & 0xc0) != 0x80) + return (-1); + + utf8len--; + ucs_ch += (byte & 0x3F); + *ucsp++ = ucs_ch; + ucsplen--; + } + + *ucslen = (u_int8_t*)ucsp - (u_int8_t*)bufstart; + + return (0); +} + + +OSErr GetFBlk( SGlobPtr GPtr, SInt16 fileRefNum, SInt32 blockNumber, void **bufferH ); + + +UInt32 gDFAStage; + +UInt32 GetDFAStage( void ) +{ + return (gDFAStage); +} + +void SetDFAStage( UInt32 stage ) +{ + gDFAStage = stage; +} + + +/*------------------------------------------------------------------------------ + +Routine: RcdError + +Function: Record errors detetected by scavenging operation. + +Input: GPtr - pointer to scavenger global area. 
+ ErrCode - error code + +Output: None +------------------------------------------------------------------------------*/ + +void RcdError( SGlobPtr GPtr, OSErr errorCode ) +{ + GPtr->ErrCode = errorCode; + + WriteError( GPtr, errorCode, GPtr->TarID, GPtr->TarBlock ); // log to summary window +} + + +/*------------------------------------------------------------------------------ + +Routine: IntError + +Function: Records an internal Scavenger error. + +Input: GPtr - pointer to scavenger global area. + ErrCode - internal error code + +Output: IntError - function result: + (E_IntErr for now) +------------------------------------------------------------------------------*/ + +int IntError( SGlobPtr GPtr, OSErr errorCode ) +{ + GPtr->RepLevel = repairLevelUnrepairable; + + if ( errorCode == ioErr ) // Cast I/O errors as read errors + errorCode = R_RdErr; + + if( (errorCode == R_RdErr) || (errorCode == R_WrErr) ) + { + GPtr->ErrCode = GPtr->volumeErrorCode; + GPtr->IntErr = 0; + return( errorCode ); + } + else + { + GPtr->ErrCode = R_IntErr; + GPtr->IntErr = errorCode; + return( R_IntErr ); + } + +} // End of IntError + + + +/*------------------------------------------------------------------------------ + +Routine: AllocBTN (Allocate BTree Node) + +Function: Allocates an BTree node in a Scavenger BTree bit map. + +Input: GPtr - pointer to scavenger global area. + StABN - starting allocation block number. + NmABlks - number of allocation blocks. + +Output: AllocBTN - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +int AllocBTN( SGlobPtr GPtr, SInt16 fileRefNum, UInt32 nodeNumber ) +{ + UInt16 bitPos; + unsigned char mask; + char *byteP; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( fileRefNum ); + + // Allocate the node + if ( calculatedBTCB->refCon == 0) + return( noErr ); + + byteP = ( (BTreeExtensionsRec*)calculatedBTCB->refCon)->BTCBMPtr + (nodeNumber / 8 ); // ptr to starting byte + bitPos = nodeNumber % 8; // bit offset + mask = ( 0x80 >> bitPos ); + if ( (*byteP & mask) != 0 ) + { + RcdError( GPtr, E_OvlNode ); + return( E_OvlNode ); // node already allocated + } + *byteP = *byteP | mask; // allocate it + calculatedBTCB->freeNodes--; // decrement free count + + return( noErr ); +} + + +OSErr GetBTreeHeader( SGlobPtr GPtr, SFCB *fcb, BTHeaderRec *header ) +{ + OSErr err; + BTHeaderRec *headerRec; + BlockDescriptor block; + + GPtr->TarBlock = kHeaderNodeNum; + + if (fcb->fcbBlockSize == 0) + (void) SetFileBlockSize(fcb, 512); + + err = GetFileBlock(fcb, kHeaderNodeNum, kGetBlock, &block); + ReturnIfError(err); + + err = hfs_swap_BTNode(&block, fcb, kSwapBTNodeHeaderRecordOnly); + if (err != noErr) + { + (void) ReleaseFileBlock(fcb, &block, kReleaseBlock | kTrashBlock); + return err; + } + + headerRec = (BTHeaderRec *)((char*)block.buffer + sizeof(BTNodeDescriptor)); + CopyMemory(headerRec, header, sizeof(BTHeaderRec)); + + err = hfs_swap_BTNode(&block, fcb, kSwapBTNodeHeaderRecordOnly); + if (err != noErr) + { + (void) ReleaseFileBlock(fcb, &block, kReleaseBlock | kTrashBlock); + return err; + } + + err = ReleaseFileBlock (fcb, &block, kReleaseBlock); + ReturnIfError(err); + + /* Validate Node Size */ + switch (header->nodeSize) { + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + case 32768: + break; + + default: + RcdError( GPtr, E_InvalidNodeSize ); + err = E_InvalidNodeSize; + } + + return( err ); +} + + 
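/*
 * Minimal standalone sketch, not part of the patch, of the bitmap
 * arithmetic AllocBTN uses above: node N lives in byte N/8 of the
 * in-memory node map, at bit position N%8 counted from the most
 * significant bit.  Assumes the UInt32 typedef from SRuntime.h; the
 * name btmap_test_and_set is illustrative only.
 */
static int btmap_test_and_set(unsigned char *map, UInt32 nodeNumber)
{
	unsigned char *byteP = map + (nodeNumber / 8);               /* byte holding this node's bit */
	unsigned char  mask  = (unsigned char)(0x80 >> (nodeNumber % 8));

	if (*byteP & mask)
		return 1;         /* already allocated; AllocBTN reports E_OvlNode here */
	*byteP |= mask;       /* mark the node allocated */
	return 0;
}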
+/*------------------------------------------------------------------------------ + +Routine: IsDuplicateRepairOrder + +Function: Search a duplicate repair order node in the GPtr->MinorRepairP + list. This function traverses the entire list of minor + repairs, and compares the following fields to determine a + match - type, forkType, correct, incorrect, maskBit, hint, + and parentID. + +Input: GPtr - scavenger globals + orig - repair order to search and compare + +Output: 0 - no duplicate was found, + 1 - duplicate was found. +------------------------------------------------------------------------------*/ +int IsDuplicateRepairOrder(SGlobPtr GPtr, RepairOrderPtr orig) +{ + RepairOrderPtr cur; + int retval = 0; + + cur = GPtr->MinorRepairsP; + while (cur) { + if (cur != orig) { + /* If all these values match, this is a duplicate */ + if ((orig->type == cur->type) && + (orig->correct == cur->correct) && + (orig->incorrect == cur->incorrect) && + (orig->parid == cur->parid) && + (orig->forkType == cur->forkType) && + (orig->maskBit == cur->maskBit) && + (orig->hint == cur->hint)) { + retval = 1; + break; + } + } + cur = cur->link; + } + + return retval; +} + +/*------------------------------------------------------------------------------ + +Routine: DeleteRepairOrder + +Function: Deletes the minor repair order that matches the repair order + provided from the list. This function should be called when + a duplicate repair order is detected. + +Input: GPtr - scavenger globals + orig - repair order to remove + +Output: Nothing +------------------------------------------------------------------------------*/ +void DeleteRepairOrder(SGlobPtr GPtr, RepairOrderPtr orig) +{ + RepairOrderPtr cur; + RepairOrderPtr prev = NULL; + + cur = GPtr->MinorRepairsP; + while (cur) { + if (cur == orig) { + if (prev) { + prev->link = cur->link; + } + if (cur == GPtr->MinorRepairsP) { + GPtr->MinorRepairsP = cur->link; + } + DisposeMemory(cur); + } + prev = cur; + cur = cur->link; + } + + return; +} + + +/*------------------------------------------------------------------------------ + +Routine: Alloc[Minor/Major]RepairOrder + +Function: Allocate a repair order node and link into the 'GPtr->RepairXxxxxP" list. + These are descriptions of minor/major repairs that need to be performed; + they are compiled during verification, and executed during minor/major repair. + +Input: GPtr - scavenger globals + n - number of extra bytes needed, in addition to standard node size. + +Output: Ptr to node, or NULL if out of memory or other error. +------------------------------------------------------------------------------*/ + +RepairOrderPtr AllocMinorRepairOrder( SGlobPtr GPtr, size_t n ) /* #extra bytes needed */ +{ + RepairOrderPtr p; // the node we allocate + + n += sizeof( RepairOrder ); // add in size of basic node + + p = (RepairOrderPtr) AllocateClearMemory( n ); // get the node + + if ( p != NULL ) // if we got one... 
+ { + p->link = GPtr->MinorRepairsP; // then link into list of repairs + GPtr->MinorRepairsP = p; + } + else if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - AllocateClearMemory failed to allocate %d bytes \n", __FUNCTION__, n); + + if ( GPtr->RepLevel == repairLevelNoProblemsFound ) + GPtr->RepLevel = repairLevelVolumeRecoverable; + + return( p ); // return ptr to node +} + + + +void InvalidateCalculatedVolumeBitMap( SGlobPtr GPtr ) +{ + +} + + + +//------------------------------------------------------------------------------ +// Routine: GetVolumeFeatures +// +// Function: Sets up some OS and volume specific flags +// +// Input: GPtr->DrvNum The volume to check +// +// Output: GPtr->volumeFeatures Bit vector +// GPtr->realVCB Real in-memory vcb +//------------------------------------------------------------------------------ + +#if !BSD +OSErr GetVolumeFeatures( SGlobPtr GPtr ) +{ + OSErr err; + HParamBlockRec pb; + GetVolParmsInfoBuffer buffer; + long response; + + GPtr->volumeFeatures = 0; // Initialize to zero + + // Get the "real" vcb + err = GetVCBDriveNum( &GPtr->realVCB, GPtr->DrvNum ); + ReturnIfError( err ); + + if ( GPtr->realVCB != nil ) + { + GPtr->volumeFeatures |= volumeIsMountedMask; + + pb.ioParam.ioNamePtr = nil; + pb.ioParam.ioVRefNum = GPtr->realVCB->vcbVRefNum; + pb.ioParam.ioBuffer = (Ptr) &buffer; + pb.ioParam.ioReqCount = sizeof( buffer ); + + if ( PBHGetVolParms( &pb, false ) == noErr ) + { + if ( buffer.vMAttrib & (1 << bSupportsTrashVolumeCache) ) + GPtr->volumeFeatures |= supportsTrashVolumeCacheFeatureMask; + } + } + // Check if the running system is HFS+ savy + err = Gestalt( gestaltFSAttr, &response ); + ReturnIfError( err ); + if ( (response & (1 << gestaltFSSupportsHFSPlusVols)) != 0 ) + GPtr->volumeFeatures |= supportsHFSPlusVolsFeatureMask; + + return( noErr ); +} +#endif + + + +/*------------------------------------------------------------------------------- +Routine: ClearMemory - clear a block of memory + +-------------------------------------------------------------------------------*/ +#if !BSD +void ClearMemory( void* start, UInt32 length ) +{ + UInt32 zero = 0; + UInt32* dataPtr; + UInt8* bytePtr; + UInt32 fragCount; // serves as both a length and quadlong count + // for the beginning and main fragment + + if ( length == 0 ) + return; + + // is request less than 4 bytes? + if ( length < 4 ) // length = 1,2 or 3 + { + bytePtr = (UInt8 *) start; + + do + { + *bytePtr++ = zero; // clear one byte at a time + } + while ( --length ); + + return; + } + + // are we aligned on an odd boundry? + fragCount = (UInt32) start & 3; + + if ( fragCount ) // fragCount = 1,2 or 3 + { + bytePtr = (UInt8 *) start; + + do + { + *bytePtr++ = zero; // clear one byte at a time + ++fragCount; + --length; + } + while ( (fragCount < 4) && (length > 0) ); + + if ( length == 0 ) + return; + + dataPtr = (UInt32*) (((UInt32) start & 0xFFFFFFFC) + 4); // make it long word aligned + } + else + { + dataPtr = (UInt32*) ((UInt32) start & 0xFFFFFFFC); // make it long word aligned + } + + // At this point dataPtr is long aligned + + // are there odd bytes to copy? 
+ fragCount = length & 3; + + if ( fragCount ) + { + bytePtr = (UInt8 *) ((UInt32) dataPtr + (UInt32) length - 1); // point to last byte + + length -= fragCount; // adjust remaining length + + do + { + *bytePtr-- = zero; // clear one byte at a time + } + while ( --fragCount ); + + if ( length == 0 ) + return; + } + + // At this point length is a multiple of 4 + + #if DEBUG_BUILD + if ( length < 4 ) + DebugStr("\p ClearMemory: length < 4"); + #endif + + // fix up beginning to get us on a 64 byte boundary + fragCount = length & (64-1); + + #if DEBUG_BUILD + if ( fragCount < 4 && fragCount > 0 ) + DebugStr("\p ClearMemory: fragCount < 4"); + #endif + + if ( fragCount ) + { + length -= fragCount; // subtract fragment from length now + fragCount >>= 2; // divide by 4 to get a count, for DBRA loop + do + { + // clear 4 bytes at a time... + *dataPtr++ = zero; + } + while (--fragCount); + } + + // Are we finished yet? + if ( length == 0 ) + return; + + // Time to turn on the fire hose + length >>= 6; // divide by 64 to get count + do + { + // spray 64 bytes at a time... + *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; + *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; + *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; + *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; *dataPtr++ = zero; + } + while (--length); +} +#endif + + + +void +CopyCatalogName(const CatalogName *srcName, CatalogName *dstName, Boolean isHFSPLus) +{ + size_t length; + + if ( srcName == NULL ) + { + if ( dstName != NULL ) + dstName->ustr.length = 0; // set length byte to zero (works for both unicode and pascal) + return; + } + + if (isHFSPLus) + length = sizeof(UniChar) * (srcName->ustr.length + 1); + else + length = sizeof(UInt8) + srcName->pstr[0]; + + if ( length > 1 ) + CopyMemory(srcName, dstName, length); + else + dstName->ustr.length = 0; // set length byte to zero (works for both unicode and pascal) +} + + +UInt32 +CatalogNameLength(const CatalogName *name, Boolean isHFSPlus) +{ + if (isHFSPlus) + return name->ustr.length; + else + return name->pstr[0]; +} + + +UInt32 CatalogNameSize( const CatalogName *name, Boolean isHFSPlus) +{ + UInt32 length = CatalogNameLength( name, isHFSPlus ); + + if ( isHFSPlus ) + length *= sizeof(UniChar); + + return( length ); +} + + +//****************************************************************************** +// Routine: BuildCatalogKey +// +// Function: Constructs a catalog key record (ckr) given the parent +// folder ID and CName. Works for both classic and extended +// HFS volumes. 
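+//
+//			The stored keyLength does not count the keyLength field itself:
+//			for HFS Plus it starts at kHFSPlusCatalogKeyMinimumLength (4-byte
+//			parentID + 2-byte name length) and grows by sizeof(UniChar) per
+//			name character, so a 5-character name gives 6 + 5*2 = 16.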
+// +//****************************************************************************** + +void +BuildCatalogKey(HFSCatalogNodeID parentID, const CatalogName *cName, Boolean isHFSPlus, CatalogKey *key) +{ + if ( isHFSPlus ) + { + key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength; // initial key length (4 + 2) + key->hfsPlus.parentID = parentID; // set parent ID + key->hfsPlus.nodeName.length = 0; // null CName length + if ( cName != NULL ) + { + CopyCatalogName(cName, (CatalogName *) &key->hfsPlus.nodeName, isHFSPlus); + key->hfsPlus.keyLength += sizeof(UniChar) * cName->ustr.length; // add CName size to key length + } + } + else + { + key->hfs.keyLength = kHFSCatalogKeyMinimumLength; // initial key length (1 + 4 + 1) + key->hfs.reserved = 0; // clear unused byte + key->hfs.parentID = parentID; // set parent ID + key->hfs.nodeName[0] = 0; // null CName length + if ( cName != NULL ) + { + UpdateCatalogName(cName->pstr, key->hfs.nodeName); + key->hfs.keyLength += key->hfs.nodeName[0]; // add CName size to key length + } + } +} + + +// Defined in BTreesPrivate.h, but not implemented in the BTree code? +// So... here's the implementation +SInt32 CompareKeys( BTreeControlBlockPtr btreePtr, KeyPtr searchKey, KeyPtr trialKey ) +{ + KeyCompareProcPtr compareProc = (KeyCompareProcPtr)btreePtr->keyCompareProc; + + return( compareProc(searchKey, trialKey) ); +} + + +void +UpdateCatalogName(ConstStr31Param srcName, Str31 destName) +{ + Size length = srcName[0]; + + if (length > kHFSMaxFileNameChars) + length = kHFSMaxFileNameChars; // truncate to max + + destName[0] = length; // set length byte + + CopyMemory(&srcName[1], &destName[1], length); +} + + +void +UpdateVolumeEncodings(SVCB *volume, TextEncoding encoding) +{ + UInt32 index; + + encoding &= 0x7F; + + index = MapEncodingToIndex(encoding); + + volume->vcbEncodingsBitmap |= (u_int64_t)(1ULL << index); + + // vcb should already be marked dirty +} + + +//****************************************************************************** +// Routine: VolumeObjectFixPrimaryBlock +// +// Function: Use the alternate Volume Header or Master Directory block (depending +// on the type of volume) to restore the primary block. This routine +// depends upon our intialization code to set up where are blocks are +// located. +// +// Result: 0 if all is well, noMacDskErr when we do not have a primary block +// number or whatever GetVolumeObjectAlternateBlock returns. 
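+//
+//			badMDBErr is also returned when the alternate copy has not been
+//			verified (kVO_AltMDBOK / kVO_AltVHBOK is clear), since restoring
+//			the primary from an unverified alternate could propagate damage.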
+//****************************************************************************** + +static OSErr VolumeObjectFixPrimaryBlock( void ) +{ + OSErr err; + VolumeObjectPtr myVOPtr; + UInt64 myPrimaryBlockNum; + BlockDescriptor myPrimary; + BlockDescriptor myAlternate; + + myVOPtr = GetVolumeObjectPtr( ); + myPrimary.buffer = NULL; + myAlternate.buffer = NULL; + + GetVolumeObjectPrimaryBlockNum( &myPrimaryBlockNum ); + if ( myPrimaryBlockNum == 0 ) + return( noMacDskErr ); + + // we don't care if this is a valid primary block since we're + // about to write over it + err = GetVolumeObjectPrimaryBlock( &myPrimary ); + if ( !(err == noErr || err == badMDBErr || err == noMacDskErr) ) + goto ExitThisRoutine; + + // restore the primary block from the alternate + err = GetVolumeObjectAlternateBlock( &myAlternate ); + + // invalidate if we have not marked the alternate as OK + if ( VolumeObjectIsHFS( ) ) { + if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) + err = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 ) { + err = badMDBErr; + } + + if ( err == noErr ) { + CopyMemory( myAlternate.buffer, myPrimary.buffer, Blk_Size ); + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kForceWriteBlock ); + myPrimary.buffer = NULL; + if ( myVOPtr->volumeType == kHFSVolumeType ) + myVOPtr->flags |= kVO_PriMDBOK; + else + myVOPtr->flags |= kVO_PriVHBOK; + } + +ExitThisRoutine: + if ( myPrimary.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kReleaseBlock ); + if ( myAlternate.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kReleaseBlock ); + + return( err ); + +} /* VolumeObjectFixPrimaryBlock */ + + +//****************************************************************************** +// Routine: GetVolumeObjectVHBorMDB +// +// Function: Get the Volume Header block or Master Directory block (depending +// on type of volume). This will normally return the alternate, but +// it may return the primary when the alternate is damaged or cannot +// be found. +// +// Result: returns 0 when all is well. +//****************************************************************************** +OSErr GetVolumeObjectVHBorMDB( BlockDescriptor * theBlockDescPtr ) +{ + UInt64 myBlockNum; + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + GetVolumeObjectBlockNum( &myBlockNum ); + + err = GetVolumeObjectBlock( myVOPtr, myBlockNum, theBlockDescPtr ); + if ( err == noErr ) + { + if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType || + myVOPtr->volumeType == kPureHFSPlusVolumeType ) + { + err = ValidVolumeHeader( (HFSPlusVolumeHeader*) theBlockDescPtr->buffer ); + } + else if ( myVOPtr->volumeType == kHFSVolumeType ) + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) theBlockDescPtr->buffer; + if ( myMDBPtr->drSigWord != kHFSSigWord ) + err = noMacDskErr; + } + else + err = noMacDskErr; + } + + return( err ); + +} /* GetVolumeObjectVHBorMDB */ + + +//****************************************************************************** +// Routine: GetVolumeObjectAlternateBlock +// +// Function: Get the alternate Volume Header block or Master Directory block +// (depending on type of volume). +// Result: returns 0 when all is well. 
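+//
+//	Typical acquire / release pattern (illustrative, simplified from
+//	VolumeObjectFixPrimaryBlock above):
+//
+//		BlockDescriptor myAlternate;
+//		myAlternate.buffer = NULL;
+//		err = GetVolumeObjectAlternateBlock( &myAlternate );
+//		/* ... use myAlternate.buffer ... */
+//		if ( myAlternate.buffer != NULL )
+//			(void) ReleaseVolumeBlock( GetVolumeObjectPtr()->vcbPtr, &myAlternate, kReleaseBlock );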
+//****************************************************************************** +OSErr GetVolumeObjectAlternateBlock( BlockDescriptor * theBlockDescPtr ) +{ + UInt64 myBlockNum; + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + GetVolumeObjectAlternateBlockNum( &myBlockNum ); + + err = GetVolumeObjectBlock( myVOPtr, myBlockNum, theBlockDescPtr ); + if ( err == noErr ) + { + if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType || + myVOPtr->volumeType == kPureHFSPlusVolumeType ) + { + err = ValidVolumeHeader( (HFSPlusVolumeHeader*) theBlockDescPtr->buffer ); + } + else if ( myVOPtr->volumeType == kHFSVolumeType ) + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) theBlockDescPtr->buffer; + if ( myMDBPtr->drSigWord != kHFSSigWord ) + err = noMacDskErr; + } + else + err = noMacDskErr; + } + + return( err ); + +} /* GetVolumeObjectAlternateBlock */ + + +//****************************************************************************** +// Routine: GetVolumeObjectPrimaryBlock +// +// Function: Get the primary Volume Header block or Master Directory block +// (depending on type of volume). +// Result: returns 0 when all is well. +//****************************************************************************** +OSErr GetVolumeObjectPrimaryBlock( BlockDescriptor * theBlockDescPtr ) +{ + UInt64 myBlockNum; + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + GetVolumeObjectPrimaryBlockNum( &myBlockNum ); + + err = GetVolumeObjectBlock( myVOPtr, myBlockNum, theBlockDescPtr ); + if ( err == noErr ) + { + if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType || + myVOPtr->volumeType == kPureHFSPlusVolumeType ) + { + err = ValidVolumeHeader( (HFSPlusVolumeHeader*) theBlockDescPtr->buffer ); + } + else if ( myVOPtr->volumeType == kHFSVolumeType ) + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) theBlockDescPtr->buffer; + if ( myMDBPtr->drSigWord != kHFSSigWord ) + err = noMacDskErr; + } + else + err = noMacDskErr; + } + + return( err ); + +} /* GetVolumeObjectPrimaryBlock */ + + +//****************************************************************************** +// Routine: GetVolumeObjectVHB +// +// Function: Get the Volume Header block using either the primary or alternate +// block number as set up by InitializeVolumeObject. This will normally +// return the alternate, but it may return the primary when the +// alternate is damaged or cannot be found. +// +// Result: returns 0 when all is well or passes results of GetVolumeBlock or +// ValidVolumeHeader. +//****************************************************************************** +OSErr GetVolumeObjectVHB( BlockDescriptor * theBlockDescPtr ) +{ + UInt64 myBlockNum; + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + myBlockNum = ((myVOPtr->flags & kVO_AltVHBOK) != 0) ? myVOPtr->alternateVHB : myVOPtr->primaryVHB; + err = GetVolumeObjectBlock( myVOPtr, myBlockNum, theBlockDescPtr ); + if ( err == noErr ) + err = ValidVolumeHeader( (HFSPlusVolumeHeader*) theBlockDescPtr->buffer ); + + return( err ); + +} /* GetVolumeObjectVHB */ + + +//****************************************************************************** +// Routine: GetVolumeObjectAlternateMDB +// +// Function: Get the Master Directory Block using the alternate master directory +// block number as set up by InitializeVolumeObject. +// +// Result: returns 0 when all is well. 
+//****************************************************************************** +OSErr GetVolumeObjectAlternateMDB( BlockDescriptor * theBlockDescPtr ) +{ + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + err = GetVolumeObjectBlock( NULL, myVOPtr->alternateMDB, theBlockDescPtr ); + if ( err == noErr ) + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) theBlockDescPtr->buffer; + if ( myMDBPtr->drSigWord != kHFSSigWord ) + err = noMacDskErr; + } + + return( err ); + +} /* GetVolumeObjectAlternateMDB */ + + +//****************************************************************************** +// Routine: GetVolumeObjectPrimaryMDB +// +// Function: Get the Master Directory Block using the primary master directory +// block number as set up by InitializeVolumeObject. +// +// Result: returns 0 when all is well. +//****************************************************************************** +OSErr GetVolumeObjectPrimaryMDB( BlockDescriptor * theBlockDescPtr ) +{ + VolumeObjectPtr myVOPtr; + OSErr err; + + myVOPtr = GetVolumeObjectPtr( ); + err = GetVolumeObjectBlock( NULL, myVOPtr->primaryMDB, theBlockDescPtr ); + if ( err == noErr ) + { + HFSMasterDirectoryBlock * myMDBPtr; + myMDBPtr = (HFSMasterDirectoryBlock *) theBlockDescPtr->buffer; + if ( myMDBPtr->drSigWord != kHFSSigWord ) + err = noMacDskErr; + } + + return( err ); + +} /* GetVolumeObjectPrimaryMDB */ + + +//****************************************************************************** +// Routine: GetVolumeObjectBlock +// +// Function: Get the Volume Header block or Master Directory block using the +// given block number. +// Result: returns 0 when all is well or passes results of GetVolumeBlock or +// ValidVolumeHeader. +//****************************************************************************** +static OSErr GetVolumeObjectBlock( VolumeObjectPtr theVOPtr, + UInt64 theBlockNum, + BlockDescriptor * theBlockDescPtr ) +{ + OSErr err; + + if ( theVOPtr == NULL ) + theVOPtr = GetVolumeObjectPtr( ); + + err = GetVolumeBlock( theVOPtr->vcbPtr, theBlockNum, kGetBlock, theBlockDescPtr ); + + return( err ); + +} /* GetVolumeObjectBlock */ + + +//****************************************************************************** +// Routine: GetVolumeObjectBlockNum +// +// Function: Extract the appropriate block number for the volume header or +// master directory (depanding on volume type) from the VolumeObject. +// NOTE - this routine may return the primary or alternate block +// depending on which one is valid. Preference is always given to +// the alternate. +// +// Result: returns block number of MDB or VHB or 0 if none are valid or +// if volume type is unknown. +//****************************************************************************** +void GetVolumeObjectBlockNum( UInt64 * theBlockNumPtr ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + *theBlockNumPtr = 0; // default to none + + // NOTE - we use alternate volume header or master directory + // block before the primary because it is less likely to be damaged. 
+	if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType ||
+	     myVOPtr->volumeType == kPureHFSPlusVolumeType ) {
+		if ( (myVOPtr->flags & kVO_AltVHBOK) != 0 )
+			*theBlockNumPtr = myVOPtr->alternateVHB;
+		else
+			*theBlockNumPtr = myVOPtr->primaryVHB;
+	}
+	else if ( myVOPtr->volumeType == kHFSVolumeType ) {
+		if ( (myVOPtr->flags & kVO_AltMDBOK) != 0 )
+			*theBlockNumPtr = myVOPtr->alternateMDB;
+		else
+			*theBlockNumPtr = myVOPtr->primaryMDB;
+	}
+
+	return;
+
+} /* GetVolumeObjectBlockNum */
+
+
+//******************************************************************************
+//	Routine:	GetVolumeObjectAlternateBlockNum
+//
+//	Function:	Extract the alternate block number for the volume header or
+//				master directory (depending on volume type) from the VolumeObject.
+//
+//	Result:		returns block number of alternate MDB or VHB or 0 if none are
+//				valid or if volume type is unknown.
+//******************************************************************************
+void GetVolumeObjectAlternateBlockNum( UInt64 * theBlockNumPtr )
+{
+	VolumeObjectPtr		myVOPtr;
+
+	myVOPtr = GetVolumeObjectPtr( );
+	*theBlockNumPtr = 0;		// default to none
+
+	if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType ||
+	     myVOPtr->volumeType == kPureHFSPlusVolumeType ) {
+		*theBlockNumPtr = myVOPtr->alternateVHB;
+	}
+	else if ( myVOPtr->volumeType == kHFSVolumeType ) {
+		*theBlockNumPtr = myVOPtr->alternateMDB;
+	}
+
+	return;
+
+} /* GetVolumeObjectAlternateBlockNum */
+
+
+//******************************************************************************
+//	Routine:	GetVolumeObjectPrimaryBlockNum
+//
+//	Function:	Extract the primary block number for the volume header or
+//				master directory (depending on volume type) from the VolumeObject.
+//
+//	Result:		returns block number of primary MDB or VHB or 0 if none are valid
+//				or if volume type is unknown.
+//******************************************************************************
+void GetVolumeObjectPrimaryBlockNum( UInt64 * theBlockNumPtr )
+{
+	VolumeObjectPtr		myVOPtr;
+
+	myVOPtr = GetVolumeObjectPtr( );
+	*theBlockNumPtr = 0;		// default to none
+
+	if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType ||
+	     myVOPtr->volumeType == kPureHFSPlusVolumeType ) {
+		*theBlockNumPtr = myVOPtr->primaryVHB;
+	}
+	else if ( myVOPtr->volumeType == kHFSVolumeType ) {
+		*theBlockNumPtr = myVOPtr->primaryMDB;
+	}
+
+	return;
+
+} /* GetVolumeObjectPrimaryBlockNum */
+
+
+//******************************************************************************
+//	Routine:	InitializeVolumeObject
+//
+//	Function:	Locate volume headers and / or master directory blocks for this
+//				volume and fill in where they are located on the volume and the type
+//				of volume we are dealing with.  We have three types of HFS volumes:
+//				• HFS - standard (old format) where primary MDB is 2nd block into
+//					the volume and alternate MDB is 2nd to last block on the volume.
+//				• pure HFS+ - where primary volume header is 2nd block into
+//					the volume and alternate volume header is 2nd to last block on
+//					the volume.
+//				• wrapped HFS+ - where primary MDB is 2nd block into the volume and
+//					alternate MDB is 2nd to last block on the volume.  The embedded
+//					HFS+ volume header locations are calculated from drEmbedExtent
+//					(in the MDB).
+//
+//	Result:		returns nothing.
Will fill in SGlob.VolumeObject data +//****************************************************************************** +void InitializeVolumeObject( SGlobPtr GPtr ) +{ + OSErr err; + HFSMasterDirectoryBlock * myMDBPtr; + HFSPlusVolumeHeader * myVHPtr; + VolumeObjectPtr myVOPtr; + HFSPlusVolumeHeader myPriVolHeader; + BlockDescriptor myBlockDescriptor; + + myBlockDescriptor.buffer = NULL; + myVOPtr = GetVolumeObjectPtr( ); + myVOPtr->flags |= kVO_Inited; + myVOPtr->vcbPtr = GPtr->calculatedVCB; + + // Determine volume size in sectors + err = GetDeviceSize( GPtr->calculatedVCB->vcbDriveNumber, + &myVOPtr->totalDeviceSectors, + &myVOPtr->sectorSize ); + if ( (myVOPtr->totalDeviceSectors < 3) || (err != noErr) ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog("\tinvalid device information for volume - total sectors = %qd sector size = %d \n", + myVOPtr->totalDeviceSectors, myVOPtr->sectorSize); + } + goto ExitRoutine; + } + + // get the primary volume header or master directory block (depending on volume type) + // should always be block 2 (relative to 0) into the volume. + err = GetVolumeObjectBlock( myVOPtr, MDB_BlkN, &myBlockDescriptor ); + if ( err == noErr ) { + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + if ( myMDBPtr->drSigWord == kHFSPlusSigWord || myMDBPtr->drSigWord == kHFSXSigWord) { + myVHPtr = (HFSPlusVolumeHeader *) myMDBPtr; + + myVOPtr->primaryVHB = MDB_BlkN; // save location + myVOPtr->alternateVHB = myVOPtr->totalDeviceSectors - 2; // save location + err = ValidVolumeHeader( myVHPtr ); + if ( err == noErr ) { + myVOPtr->flags |= kVO_PriVHBOK; + bcopy( myVHPtr, &myPriVolHeader, sizeof( *myVHPtr ) ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tInvalid primary volume header - error %d \n", err ); + } + } + } + else if ( myMDBPtr->drSigWord == kHFSSigWord ) { + // we could have an HFS or wrapped HFS+ volume + myVOPtr->primaryMDB = MDB_BlkN; // save location + myVOPtr->alternateMDB = myVOPtr->totalDeviceSectors - 2; // save location + myVOPtr->flags |= kVO_PriMDBOK; + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tBlock %d is not an MDB or Volume Header \n", MDB_BlkN ); + } + } + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get volume block %d, err %d \n", MDB_BlkN, err ); + } + } + + // get the alternate volume header or master directory block (depending on volume type) + // should always be 2nd to last sector. + err = GetVolumeObjectBlock( myVOPtr, myVOPtr->totalDeviceSectors - 2, &myBlockDescriptor ); + if ( err == noErr ) { + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + if ( myMDBPtr->drSigWord == kHFSPlusSigWord || myMDBPtr->drSigWord == kHFSXSigWord ) { + myVHPtr = (HFSPlusVolumeHeader *) myMDBPtr; + + myVOPtr->primaryVHB = MDB_BlkN; // save location + myVOPtr->alternateVHB = myVOPtr->totalDeviceSectors - 2; // save location + err = ValidVolumeHeader( myVHPtr ); + if ( err == noErr ) { + // check to see if the primary and alternates are in sync. 
3137809 + myVOPtr->flags |= kVO_AltVHBOK; + CompareVolHeaderBTreeSizes( GPtr, myVOPtr, &myPriVolHeader, myVHPtr ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tInvalid alternate volume header - error %d \n", err ); + } + } + } + else if ( myMDBPtr->drSigWord == kHFSSigWord ) { + myVOPtr->primaryMDB = MDB_BlkN; // save location + myVOPtr->alternateMDB = myVOPtr->totalDeviceSectors - 2; // save location + myVOPtr->flags |= kVO_AltMDBOK; + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tBlock %qd is not an MDB or Volume Header \n", myVOPtr->totalDeviceSectors - 2 ); + } + } + + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get alternate volume header at %qd, err %d \n", + myVOPtr->totalDeviceSectors - 2, err ); + } + } + + // get the embedded volume header (if applicable). + if ( (myVOPtr->flags & kVO_AltMDBOK) != 0 ) { + err = GetVolumeObjectBlock( myVOPtr, myVOPtr->alternateMDB, &myBlockDescriptor ); + if ( err == noErr ) { + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + GetEmbeddedVolumeHeaders( GPtr, myMDBPtr, false ); + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + } + + // Now we will look for embedded HFS+ volume headers using the primary MDB if + // we haven't already located them. + if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 && + ((myVOPtr->flags & kVO_PriVHBOK) == 0 || (myVOPtr->flags & kVO_AltVHBOK) == 0) ) { + err = GetVolumeObjectBlock( myVOPtr, myVOPtr->primaryMDB, &myBlockDescriptor ); + if ( err == noErr ) { + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + GetEmbeddedVolumeHeaders( GPtr, myMDBPtr, true ); + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get primary MDB at block %qd, err %d \n", myVOPtr->primaryMDB, err ); + } + } + } + +ExitRoutine: + // set the type of volume using the flags we set as we located the various header / master + // blocks. + if ( ((myVOPtr->flags & kVO_PriVHBOK) != 0 || (myVOPtr->flags & kVO_AltVHBOK) != 0) && + ((myVOPtr->flags & kVO_PriMDBOK) != 0 || (myVOPtr->flags & kVO_AltMDBOK) != 0) ) { + myVOPtr->volumeType = kEmbededHFSPlusVolumeType; + } + else if ( ((myVOPtr->flags & kVO_PriVHBOK) != 0 || (myVOPtr->flags & kVO_AltVHBOK) != 0) && + (myVOPtr->flags & kVO_PriMDBOK) == 0 && (myVOPtr->flags & kVO_AltMDBOK) == 0 ) { + myVOPtr->volumeType = kPureHFSPlusVolumeType; + } + else if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 && (myVOPtr->flags & kVO_AltVHBOK) == 0 && + ((myVOPtr->flags & kVO_PriMDBOK) != 0 || (myVOPtr->flags & kVO_AltMDBOK) != 0) ) { + myVOPtr->volumeType = kHFSVolumeType; + } + else + myVOPtr->volumeType = kUnknownVolumeType; + + return; + +} /* InitializeVolumeObject */ + + +//****************************************************************************** +// Routine: PrintVolumeObject +// +// Function: Print out some helpful info about the state of our VolumeObject. +// +// Result: returns nothing. 
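+//
+//	Sample output (hypothetical values, abridged):
+//
+//		volume type is embedded HFS+
+//		primary MDB is at block 2 0x02
+//		alternate MDB is at block 2047998 0x1f3ffe
+//		sector size = 512 0x200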
+//****************************************************************************** +void PrintVolumeObject( void ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + + if ( myVOPtr->volumeType == kHFSVolumeType ) + plog( "\tvolume type is HFS \n" ); + else if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType ) + plog( "\tvolume type is embedded HFS+ \n" ); + else if ( myVOPtr->volumeType == kPureHFSPlusVolumeType ) + plog( "\tvolume type is pure HFS+ \n" ); + else + plog( "\tunknown volume type \n" ); + + plog( "\tprimary MDB is at block %qd 0x%02qx \n", myVOPtr->primaryMDB, myVOPtr->primaryMDB ); + plog( "\talternate MDB is at block %qd 0x%02qx \n", myVOPtr->alternateMDB, myVOPtr->alternateMDB ); + plog( "\tprimary VHB is at block %qd 0x%02qx \n", myVOPtr->primaryVHB, myVOPtr->primaryVHB ); + plog( "\talternate VHB is at block %qd 0x%02qx \n", myVOPtr->alternateVHB, myVOPtr->alternateVHB ); + plog( "\tsector size = %d 0x%02x \n", myVOPtr->sectorSize, myVOPtr->sectorSize ); + plog( "\tVolumeObject flags = 0x%02X \n", myVOPtr->flags ); + plog( "\ttotal sectors for volume = %qd 0x%02qx \n", + myVOPtr->totalDeviceSectors, myVOPtr->totalDeviceSectors ); + plog( "\ttotal sectors for embedded volume = %qd 0x%02qx \n", + myVOPtr->totalEmbeddedSectors, myVOPtr->totalEmbeddedSectors ); + + return; + +} /* PrintVolumeObject */ + + +//****************************************************************************** +// Routine: GetEmbeddedVolumeHeaders +// +// Function: Given a MDB (Master Directory Block) from an HFS volume, check +// to see if there is an embedded HFS+ volume. If we find an +// embedded HFS+ volume fill in relevant SGlob.VolumeObject data. +// +// Result: returns nothing. Will fill in VolumeObject data +//****************************************************************************** + +static void GetEmbeddedVolumeHeaders( SGlobPtr GPtr, + HFSMasterDirectoryBlock * theMDBPtr, + Boolean isPrimaryMDB ) +{ + OSErr err; + HFSPlusVolumeHeader * myVHPtr; + VolumeObjectPtr myVOPtr; + UInt64 myHFSPlusSectors; + UInt64 myPrimaryBlockNum; + UInt64 myAlternateBlockNum; + HFSPlusVolumeHeader myAltVolHeader; + BlockDescriptor myBlockDescriptor; + + myBlockDescriptor.buffer = NULL; + myVOPtr = GetVolumeObjectPtr( ); + + // NOTE - If all of the embedded volume information is zero, then assume + // this really is a plain HFS disk like it says. There could be ghost + // volume headers left over when someone reinitializes a large HFS Plus + // volume as HFS. The original embedded primary volume header and + // alternate volume header are not zeroed out. 
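+	// Worked example of the arithmetic below (hypothetical wrapper values,
+	// assuming 512-byte sectors, i.e. Blk_Size): with drAlBlkSiz = 4096
+	// (8 sectors), drAlBlSt = 28 and an embedded extent of startBlock = 2,
+	// blockCount = 100, the embedded volume spans myHFSPlusSectors =
+	// 8 * 100 = 800 sectors and starts at embeddedOffset =
+	// (2 * 4096) + (28 * 512) = 22528 bytes (sector 44); its primary volume
+	// header is at sector 44 + 2 = 46 and its alternate volume header is at
+	// sector 44 + 800 - 2 = 842.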
+ if ( theMDBPtr->drEmbedSigWord == 0 && + theMDBPtr->drEmbedExtent.blockCount == 0 && + theMDBPtr->drEmbedExtent.startBlock == 0 ) { + goto ExitRoutine; + } + + // number of sectors in our embedded HFS+ volume + myHFSPlusSectors = (theMDBPtr->drAlBlkSiz / Blk_Size) * theMDBPtr->drEmbedExtent.blockCount; + + // offset of embedded HFS+ volume (in bytes) into HFS wrapper volume + // NOTE - UInt32 is OK since we don't support HFS Wrappers on TB volumes + myVOPtr->embeddedOffset = + (theMDBPtr->drEmbedExtent.startBlock * theMDBPtr->drAlBlkSiz) + + (theMDBPtr->drAlBlSt * Blk_Size); + + // Embedded alternate volume header is always 2nd to last sector + myAlternateBlockNum = + theMDBPtr->drAlBlSt + + ((theMDBPtr->drAlBlkSiz / Blk_Size) * theMDBPtr->drEmbedExtent.startBlock) + + myHFSPlusSectors - 2; + + // Embedded primary volume header should always be block 2 (relative to 0) + // into the embedded volume + myPrimaryBlockNum = (theMDBPtr->drEmbedExtent.startBlock * theMDBPtr->drAlBlkSiz / Blk_Size) + + theMDBPtr->drAlBlSt + 2; + + // get the embedded alternate volume header + err = GetVolumeObjectBlock( myVOPtr, myAlternateBlockNum, &myBlockDescriptor ); + if ( err == noErr ) { + myVHPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer; + if ( myVHPtr->signature == kHFSPlusSigWord ) { + + myVOPtr->alternateVHB = myAlternateBlockNum; // save location + myVOPtr->primaryVHB = myPrimaryBlockNum; // save location + err = ValidVolumeHeader( myVHPtr ); + if ( err == noErr ) { + myVOPtr->flags |= kVO_AltVHBOK; + myVOPtr->totalEmbeddedSectors = myHFSPlusSectors; + bcopy( myVHPtr, &myAltVolHeader, sizeof( *myVHPtr ) ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tInvalid embedded alternate volume header at block %qd - error %d \n", myAlternateBlockNum, err ); + } + } + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tBlock number %qd is not embedded alternate volume header \n", myAlternateBlockNum ); + } + } + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get embedded alternate volume header at %qd, err %d \n", + myAlternateBlockNum, err ); + } + } + + // get the embedded primary volume header + err = GetVolumeObjectBlock( myVOPtr, myPrimaryBlockNum, &myBlockDescriptor ); + if ( err == noErr ) { + myVHPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer; + if ( myVHPtr->signature == kHFSPlusSigWord ) { + + myVOPtr->primaryVHB = myPrimaryBlockNum; // save location + myVOPtr->alternateVHB = myAlternateBlockNum; // save location + err = ValidVolumeHeader( myVHPtr ); + if ( err == noErr ) { + myVOPtr->flags |= kVO_PriVHBOK; + myVOPtr->totalEmbeddedSectors = myHFSPlusSectors; + + // check to see if the primary and alternates are in sync. 
3137809 + CompareVolHeaderBTreeSizes( GPtr, myVOPtr, myVHPtr, &myAltVolHeader ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tInvalid embedded primary volume header at block %qd - error %d \n", myPrimaryBlockNum, err ); + } + } + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tBlock number %qd is not embedded primary volume header \n", myPrimaryBlockNum ); + } + } + (void) ReleaseVolumeBlock( GPtr->calculatedVCB, &myBlockDescriptor, kReleaseBlock ); + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get embedded primary volume header at %qd, err %d \n", + myPrimaryBlockNum, err ); + } + } + +ExitRoutine: + return; + +} /* GetEmbeddedVolumeHeaders */ + + +//****************************************************************************** +// Routine: CompareVolHeaderBTreeSizes +// +// Function: checks to see if the primary and alternate volume headers are in +// sync with regards to the catalog and extents btree file size. If +// we find an anomaly we will give preference to the volume header +// with the larger of the btree files since these files never shrink. +// Added for radar #3137809. +// +// Result: returns nothing. +//****************************************************************************** +static void CompareVolHeaderBTreeSizes( SGlobPtr GPtr, + VolumeObjectPtr theVOPtr, + HFSPlusVolumeHeader * thePriVHPtr, + HFSPlusVolumeHeader * theAltVHPtr ) +{ + int weDisagree; + int usePrimary; + int useAlternate; + + weDisagree = usePrimary = useAlternate = 0; + + // we only check if both volume headers appear to be OK + if ( (theVOPtr->flags & kVO_PriVHBOK) == 0 || (theVOPtr->flags & kVO_AltVHBOK) == 0 ) + return; + + if ( thePriVHPtr->catalogFile.totalBlocks != theAltVHPtr->catalogFile.totalBlocks ) { + // only continue if the B*Tree files both start at the same block number + if ( thePriVHPtr->catalogFile.extents[0].startBlock == theAltVHPtr->catalogFile.extents[0].startBlock ) { + weDisagree = 1; + if ( thePriVHPtr->catalogFile.totalBlocks > theAltVHPtr->catalogFile.totalBlocks ) + usePrimary = 1; + else + useAlternate = 1; + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tvolume headers disagree on catalog file total blocks - primary %d alternate %d \n", + thePriVHPtr->catalogFile.totalBlocks, theAltVHPtr->catalogFile.totalBlocks ); + } + } + } + + if ( thePriVHPtr->extentsFile.totalBlocks != theAltVHPtr->extentsFile.totalBlocks ) { + // only continue if the B*Tree files both start at the same block number + if ( thePriVHPtr->extentsFile.extents[0].startBlock == theAltVHPtr->extentsFile.extents[0].startBlock ) { + weDisagree = 1; + if ( thePriVHPtr->extentsFile.totalBlocks > theAltVHPtr->extentsFile.totalBlocks ) + usePrimary = 1; + else + useAlternate = 1; + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tvolume headers disagree on extents file total blocks - primary %d alternate %d \n", + thePriVHPtr->extentsFile.totalBlocks, theAltVHPtr->extentsFile.totalBlocks ); + } + } + } + + if ( weDisagree == 0 ) + return; + + // we have a disagreement. we resolve the issue by using the larger of the two. 
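+	// (usePrimary and useAlternate can only both be set when the catalog and
+	// extents comparisons point at different copies, i.e. one file is larger
+	// in the primary and the other is larger in the alternate.)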
+ if ( usePrimary == 1 && useAlternate == 1 ) { + // this should never happen, but if it does, bail without choosing a preference + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tvolume headers disagree but there is confusion on which to use \n" ); + } + return; + } + + if ( usePrimary == 1 ) { + // mark alternate as bogus + theVOPtr->flags &= ~kVO_AltVHBOK; + } + else if ( useAlternate == 1 ) { + // mark primary as bogus + theVOPtr->flags &= ~kVO_PriVHBOK; + } + + return; + +} /* CompareVolHeaderBTreeSizes */ + + +/* + * This code should be removed after debugging is completed. + */ +#include <ctype.h> + +#ifndef MIN +#define MIN(a, b) \ + ({ __typeof(a) _a = (a); __typeof(b) _b = (b); \ + (_a < _b) ? _a : _b; }) +#endif + + +enum { WIDTH = 16, }; + +static void +DumpData(const void *data, size_t len) +{ + unsigned char *base = (unsigned char*)data; + unsigned char *end = base + len; + unsigned char *cp = base; + int allzeroes = 0; + + while (cp < end) { + unsigned char *tend = MIN(end, cp + WIDTH); + unsigned char *tmp; + int i; + size_t gap = (cp + WIDTH) - tend; + + if (gap != 0 || tend == end) + allzeroes = 0; + if (allzeroes) { + for (tmp = cp; tmp < tend; tmp++) { + if (*tmp) { + allzeroes = 0; + break; + } + } + if (allzeroes == 1) { + fprintf(stderr, ". . .\n"); + allzeroes = 2; + } + if (allzeroes) { + cp += WIDTH; + continue; + } + } + allzeroes = 1; + + fprintf(stderr, "%04x: ", (int)(cp - base)); + for (i = 0, tmp = cp; tmp < tend; tmp++) { + fprintf(stderr, "%02x", *tmp); + if (++i % 2 == 0) + fprintf(stderr, " "); + if (*tmp) + allzeroes = 0; + } + for (i = gap; i >= 0; i--) { + fprintf(stderr, " "); + if (i % 2 == 1) + fprintf(stderr, " "); + } + fprintf(stderr, " |"); + for (tmp = cp; tmp < tend; tmp++) { + fprintf(stderr, "%c", isalnum(*tmp) ? *tmp : '.'); + } + for (i = 0; i < gap; i++) { + fprintf(stderr, " "); + } + fprintf(stderr, "|\n"); + cp += WIDTH; + } + + return; + +} +//****************************************************************************** +// Routine: VolumeObjectIsValid +// +// Function: determine if the volume represented by our VolumeObject is a +// valid volume type (i.e. not unknown type) +// +// Result: returns true if volume is known volume type (i.e. HFS, HFS+) +// false otherwise. +//****************************************************************************** +Boolean VolumeObjectIsValid(void) +{ + VolumeObjectPtr myVOPtr = GetVolumeObjectPtr(); + Boolean retval = false; + + /* Check if the type is unknown type */ + if (myVOPtr->volumeType == kUnknownVolumeType) { + pwarn("volumeType is %d\n", kUnknownVolumeType); + goto done; + } + + /* Check if it is HFS+ volume */ + if (VolumeObjectIsHFSPlus() == true) { + retval = true; + goto done; + } + + /* Check if it is HFS volume */ + if (VolumeObjectIsHFS() == true) { + retval = true; + goto done; + } + +done: + /* + * This code should be removed after debugging is done. 
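+	 * For reference: on failure the block located by GetVolumeObjectBlockNum()
+	 * is read and hex-dumped below via DumpData(), which prints 16 bytes per
+	 * row and elides runs of all-zero rows.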
+ */ + if (retval == false) { + UInt64 myBlockNum; + VolumeObjectPtr myVOPtr; + BlockDescriptor theBlockDesc; + OSErr err; + + myVOPtr = GetVolumeObjectPtr(); + GetVolumeObjectBlockNum(&myBlockNum); + err = GetVolumeBlock(myVOPtr->vcbPtr, myBlockNum, kGetBlock, &theBlockDesc); + if (err != noErr) { + fprintf(stderr, "%s: Cannot GetVolumetBlock: %d\n", __FUNCTION__, err); + } else { + uint8_t *ptr = (uint8_t*)theBlockDesc.buffer; + DumpData(ptr, theBlockDesc.blockSize); + } + } + return retval; +} /* VolumeObjectIsValid */ + +//****************************************************************************** +// Routine: VolumeObjectIsHFSPlus +// +// Function: determine if the volume represented by our VolumeObject is an +// HFS+ volume (pure or embedded). +// +// Result: returns true if volume is pure HFS+ or embedded HFS+ else false. +//****************************************************************************** +Boolean VolumeObjectIsHFSPlus( void ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + + if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType || + myVOPtr->volumeType == kPureHFSPlusVolumeType ) { + return( true ); + } + + return( false ); + +} /* VolumeObjectIsHFSPlus */ + + +//****************************************************************************** +// Routine: VolumeObjectIsHFSX +// +// Function: determine if the volume represented by our VolumeObject is an +// HFSX volume (pure or embedded) +// +// Result: returns true if volume is pure HFSX or embedded HFSX else false. +//****************************************************************************** + +Boolean VolumeObjectIsHFSX(SGlobPtr GPtr) +{ + OSErr err; + int result = false; + HFSMasterDirectoryBlock *mdbp; + SVCB *vcb = GPtr->calculatedVCB; + BlockDescriptor block; + +#define kIDSector 2 + err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block); + if (err) return (false); + + mdbp = (HFSMasterDirectoryBlock *)block.buffer; + if (mdbp->drSigWord == kHFSXSigWord) { + result = true; + } else if (mdbp->drSigWord == kHFSSigWord) { + if (mdbp->drEmbedSigWord == kHFSXSigWord) { + result = true; + } + } + + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return( result ); +} /* VolumeObjectIsHFSX */ + +//****************************************************************************** +// Routine: VolumeObjectIsHFS +// +// Function: determine if the volume represented by our VolumeObject is an +// HFS (standard) volume. +// +// Result: returns true if HFS (standard) volume. +//****************************************************************************** +Boolean VolumeObjectIsHFS( void ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + + if ( myVOPtr->volumeType == kHFSVolumeType ) + return( true ); + + return( false ); + +} /* VolumeObjectIsHFS */ + + +//****************************************************************************** +// Routine: VolumeObjectIsEmbeddedHFSPlus +// +// Function: determine if the volume represented by our VolumeObject is an +// embedded HFS plus volume. +// +// Result: returns true if embedded HFS plus volume. 
+//****************************************************************************** +Boolean VolumeObjectIsEmbeddedHFSPlus( void ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + + if ( myVOPtr->volumeType == kEmbededHFSPlusVolumeType ) + return( true ); + + return( false ); + +} /* VolumeObjectIsEmbeddedHFSPlus */ + + +//****************************************************************************** +// Routine: VolumeObjectIsPureHFSPlus +// +// Function: determine if the volume represented by our VolumeObject is an +// pure HFS plus volume. +// +// Result: returns true if pure HFS plus volume. +//****************************************************************************** +Boolean VolumeObjectIsPureHFSPlus( void ) +{ + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + + if ( myVOPtr->volumeType == kPureHFSPlusVolumeType ) + return( true ); + + return( false ); + +} /* VolumeObjectIsPureHFSPlus */ + + +//****************************************************************************** +// Routine: GetVolumeObjectPtr +// +// Function: Accessor routine to get a pointer to our VolumeObject structure. +// +// Result: returns pointer to our VolumeObject. +//****************************************************************************** +VolumeObjectPtr GetVolumeObjectPtr( void ) +{ + static VolumeObject myVolumeObject; + static int myInited = 0; + + if ( myInited == 0 ) { + myInited++; + bzero( &myVolumeObject, sizeof(myVolumeObject) ); + } + + return( &myVolumeObject ); + +} /* GetVolumeObjectPtr */ + + +//****************************************************************************** +// Routine: CheckEmbeddedVolInfoInMDBs +// +// Function: Check the primary and alternate MDB to see if the embedded volume +// information (drEmbedSigWord and drEmbedExtent) match. +// +// Result: NA +//****************************************************************************** +void CheckEmbeddedVolInfoInMDBs( SGlobPtr GPtr ) +{ + OSErr err; + Boolean primaryIsDamaged = false; + Boolean alternateIsDamaged = false; + VolumeObjectPtr myVOPtr; + HFSMasterDirectoryBlock * myPriMDBPtr; + HFSMasterDirectoryBlock * myAltMDBPtr; + UInt64 myOffset; + UInt64 mySectors; + BlockDescriptor myPrimary; + BlockDescriptor myAlternate; + + myVOPtr = GetVolumeObjectPtr( ); + myPrimary.buffer = NULL; + myAlternate.buffer = NULL; + + // we only check this if primary and alternate are OK at this point. OK means + // that the primary and alternate MDBs have the correct signature and at least + // one of them points to a valid embedded HFS+ volume. + if ( VolumeObjectIsEmbeddedHFSPlus( ) == false || + (myVOPtr->flags & kVO_PriMDBOK) == 0 || (myVOPtr->flags & kVO_AltMDBOK) == 0 ) + return; + + err = GetVolumeObjectPrimaryMDB( &myPrimary ); + if ( err != noErr ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get primary MDB \n" ); + } + goto ExitThisRoutine; + } + myPriMDBPtr = (HFSMasterDirectoryBlock *) myPrimary.buffer; + err = GetVolumeObjectAlternateMDB( &myAlternate ); + if ( err != noErr ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\tcould not get alternate MDB \n" ); + } + goto ExitThisRoutine; + } + myAltMDBPtr = (HFSMasterDirectoryBlock *) myAlternate.buffer; + + // bail if everything looks good. NOTE - we can bail if drEmbedExtent info + // is the same in the primary and alternate MDB because we know one of them is + // valid (or VolumeObjectIsEmbeddedHFSPlus would be false and we would not be + // here). 
+ if ( myPriMDBPtr->drEmbedSigWord == kHFSPlusSigWord && + myAltMDBPtr->drEmbedSigWord == kHFSPlusSigWord && + myPriMDBPtr->drEmbedExtent.blockCount == myAltMDBPtr->drEmbedExtent.blockCount && + myPriMDBPtr->drEmbedExtent.startBlock == myAltMDBPtr->drEmbedExtent.startBlock ) + goto ExitThisRoutine; + + // we know that VolumeObject.embeddedOffset and VolumeObject.totalEmbeddedSectors + // are correct so we will verify the info in each MDB calculates to these values. + myOffset = (myPriMDBPtr->drEmbedExtent.startBlock * myPriMDBPtr->drAlBlkSiz) + + (myPriMDBPtr->drAlBlSt * Blk_Size); + mySectors = (myPriMDBPtr->drAlBlkSiz / Blk_Size) * myPriMDBPtr->drEmbedExtent.blockCount; + + if ( myOffset != myVOPtr->embeddedOffset || mySectors != myVOPtr->totalEmbeddedSectors ) + primaryIsDamaged = true; + + myOffset = (myAltMDBPtr->drEmbedExtent.startBlock * myAltMDBPtr->drAlBlkSiz) + + (myAltMDBPtr->drAlBlSt * Blk_Size); + mySectors = (myAltMDBPtr->drAlBlkSiz / Blk_Size) * myAltMDBPtr->drEmbedExtent.blockCount; + + if ( myOffset != myVOPtr->embeddedOffset || mySectors != myVOPtr->totalEmbeddedSectors ) + alternateIsDamaged = true; + + // now check drEmbedSigWord if everything else is OK + if ( primaryIsDamaged == false && alternateIsDamaged == false ) { + if ( myPriMDBPtr->drEmbedSigWord != kHFSPlusSigWord ) + primaryIsDamaged = true; + else if ( myAltMDBPtr->drEmbedSigWord != kHFSPlusSigWord ) + alternateIsDamaged = true; + } + + if ( primaryIsDamaged || alternateIsDamaged ) { + GPtr->VIStat |= S_WMDB; + WriteError( GPtr, E_MDBDamaged, 7, 0 ); + if ( primaryIsDamaged ) { + myVOPtr->flags &= ~kVO_PriMDBOK; // mark the primary MDB as damaged + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid primary wrapper MDB \n"); + } + else { + myVOPtr->flags &= ~kVO_AltMDBOK; // mark the alternate MDB as damaged + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid alternate wrapper MDB \n"); + } + } + +ExitThisRoutine: + if ( myPrimary.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myPrimary, kReleaseBlock ); + if ( myAlternate.buffer != NULL ) + (void) ReleaseVolumeBlock( myVOPtr->vcbPtr, &myAlternate, kReleaseBlock ); + + return; + +} /* CheckEmbeddedVolInfoInMDBs */ + + +//****************************************************************************** +// Routine: ValidVolumeHeader +// +// Function: Run some sanity checks to make sure the HFSPlusVolumeHeader is valid +// +// Result: error +//****************************************************************************** +OSErr ValidVolumeHeader( HFSPlusVolumeHeader *volumeHeader ) +{ + OSErr err; + + if ((volumeHeader->signature == kHFSPlusSigWord && volumeHeader->version == kHFSPlusVersion) || + (volumeHeader->signature == kHFSXSigWord && volumeHeader->version == kHFSXVersion)) + { + if ( (volumeHeader->blockSize != 0) && ((volumeHeader->blockSize & 0x01FF) == 0) ) // non zero multiple of 512 + err = noErr; + else + err = badMDBErr; //¥¥ I want badVolumeHeaderErr in Errors.i + } + else + { + err = noMacDskErr; + } + + return( err ); +} + + +//_______________________________________________________________________ +// +// InitBTreeHeader +// +// This routine initializes a B-Tree header. +// +// Note: Since large volumes will have bigger b-trees they need to +// have map nodes setup. 
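+//
+//	For a sense of scale (computed from the expression for nodeBitsInHeader
+//	below, assuming the usual on-disk sizes sizeof(BTNodeDescriptor) = 14,
+//	sizeof(BTHeaderRec) = 106 and kBTreeHeaderUserBytes = 128): with a 4 KB
+//	node the header's map record covers 8 * (4096 - 14 - 106 - 128 - 8) =
+//	30720 nodes, so map nodes are only needed once the B-tree file grows
+//	past 30720 * 4 KB = 120 MB.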
+//_______________________________________________________________________
+
+void InitBTreeHeader (UInt32 fileSize, UInt32 clumpSize, UInt16 nodeSize, UInt16 recordCount, UInt16 keySize,
+						UInt32 attributes, UInt32 *mapNodes, void *buffer)
+{
+	UInt32		nodeCount;
+	UInt32		usedNodes;
+	UInt32		nodeBitsInHeader;
+	BTHeaderRec	*bth;
+	BTNodeDescriptor	*ndp;
+	UInt32		*bitMapPtr;
+	SInt16		*offsetPtr;
+
+
+	ClearMemory(buffer, nodeSize);		// start out with clean node
+
+	nodeCount = fileSize / nodeSize;
+	nodeBitsInHeader = 8 * (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) - kBTreeHeaderUserBytes - 4*sizeof(SInt16));
+
+	usedNodes = 1;				// header takes up one node
+	*mapNodes = 0;				// number of map nodes initially (0)
+
+
+	// FILL IN THE NODE DESCRIPTOR:
+	ndp = (BTNodeDescriptor*) buffer;	// point to node descriptor
+
+	ndp->kind = kBTHeaderNode;		// this node contains the B-tree header
+	ndp->numRecords = 3;			// there are 3 records (header, map, and user)
+
+	if (nodeCount > nodeBitsInHeader)	// do we need additional map nodes?
+	{
+		UInt32	nodeBitsInMapNode;
+
+		nodeBitsInMapNode = 8 * (nodeSize - sizeof(BTNodeDescriptor) - 2*sizeof(SInt16) - 2);	//•• why (-2) at end???
+
+		if (recordCount > 0)		// catalog B-tree?
+			ndp->fLink = 2;		// link points to initial map node
+						//•• Assumes all records will fit in one node.  It would be better
+						//•• to put the map node(s) first, then the records.
+		else
+			ndp->fLink = 1;		// link points to initial map node
+
+		*mapNodes = (nodeCount - nodeBitsInHeader + (nodeBitsInMapNode - 1)) / nodeBitsInMapNode;
+		usedNodes += *mapNodes;
+	}
+
+	// FILL IN THE HEADER RECORD:
+	bth = (BTHeaderRec*) ((char*)buffer + sizeof(BTNodeDescriptor));	// point to header
+
+	if (recordCount > 0)
+	{
+		++usedNodes;			// one more node will be used
+
+		bth->treeDepth = 1;		// tree depth is one level (leaf)
+		bth->rootNode = 1;		// root node is also leaf
+		bth->firstLeafNode = 1;		// first leaf node
+		bth->lastLeafNode = 1;		// last leaf node
+	}
+
+	bth->attributes = attributes;		// flags for 16-bit key lengths, and variable sized index keys
+	bth->leafRecords = recordCount;		// total number of data records
+	bth->nodeSize = nodeSize;		// size of a node
+	bth->maxKeyLength = keySize;		// maximum length of a key
+	bth->totalNodes = nodeCount;		// total number of nodes
+	bth->freeNodes = nodeCount - usedNodes;	// number of free nodes
+	bth->clumpSize = clumpSize;		//
+//	bth->btreeType = 0;			// 0 = meta data B-tree
+
+
+	// FILL IN THE MAP RECORD:
+	bitMapPtr = (UInt32*) ((Byte*) buffer + sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec) + kBTreeHeaderUserBytes);	// point to bitmap
+
+	// MARK NODES THAT ARE IN USE:
+	// Note - worst case (32MB alloc blk) will have only 18 nodes in use.
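+	// For example (illustrative): with usedNodes = 3 the expression below
+	// yields ~(0xFFFFFFFF >> 3) = 0xE0000000, i.e. the three high-order bits
+	// are set, marking the first three nodes (the header node plus the next
+	// two) as allocated in the map record.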
+ *bitMapPtr = ~((UInt32) 0xFFFFFFFF >> usedNodes); + + + // PLACE RECORD OFFSETS AT THE END OF THE NODE: + offsetPtr = (SInt16*) ((Byte*) buffer + nodeSize - 4*sizeof(SInt16)); + + *offsetPtr++ = sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec) + kBTreeHeaderUserBytes + nodeBitsInHeader/8; // offset to free space + *offsetPtr++ = sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec) + kBTreeHeaderUserBytes; // offset to allocation map + *offsetPtr++ = sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec); // offset to user space + *offsetPtr = sizeof(BTNodeDescriptor); // offset to BTH +} + +/*------------------------------------------------------------------------------ + +Routine: CalculateItemCount + +Function: determines number of items for progress feedback + +Input: vRefNum: the volume to count items + +Output: number of items + +------------------------------------------------------------------------------*/ + +void CalculateItemCount( SGlob *GPtr, UInt64 *itemCount, UInt64 *onePercent ) +{ + BTreeControlBlock *btcb; + VolumeObjectPtr myVOPtr; + UInt64 items; + UInt32 realFreeNodes; + SVCB *vcb = GPtr->calculatedVCB; + + /* each bitmap segment is an item */ + myVOPtr = GetVolumeObjectPtr( ); + items = GPtr->calculatedVCB->vcbTotalBlocks / 1024; + + // + // Items is the used node count and leaf record count for each btree... + // + + btcb = (BTreeControlBlock*) vcb->vcbCatalogFile->fcbBtree; + realFreeNodes = ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount; + items += (2 * btcb->leafRecords) + (btcb->totalNodes - realFreeNodes); + + btcb = (BTreeControlBlock*) vcb->vcbExtentsFile->fcbBtree; + realFreeNodes = ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount; + items += btcb->leafRecords + (btcb->totalNodes - realFreeNodes); + + if ( vcb->vcbAttributesFile != NULL ) + { + btcb = (BTreeControlBlock*) vcb->vcbAttributesFile->fcbBtree; + realFreeNodes = ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount; + + items += (btcb->leafRecords + (btcb->totalNodes - realFreeNodes)); + } + + *onePercent = items/ 100; + + // + // [2239291] We're calculating the progress for the wrapper and the embedded volume separately, which + // confuses the caller (since they see the progress jump to a large percentage while checking the wrapper, + // then jump to a small percentage when starting to check the embedded volume). To avoid this behavior, + // we pretend the wrapper has 100 times as many items as it really does. This means the progress will + // never exceed 1% for the wrapper. 
+ // +/* fsck_hfs doesn't deal wih the wrapper at this time (8.29.2002) + if ( (myVOPtr->volumeType == kEmbededHFSPlusVolumeType) && (GPtr->inputFlags & examineWrapperMask) ) + items *= 100; */ + + // Add en extra Ã… 5% to smooth the progress + items += *onePercent * 5; + + *itemCount = items; +} + + +SFCB* ResolveFCB(short fileRefNum) +{ + return( (SFCB*)((unsigned long)GetFCBSPtr() + (unsigned long)fileRefNum) ); +} + + +//****************************************************************************** +// Routine: SetupFCB fills in the FCB info +// +// Returns: The filled up FCB +//****************************************************************************** +void SetupFCB( SVCB *vcb, SInt16 refNum, UInt32 fileID, UInt32 fileClumpSize ) +{ + SFCB *fcb; + + fcb = ResolveFCB(refNum); + + fcb->fcbFileID = fileID; + fcb->fcbVolume = vcb; + fcb->fcbClumpSize = fileClumpSize; +} + + +//****************************************************************************** +// +// Routine: ResolveFileRefNum +// +// Purpose: Return a file reference number for a given file control block +// pointer. +// +// Input: +// fileCtrlBlockPtr Pointer to the SFCB +// +// Output: +// result File reference number, +// or 0 if fileCtrlBlockPtr is invalid +// +pascal short ResolveFileRefNum(SFCB * fileCtrlBlockPtr) +{ + return( (unsigned long)fileCtrlBlockPtr - (unsigned long)GetFCBSPtr() ); +} + + + +Ptr gFCBSPtr; + +void SetFCBSPtr( Ptr value ) +{ + gFCBSPtr = value; +} + +Ptr GetFCBSPtr( void ) +{ + return (gFCBSPtr); +} + + +//_______________________________________________________________________ +// +// Routine: FlushVolumeControlBlock +// Arguments: SVCB *vcb +// Output: OSErr err +// +// Function: Flush volume information to either the HFSPlusVolumeHeader +// of the Master Directory Block +//_______________________________________________________________________ + +OSErr FlushVolumeControlBlock( SVCB *vcb ) +{ + OSErr err; + HFSPlusVolumeHeader *volumeHeader; + SFCB *fcb; + BlockDescriptor block; + + if ( ! IsVCBDirty( vcb ) ) // if it's not dirty + return( noErr ); + + block.buffer = NULL; + err = GetVolumeObjectPrimaryBlock( &block ); + if ( err != noErr ) + { + // attempt to fix the primary with alternate + if ( block.buffer != NULL ) { + (void) ReleaseVolumeBlock( vcb, &block, kReleaseBlock ); + block.buffer = NULL; + } + + err = VolumeObjectFixPrimaryBlock( ); + ReturnIfError( err ); + + // should be able to get it now + err = GetVolumeObjectPrimaryBlock( &block ); + ReturnIfError( err ); + } + + if ( vcb->vcbSignature == kHFSPlusSigWord ) + { + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + + // 2005507, Keep the MDB creation date and HFSPlusVolumeHeader creation date in sync. + if ( vcb->vcbEmbeddedOffset != 0 ) // It's a wrapped HFS+ volume + { + HFSMasterDirectoryBlock *mdb; + BlockDescriptor mdb_block; + + mdb_block.buffer = NULL; + err = GetVolumeObjectPrimaryMDB( &mdb_block ); + if ( err == noErr ) + { + mdb = (HFSMasterDirectoryBlock *) mdb_block.buffer; + if ( mdb->drCrDate != vcb->vcbCreateDate ) // The creation date changed + { + mdb->drCrDate = vcb->vcbCreateDate; + (void) ReleaseVolumeBlock(vcb, &mdb_block, kForceWriteBlock); + mdb_block.buffer = NULL; + } + } + if ( mdb_block.buffer != NULL ) + (void) ReleaseVolumeBlock(vcb, &mdb_block, kReleaseBlock); + } + + volumeHeader->attributes = vcb->vcbAttributes; + volumeHeader->lastMountedVersion = kFSCKMountVersion; + volumeHeader->createDate = vcb->vcbCreateDate; // NOTE: local time, not GMT! 
+ volumeHeader->modifyDate = vcb->vcbModifyDate; + volumeHeader->backupDate = vcb->vcbBackupDate; + volumeHeader->checkedDate = vcb->vcbCheckedDate; + volumeHeader->fileCount = vcb->vcbFileCount; + volumeHeader->folderCount = vcb->vcbFolderCount; + volumeHeader->blockSize = vcb->vcbBlockSize; + volumeHeader->totalBlocks = vcb->vcbTotalBlocks; + volumeHeader->freeBlocks = vcb->vcbFreeBlocks; + volumeHeader->nextAllocation = vcb->vcbNextAllocation; + volumeHeader->rsrcClumpSize = vcb->vcbRsrcClumpSize; + volumeHeader->dataClumpSize = vcb->vcbDataClumpSize; + volumeHeader->nextCatalogID = vcb->vcbNextCatalogID; + volumeHeader->writeCount = vcb->vcbWriteCount; + volumeHeader->encodingsBitmap = vcb->vcbEncodingsBitmap; + + //¥¥Êshould we use the vcb or fcb clumpSize values ????? -djb + volumeHeader->allocationFile.clumpSize = vcb->vcbAllocationFile->fcbClumpSize; + volumeHeader->extentsFile.clumpSize = vcb->vcbExtentsFile->fcbClumpSize; + volumeHeader->catalogFile.clumpSize = vcb->vcbCatalogFile->fcbClumpSize; + + CopyMemory( vcb->vcbFinderInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo) ); + + fcb = vcb->vcbExtentsFile; + CopyMemory( fcb->fcbExtents32, volumeHeader->extentsFile.extents, sizeof(HFSPlusExtentRecord) ); + volumeHeader->extentsFile.logicalSize = fcb->fcbLogicalSize; + volumeHeader->extentsFile.totalBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + + fcb = vcb->vcbCatalogFile; + CopyMemory( fcb->fcbExtents32, volumeHeader->catalogFile.extents, sizeof(HFSPlusExtentRecord) ); + volumeHeader->catalogFile.logicalSize = fcb->fcbLogicalSize; + volumeHeader->catalogFile.totalBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + + fcb = vcb->vcbAllocationFile; + CopyMemory( fcb->fcbExtents32, volumeHeader->allocationFile.extents, sizeof(HFSPlusExtentRecord) ); + volumeHeader->allocationFile.logicalSize = fcb->fcbLogicalSize; + volumeHeader->allocationFile.totalBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + + if (vcb->vcbAttributesFile != NULL) // Only update fields if an attributes file existed and was open + { + fcb = vcb->vcbAttributesFile; + CopyMemory( fcb->fcbExtents32, volumeHeader->attributesFile.extents, sizeof(HFSPlusExtentRecord) ); + volumeHeader->attributesFile.logicalSize = fcb->fcbLogicalSize; + volumeHeader->attributesFile.clumpSize = fcb->fcbClumpSize; + volumeHeader->attributesFile.totalBlocks = fcb->fcbPhysicalSize / vcb->vcbBlockSize; + } + } + else + { + HFSMasterDirectoryBlock *mdbP; + + mdbP = (HFSMasterDirectoryBlock *) block.buffer; + + mdbP->drCrDate = vcb->vcbCreateDate; + mdbP->drLsMod = vcb->vcbModifyDate; + mdbP->drAtrb = (UInt16)vcb->vcbAttributes; + mdbP->drClpSiz = vcb->vcbDataClumpSize; + mdbP->drNxtCNID = vcb->vcbNextCatalogID; + mdbP->drFreeBks = vcb->vcbFreeBlocks; + mdbP->drXTClpSiz = vcb->vcbExtentsFile->fcbClumpSize; + mdbP->drCTClpSiz = vcb->vcbCatalogFile->fcbClumpSize; + + mdbP->drNmFls = vcb->vcbNmFls; + mdbP->drNmRtDirs = vcb->vcbNmRtDirs; + mdbP->drFilCnt = vcb->vcbFileCount; + mdbP->drDirCnt = vcb->vcbFolderCount; + + fcb = vcb->vcbExtentsFile; + CopyMemory( fcb->fcbExtents16, mdbP->drXTExtRec, sizeof( mdbP->drXTExtRec ) ); + + fcb = vcb->vcbCatalogFile; + CopyMemory( fcb->fcbExtents16, mdbP->drCTExtRec, sizeof( mdbP->drCTExtRec ) ); + } + + //-- Write the VHB/MDB out by releasing the block dirty + if ( block.buffer != NULL ) { + err = ReleaseVolumeBlock(vcb, &block, kForceWriteBlock); + block.buffer = NULL; + } + MarkVCBClean( vcb ); + + return( err ); +} + + 
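+#if 0
+/*
+ * Illustrative sketch only (not part of the original change): a hypothetical
+ * caller that persists in-memory VCB changes.  Note that
+ * FlushAlternateVolumeControlBlock (below) already calls
+ * FlushVolumeControlBlock first, so one call updates both the primary and
+ * the alternate VHB / MDB.
+ */
+static OSErr FlushBothVolumeHeaders( SVCB *vcb )
+{
+	// vcb is assumed to have been marked dirty by the repair code that
+	// changed it; FlushVolumeControlBlock is a no-op on a clean VCB.
+	return FlushAlternateVolumeControlBlock( vcb, VolumeObjectIsHFSPlus( ) );
+}
+#endif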
+//_______________________________________________________________________ +// +// Routine: FlushAlternateVolumeControlBlock +// Arguments: SVCB *vcb +// Boolean ifHFSPlus +// Output: OSErr err +// +// Function: Flush volume information to either the Alternate HFSPlusVolumeHeader or the +// Alternate Master Directory Block. Called by the BTree when the catalog +// or extent files grow. Simply BlockMoves the original to the alternate +// location. +//_______________________________________________________________________ + +OSErr FlushAlternateVolumeControlBlock( SVCB *vcb, Boolean isHFSPlus ) +{ + OSErr err; + VolumeObjectPtr myVOPtr; + UInt64 myBlockNum; + BlockDescriptor pri_block, alt_block; + + pri_block.buffer = NULL; + alt_block.buffer = NULL; + myVOPtr = GetVolumeObjectPtr( ); + + err = FlushVolumeControlBlock( vcb ); + err = GetVolumeObjectPrimaryBlock( &pri_block ); + + // invalidate if we have not marked the primary as OK + if ( VolumeObjectIsHFS( ) ) { + if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) + err = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 ) { + err = badMDBErr; + } + if ( err != noErr ) + goto ExitThisRoutine; + + GetVolumeObjectAlternateBlockNum( &myBlockNum ); + if ( myBlockNum != 0 ) { + // we don't care if this is an invalid MDB / VHB since we will write over it + err = GetVolumeObjectAlternateBlock( &alt_block ); + if ( err == noErr || err == badMDBErr || err == noMacDskErr ) { + CopyMemory( pri_block.buffer, alt_block.buffer, Blk_Size ); + (void) ReleaseVolumeBlock(vcb, &alt_block, kForceWriteBlock); + alt_block.buffer = NULL; + } + } + +ExitThisRoutine: + if ( pri_block.buffer != NULL ) + (void) ReleaseVolumeBlock( vcb, &pri_block, kReleaseBlock ); + if ( alt_block.buffer != NULL ) + (void) ReleaseVolumeBlock( vcb, &alt_block, kReleaseBlock ); + + return( err ); +} + +void +ConvertToHFSPlusExtent( const HFSExtentRecord oldExtents, HFSPlusExtentRecord newExtents) +{ + UInt16 i; + + // go backwards so we can convert in place! + + for (i = kHFSPlusExtentDensity-1; i > 2; --i) + { + newExtents[i].blockCount = 0; + newExtents[i].startBlock = 0; + } + + newExtents[2].blockCount = oldExtents[2].blockCount; + newExtents[2].startBlock = oldExtents[2].startBlock; + newExtents[1].blockCount = oldExtents[1].blockCount; + newExtents[1].startBlock = oldExtents[1].startBlock; + newExtents[0].blockCount = oldExtents[0].blockCount; + newExtents[0].startBlock = oldExtents[0].startBlock; +} + + + +OSErr CacheWriteInPlace( SVCB *vcb, UInt32 fileRefNum, HIOParam *iopb, UInt64 currentPosition, UInt32 maximumBytes, UInt32 *actualBytes ) +{ + OSErr err; + UInt64 diskBlock; + UInt32 contiguousBytes; + void* buffer; + + *actualBytes = 0; + buffer = (char*)iopb->ioBuffer + iopb->ioActCount; + + err = MapFileBlockC(vcb, ResolveFCB(fileRefNum), maximumBytes, (currentPosition >> kSectorShift), + &diskBlock, &contiguousBytes ); + if (err) + return (err); + + err = DeviceWrite(vcb->vcbDriverWriteRef, vcb->vcbDriveNumber, buffer, (diskBlock << Log2BlkLo), contiguousBytes, actualBytes); + + return( err ); +} + + +void PrintName( int theCount, const UInt8 *theNamePtr, Boolean isUnicodeString ) +{ + int myCount; + int i; + + myCount = (isUnicodeString) ? (theCount * 2) : theCount; + for ( i = 0; i < myCount; i++ ) + plog( "%02X ", *(theNamePtr + i) ); + plog( "\n" ); + +} /* PrintName */ + +/* Function: add_prime_bucket_uint32 + * + * Description: + * This function increments the prime number buckets in the prime bucket + * set based on the uint32_t number provided. 
This function increments + * each prime number bucket by one at an offset of the corresponding + * remainder of the division. This function is based on Chinese Remainder + * Theorem and adds the given number to the set to compare later. + * + * Input: + * 1. Corresponding prime bucket to increment. + * 2. uint32_t number to add to the set. + * + * Output: nil + */ +void add_prime_bucket_uint32(PrimeBuckets *cur, uint32_t num) +{ + int r32, r27, r25, r7, r11, r13, r17, r19, r23, r29, r31; + + if (!cur) { + return; + } + + /* Perform the necessary divisions here */ + r32 = num % 32; + r27 = num % 27; + r25 = num % 25; + r7 = num % 7; + r11 = num % 11; + r13 = num % 13; + r17 = num % 17; + r19 = num % 19; + r23 = num % 23; + r29 = num % 29; + r31 = num % 31; + + /* Update bucket for attribute bit */ + cur->n32[r32]++; + cur->n27[r27]++; + cur->n25[r25]++; + cur->n7[r7]++; + cur->n11[r11]++; + cur->n13[r13]++; + cur->n17[r17]++; + cur->n19[r19]++; + cur->n23[r23]++; + cur->n29[r29]++; + cur->n31[r31]++; + + return; +} + +/* Function: add_prime_bucket_uint64 + * + * Description: + * This function increments the prime number buckets in the prime bucket + * set based on the uint64_t number provided. This function increments + * each prime number bucket by one at an offset of the corresponding + * remainder of the division. This function is based on Chinese Remainder + * Theorem and adds the given number to the set to compare later. + * + * Input: + * 1. Corresponding prime bucket to increment. + * 2. uint64_t number to add to the set. + * + * Output: nil + */ +void add_prime_bucket_uint64(PrimeBuckets *cur, uint64_t num) +{ + size_t r32, r27, r25, r7, r11, r13, r17, r19, r23, r29, r31; + + if (!cur) { + return; + } + + /* Perform the necessary divisions here */ + r32 = num % 32; + r27 = num % 27; + r25 = num % 25; + r7 = num % 7; + r11 = num % 11; + r13 = num % 13; + r17 = num % 17; + r19 = num % 19; + r23 = num % 23; + r29 = num % 29; + r31 = num % 31; + + /* Update bucket for attribute bit */ + cur->n32[r32]++; + cur->n27[r27]++; + cur->n25[r25]++; + cur->n7[r7]++; + cur->n11[r11]++; + cur->n13[r13]++; + cur->n17[r17]++; + cur->n19[r19]++; + cur->n23[r23]++; + cur->n29[r29]++; + cur->n31[r31]++; + + return; +} + +/* Compares the two prime buckets provided. + * Returns - + * zero - If the two buckets are same. + * non-zero - If the two buckets do not match. 
+ */ +int compare_prime_buckets(PrimeBuckets *bucket1, PrimeBuckets *bucket2) +{ + int retval = 1; + int i; + + for (i=0; i<32; i++) { + if (bucket1->n32[i] != bucket2->n32[i]) { + goto out; + } + } + + for (i=0; i<27; i++) { + if (bucket1->n27[i] != bucket2->n27[i]) { + goto out; + } + } + + for (i=0; i<25; i++) { + if (bucket1->n25[i] != bucket2->n25[i]) { + goto out; + } + } + + for (i=0; i<7; i++) { + if (bucket1->n7[i] != bucket2->n7[i]) { + goto out; + } + } + + for (i=0; i<11; i++) { + if (bucket1->n11[i] != bucket2->n11[i]) { + goto out; + } + } + + for (i=0; i<13; i++) { + if (bucket1->n13[i] != bucket2->n13[i]) { + goto out; + } + } + + for (i=0; i<17; i++) { + if (bucket1->n17[i] != bucket2->n17[i]) { + goto out; + } + } + + for (i=0; i<19; i++) { + if (bucket1->n19[i] != bucket2->n19[i]) { + goto out; + } + } + + for (i=0; i<23; i++) { + if (bucket1->n23[i] != bucket2->n23[i]) { + goto out; + } + } + + for (i=0; i<29; i++) { + if (bucket1->n29[i] != bucket2->n29[i]) { + goto out; + } + } + + for (i=0; i<31; i++) { + if (bucket1->n31[i] != bucket2->n31[i]) { + goto out; + } + } + + retval = 0; + +out: + return retval; +} + +/* Prints the prime number bucket for the passed pointer */ +void print_prime_buckets(PrimeBuckets *cur) +{ + int i; + + plog ("n32 = { "); + for (i=0; i<32; i++) { + plog ("%d,", cur->n32[i]); + } + plog ("}\n"); + + plog ("n27 = { "); + for (i=0; i<27; i++) { + plog ("%d,", cur->n27[i]); + } + plog ("}\n"); + + plog ("n25 = { "); + for (i=0; i<25; i++) { + plog ("%d,", cur->n25[i]); + } + plog ("}\n"); + + plog ("n7 = { "); + for (i=0; i<7; i++) { + plog ("%d,", cur->n7[i]); + } + plog ("}\n"); + + plog ("n11 = { "); + for (i=0; i<11; i++) { + plog ("%d,", cur->n11[i]); + } + plog ("}\n"); + + plog ("n13 = { "); + for (i=0; i<13; i++) { + plog ("%d,", cur->n13[i]); + } + plog ("}\n"); + + plog ("n17 = { "); + for (i=0; i<17; i++) { + plog ("%d,", cur->n17[i]); + } + plog ("}\n"); + + plog ("n19 = { "); + for (i=0; i<19; i++) { + plog ("%d,", cur->n19[i]); + } + plog ("}\n"); + + plog ("n23 = { "); + for (i=0; i<23; i++) { + plog ("%d,", cur->n23[i]); + } + plog ("}\n"); + + plog ("n29 = { "); + for (i=0; i<29; i++) { + plog ("%d,", cur->n29[i]); + } + plog ("}\n"); + + plog ("n31 = { "); + for (i=0; i<31; i++) { + plog ("%d,", cur->n31[i]); + } + plog ("}\n"); +} diff --git a/fsck_hfs/dfalib/SVerify1.c b/fsck_hfs/dfalib/SVerify1.c new file mode 100644 index 0000000..a1dfd62 --- /dev/null +++ b/fsck_hfs/dfalib/SVerify1.c @@ -0,0 +1,4514 @@ +/* + * Copyright (c) 1999-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SVerify1.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved. + +*/ + +#include "Scavenger.h" +#include "../cache.h" +#include <stdlib.h> +#include <stddef.h> +#include <unistd.h> +#include <errno.h> +#include <fcntl.h> +#include <limits.h> + +#include <libkern/OSByteOrder.h> +#define SW16(x) OSSwapBigToHostInt16(x) +#define SW32(x) OSSwapBigToHostInt32(x) +#define SW64(x) OSSwapBigToHostInt64(x) + +extern int OpenDeviceByUUID(void *uuidp, char **nameptr); + +// internal routine prototypes + +static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid ); + +static int RcdNameLockedErr( SGlobPtr GPtr, OSErr type, UInt32 incorrect ); + +static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb ); + +static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb ); + +static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType ); +static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector ); + +/* overlapping extents verification functions prototype */ +static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrName, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType ); + +static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo); + +static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType); + +static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType); + +static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType); + +static int CompareExtentFileID(const void *first, const void *second); + +/* + * Check if a volume is journaled. + * + * If journal_bit_only is true, the function only checks + * if kHFSVolumeJournaledBit is set or not. If the bit + * is set, function returns 1 otherwise 0. + * + * If journal_bit_only is false, in addition to checking + * kHFSVolumeJournaledBit, the function also checks if the + * last mounted version indicates failed journal replay, + * or runtime corruption was detected or simply the volume + * is not journaled and it was not unmounted cleanly. + * If all of the above conditions are false and the journal + * bit is set, function returns 1 to indicate that the + * volume is truly journaled, otherwise returns 0 to fake + * that volume is not journaled.
+ * + * returns: 0 not journaled or any of the above conditions are true + * 1 journaled + * + */ +int +CheckIfJournaled(SGlobPtr GPtr, Boolean journal_bit_only) +{ +#define kIDSector 2 + + OSErr err; + int result; + HFSMasterDirectoryBlock *mdbp; + HFSPlusVolumeHeader *vhp; + SVCB *vcb = GPtr->calculatedVCB; + ReleaseBlockOptions rbOptions; + BlockDescriptor block; + + vhp = (HFSPlusVolumeHeader *) NULL; + rbOptions = kReleaseBlock; + + err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block); + if (err) return (0); + + mdbp = (HFSMasterDirectoryBlock *) block.buffer; + + if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) { + vhp = (HFSPlusVolumeHeader *) block.buffer; + + } else if (mdbp->drSigWord == kHFSSigWord) { + + if (mdbp->drEmbedSigWord == kHFSPlusSigWord) { + UInt32 vhSector; + UInt32 blkSectors; + + blkSectors = mdbp->drAlBlkSiz / 512; + vhSector = mdbp->drAlBlSt; + vhSector += blkSectors * mdbp->drEmbedExtent.startBlock; + vhSector += kIDSector; + + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block); + if (err) return (0); + + vhp = (HFSPlusVolumeHeader *) block.buffer; + mdbp = (HFSMasterDirectoryBlock *) NULL; + + } + } + + if ((vhp != NULL) && (ValidVolumeHeader(vhp) == noErr)) { + result = ((vhp->attributes & kHFSVolumeJournaledMask) != 0); + if (journal_bit_only == true) { + goto out; + } + + // even if journaling is enabled for this volume, we'll return + // false if it wasn't unmounted cleanly and it was previously + // mounted by someone that doesn't know about journaling. + // or if lastMountedVersion is kFSKMountVersion + if ( vhp->lastMountedVersion == kFSKMountVersion || + (vhp->attributes & kHFSVolumeInconsistentMask) || + ((vhp->lastMountedVersion != kHFSJMountVersion) && + (vhp->attributes & kHFSVolumeUnmountedMask) == 0)) { + result = 0; + } + } else { + result = 0; + } + +out: + (void) ReleaseVolumeBlock(vcb, &block, rbOptions); + + return (result); +} + +/* + * Get the JournalInfoBlock from a volume. + * + * It borrows code to get the volume header. Note that it + * uses the primary volume header, not the alternate one. + * It returns 0 on success, or an error value. + * If requested, it will also set the block size (as a 32-bit + * value), via bsizep -- this is useful because the journal code + * needs to know the volume blocksize, but it doesn't necessarily + * have the header. + * + * Note also that it does direct reads, rather than going through + * the cache code. This simplifies getting the JIB. 
+ */ + +static OSErr +GetJournalInfoBlock(SGlobPtr GPtr, JournalInfoBlock *jibp, UInt32 *bsizep) +{ +#define kIDSector 2 + + OSErr err; + int result = 0; + UInt32 jiBlk = 0; + HFSMasterDirectoryBlock *mdbp; + HFSPlusVolumeHeader *vhp; + SVCB *vcb = GPtr->calculatedVCB; + ReleaseBlockOptions rbOptions; + BlockDescriptor block; + size_t blockSize = 0; + off_t embeddedOffset = 0; + + vhp = (HFSPlusVolumeHeader *) NULL; + rbOptions = kReleaseBlock; + + if (jibp == NULL) + return paramErr; + + err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block); + if (err) return (err); + + mdbp = (HFSMasterDirectoryBlock *) block.buffer; + + if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) { + vhp = (HFSPlusVolumeHeader *) block.buffer; + + } else if (mdbp->drSigWord == kHFSSigWord) { + + if (mdbp->drEmbedSigWord == kHFSPlusSigWord) { + UInt32 vhSector; + UInt32 blkSectors; + + blkSectors = mdbp->drAlBlkSiz / 512; + vhSector = mdbp->drAlBlSt; + vhSector += blkSectors * mdbp->drEmbedExtent.startBlock; + vhSector += kIDSector; + + embeddedOffset = (mdbp->drEmbedExtent.startBlock * mdbp->drAlBlkSiz) + (mdbp->drAlBlSt * Blk_Size); + if (debug) + plog("Embedded offset is %lld\n", embeddedOffset); + + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block); + if (err) return (err); + + vhp = (HFSPlusVolumeHeader *) block.buffer; + mdbp = (HFSMasterDirectoryBlock *) NULL; + + } + } + + if (vhp == NULL) { + result = paramErr; + goto out; + } + if ((err = ValidVolumeHeader(vhp)) != noErr) { + result = err; + goto out; + } + + // journalInfoBlock is not automatically swapped + jiBlk = SW32(vhp->journalInfoBlock); + blockSize = vhp->blockSize; + (void)ReleaseVolumeBlock(vcb, &block, rbOptions); + + if (jiBlk) { + int jfd = GPtr->DrvNum; + uint8_t block[blockSize]; + ssize_t nread; + + nread = pread(jfd, block, blockSize, (off_t)jiBlk * blockSize + embeddedOffset); + if (nread == blockSize) { + if (jibp) + memcpy(jibp, block, sizeof(JournalInfoBlock)); + if (bsizep) + *bsizep = blockSize; + result = 0; + } else { + if (debug) { + plog("%s: Tried to read JIB, got %zd\n", __FUNCTION__, nread); + result = EINVAL; + } + } + } + +out: + return (result); +} + +/* + * Journal checksum calculation, taken directly from TN1150. + */ +static int +calc_checksum(unsigned char *ptr, int len) +{ + int i, cksum=0; + + for(i=0; i < len; i++, ptr++) { + cksum = (cksum << 8) ^ (cksum + *ptr); + } + + return (~cksum); +} + +/* + * The journal_header structure is not defined in <hfs/hfs_format.h>; + * it's described in TN1150. It is on disk in the endian mode that was + * used to write it, so we may or may not need to swap the fields. + */ +typedef struct journal_header { + UInt32 magic; + UInt32 endian; + UInt64 start; + UInt64 end; + UInt64 size; + UInt32 blhdr_size; + UInt32 checksum; + UInt32 jhdr_size; + UInt32 sequence_num; +} journal_header; + +#define JOURNAL_HEADER_MAGIC 0x4a4e4c78 +#define ENDIAN_MAGIC 0x12345678 +#define JOURNAL_HEADER_CKSUM_SIZE (offsetof(struct journal_header, sequence_num)) + +/* + * Determine if a journal is empty. + * This code can use an in-filesystem, or external, journal. + * In general, it returns 0 if the journal exists, and appears to + * be non-empty (that is, start and end in the journal header are + * the same); it will return 1 if it exists and is empty, or if + * there was a problem getting the journal. 
(This behaviour was + * chosen because it mimics the existing behaviour of fsck_hfs, + * which has traditionally done nothing with the journal. Future + * versions may be more demanding.) + * + * <jp> is an OUT parameter: the contents of the structure it points + * to are filled in by this routine. (The reasoning for doing this + * is because this rountine has to open the journal info block, and read + * from the journal device, so putting this in another function was + * duplicative and error-prone. By making it a structure instead of + * discrete arguments, it can also be extended in the future if necessary.) + */ +int +IsJournalEmpty(SGlobPtr GPtr, fsckJournalInfo_t *jp) +{ + int retval = 1; + OSErr result; + OSErr err = 0; + JournalInfoBlock jib; + UInt32 bsize; + + result = GetJournalInfoBlock(GPtr, &jib, &bsize); + if (result == 0) { + /* jib is not byte swapped */ + /* If the journal needs to be initialized, it's empty. */ + if ((SW32(jib.flags) & kJIJournalNeedInitMask) == 0) { + off_t hdrOffset = SW64(jib.offset); + struct journal_header *jhdr; + uint8_t block[bsize]; + ssize_t nread; + int jfd = -1; + + /* If it's an external journal, kJIJournalInSFMask will not be set */ + if (SW32(jib.flags) & kJIJournalInFSMask) { + jfd = dup(GPtr->DrvNum); + jp->name = strdup(GPtr->deviceNode); + } else { + char **namePtr = jp ? &jp->name : NULL; + if (debug) + plog("External Journal device\n"); + jfd = OpenDeviceByUUID(&jib.ext_jnl_uuid, namePtr); + } + if (jfd == -1) { + if (debug) { + plog("Unable to get journal file descriptor, journal flags = %#x\n", SW32(jib.flags)); + } + goto out; + } + if (jp) { + jp->jnlfd = jfd; + jp->jnlOffset = SW64(jib.offset); + jp->jnlSize = SW64(jib.size); + } + + nread = pread(jfd, block, bsize, hdrOffset); + if (nread == -1) { + if (debug) { + plog("Could not read journal from descriptor %d: %s", jfd, strerror(errno)); + } + err = errno; + } else if (nread != bsize) { + if (debug) { + plog("Only read %zd bytes from journal (expected %zd)", nread, bsize); + err = EINVAL; + } + } + if (jp == NULL) + close(jfd); + /* We got the journal header, now we need to check it */ + if (err == noErr) { + int swap = 0; + UInt32 cksum = 0; + + jhdr = (struct journal_header*)block; + + if (jhdr->magic == JOURNAL_HEADER_MAGIC || + SW32(jhdr->magic) == JOURNAL_HEADER_MAGIC) { + if (jhdr->endian == ENDIAN_MAGIC) + swap = 0; + else if (SW32(jhdr->endian) == ENDIAN_MAGIC) + swap = 1; + else + swap = 2; + + if (swap != 2) { + cksum = swap ? SW32(jhdr->checksum) : jhdr->checksum; + UInt32 calc_sum; + jhdr->checksum = 0; + /* Checksum calculation needs the checksum field to be zero. */ + calc_sum = calc_checksum((unsigned char*)jhdr, JOURNAL_HEADER_CKSUM_SIZE); + /* But, for now, this is for debugging purposes only */ + if (calc_sum != cksum) { + if (debug) + plog("Journal checksum doesn't match: orig %x != calc %x\n", cksum, calc_sum); + } + /* We have a journal, we got the header, now we check the start and end */ + if (jhdr->start != jhdr->end) { + retval = 0; + if (debug) + plog("Non-empty journal: start = %lld, end = %lld\n", + swap ? SW64(jhdr->start) : jhdr->start, + swap ? SW64(jhdr->end) : jhdr->end); + } + } + } + } + } + } +out: + return retval; +} + +/* + * The functions checks whether the volume is clean or dirty. It + * also marks the volume as clean/dirty depending on the type + * of operation specified. It modifies the volume header only + * if the old values are not same as the new values. 
If the volume + * header is updated, it also sets the last mounted version for HFS+. + * + * Input: + * GPtr - Pointer to scavenger global area + * operation - Type of operation to perform + * kCheckVolume, // check if volume is clean/dirty + * kMarkVolumeDirty, // mark the volume dirty + * kMarkVolumeClean // mark the volume clean + * + * Output: + * modified - true if the VH/MDB was modified, otherwise false. + * Return Value - + * -1 - if the volume is not an HFS/HFS+ volume + * 0 - if the volume was dirty or marked dirty + * 1 - if the volume was clean or marked clean + * If the operation requested was to mark the volume clean/dirty, + * the return value is dependent on type of operation (described above). + */ +int CheckForClean(SGlobPtr GPtr, UInt8 operation, Boolean *modified) +{ + enum { unknownVolume = -1, cleanUnmount = 1, dirtyUnmount = 0}; + int result = unknownVolume; + Boolean update = false; + HFSMasterDirectoryBlock *mdbp; + HFSPlusVolumeHeader *vhp; + BlockDescriptor block; + ReleaseBlockOptions rbOptions; + UInt64 blockNum; + SVCB *vcb; + + *modified = false; + vcb = GPtr->calculatedVCB; + block.buffer = NULL; + rbOptions = kReleaseBlock; + + /* Get the block number for VH/MDB */ + GetVolumeObjectBlockNum(&blockNum); + if (blockNum == 0) { + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) + plog( "\t%s - unknown volume type \n", __FUNCTION__ ); + goto ExitThisRoutine; + } + + /* Get VH or MDB depending on the type of volume */ + result = GetVolumeObjectPrimaryBlock(&block); + if (result) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - could not get VHB/MDB at block %qd \n", __FUNCTION__, blockNum ); + result = unknownVolume; + goto ExitThisRoutine; + } + + result = cleanUnmount; + + if (VolumeObjectIsHFSPlus()) { + vhp = (HFSPlusVolumeHeader *) block.buffer; + + /* Check unmount bit and volume inconsistent bit */ + if (((vhp->attributes & kHFSVolumeUnmountedMask) == 0) || + (vhp->attributes & kHFSVolumeInconsistentMask)) + result = dirtyUnmount; + + /* Check last mounted version. If kFSKMountVersion, bad + * journal was encountered during mount. Force dirty volume. 
+ */ + + if (vhp->lastMountedVersion == kFSKMountVersion) { + GPtr->JStat |= S_BadJournal; + RcdError (GPtr, E_BadJournal); + result = dirtyUnmount; + } + + if (operation == kMarkVolumeDirty) { + /* Mark volume was not unmounted cleanly */ + if (vhp->attributes & kHFSVolumeUnmountedMask) { + vhp->attributes &= ~kHFSVolumeUnmountedMask; + update = true; + } + /* Mark volume inconsistent */ + if ((vhp->attributes & kHFSVolumeInconsistentMask) == 0) { + vhp->attributes |= kHFSVolumeInconsistentMask; + update = true; + } + } else if (operation == kMarkVolumeClean) { + /* Mark volume was unmounted cleanly */ + if ((vhp->attributes & kHFSVolumeUnmountedMask) == 0) { + vhp->attributes |= kHFSVolumeUnmountedMask; + update = true; + } + /* Mark volume consistent */ + if (vhp->attributes & kHFSVolumeInconsistentMask) { + vhp->attributes &= ~kHFSVolumeInconsistentMask; + update = true; + } + } + + /* If any changes to VH, update the last mounted version */ + if (update == true) { + vhp->lastMountedVersion = kFSCKMountVersion; + } + } else if (VolumeObjectIsHFS()) { + mdbp = (HFSMasterDirectoryBlock *) block.buffer; + + /* Check unmount bit and volume inconsistent bit */ + if (((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) || + (mdbp->drAtrb & kHFSVolumeInconsistentMask)) + result = dirtyUnmount; + + if (operation == kMarkVolumeDirty) { + /* Mark volume was not unmounted cleanly */ + if (mdbp->drAtrb & kHFSVolumeUnmountedMask) { + mdbp->drAtrb &= ~kHFSVolumeUnmountedMask; + update = true; + } + /* Mark volume inconsistent */ + if ((mdbp->drAtrb & kHFSVolumeInconsistentMask) == 0) { + mdbp->drAtrb |= kHFSVolumeInconsistentMask; + update = true; + } + } else if (operation == kMarkVolumeClean) { + /* Mark volume was unmounted cleanly */ + if ((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) { + mdbp->drAtrb |= kHFSVolumeUnmountedMask; + update = true; + } + /* Mark volume consistent */ + if (mdbp->drAtrb & kHFSVolumeInconsistentMask) { + mdbp->drAtrb &= ~kHFSVolumeInconsistentMask; + update = true; + } + } + } + +ExitThisRoutine: + if (update == true) { + *modified = true; + rbOptions = kForceWriteBlock; + /* Set appropriate return value */ + if (operation == kMarkVolumeDirty) { + result = dirtyUnmount; + } else if (operation == kMarkVolumeClean) { + result = cleanUnmount; + } + } + if (block.buffer != NULL) + (void) ReleaseVolumeBlock(vcb, &block, rbOptions); + + return (result); +} + +/*------------------------------------------------------------------------------ + +Function: IVChk - (Initial Volume Check) + +Function: Performs an initial check of the volume to be scavenged to confirm + that the volume can be accessed and that it is a HFS/HFS+ volume. 
+ +Input: GPtr - pointer to scavenger global area + +Output: IVChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ +#define kBitsPerSector 4096 + +OSErr IVChk( SGlobPtr GPtr ) +{ + OSErr err; + HFSMasterDirectoryBlock * myMDBPtr; + HFSPlusVolumeHeader * myVHBPtr; + UInt32 numABlks; + UInt32 minABlkSz; + UInt32 maxNumberOfAllocationBlocks; + UInt32 realAllocationBlockSize; + UInt32 realTotalBlocks; + UInt32 i; + BTreeControlBlock *btcb; + SVCB *vcb = GPtr->calculatedVCB; + VolumeObjectPtr myVOPtr; + UInt64 blockNum; + UInt64 totalSectors; + BlockDescriptor myBlockDescriptor; + + // Set up + GPtr->TarID = AMDB_FNum; // target = alt MDB + GPtr->TarBlock = 0; + maxNumberOfAllocationBlocks = 0xFFFFFFFF; + realAllocationBlockSize = 0; + realTotalBlocks = 0; + + myBlockDescriptor.buffer = NULL; + myVOPtr = GetVolumeObjectPtr( ); + + // check volume size + if ( myVOPtr->totalDeviceSectors < 3 ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid device information for volume - total sectors = %qd sector size = %d \n", + myVOPtr->totalDeviceSectors, myVOPtr->sectorSize); + return( 123 ); + } + + GetVolumeObjectBlockNum( &blockNum ); + if ( blockNum == 0 || myVOPtr->volumeType == kUnknownVolumeType ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - unknown volume type \n", __FUNCTION__ ); + err = R_BadSig; /* doesn't bear the HFS signature */ + goto ReleaseAndBail; + } + + // get Volume Header (HFS+) or Master Directory (HFS) block + err = GetVolumeObjectVHBorMDB( &myBlockDescriptor ); + if ( err != noErr ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err ); + goto ReleaseAndBail; + } + myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer; + + // if this is an HFS (kHFSVolumeType) volume and the MDB indicates this + // might contain an embedded HFS+ volume then we need to scan + // for an embedded HFS+ volume. I'm told there were some old problems + // where we could lose track of the embedded volume. + if ( VolumeObjectIsHFS( ) && + (myMDBPtr->drEmbedSigWord != 0 || + myMDBPtr->drEmbedExtent.blockCount != 0 || + myMDBPtr->drEmbedExtent.startBlock != 0) ) { + + err = ScavengeVolumeType( GPtr, myMDBPtr, &myVOPtr->volumeType ); + if ( err == E_InvalidMDBdrAlBlSt ) + err = RcdMDBEmbededVolDescriptionErr( GPtr, E_InvalidMDBdrAlBlSt, myMDBPtr ); + + if ( VolumeObjectIsEmbeddedHFSPlus( ) ) { + // we changed volume types so let's get the VHB + (void) ReleaseVolumeBlock( vcb, &myBlockDescriptor, kReleaseBlock ); + myBlockDescriptor.buffer = NULL; + myMDBPtr = NULL; + err = GetVolumeObjectVHB( &myBlockDescriptor ); + if ( err != noErr ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err ); + WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 ); + err = E_InvalidVolumeHeader; + goto ReleaseAndBail; + } + + GetVolumeObjectBlockNum( &blockNum ); // get the new Volume header block number + } + else { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err ); + WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 ); + err = E_InvalidVolumeHeader; + goto ReleaseAndBail; + } + } + + totalSectors = ( VolumeObjectIsEmbeddedHFSPlus( ) ) ? 
myVOPtr->totalEmbeddedSectors : myVOPtr->totalDeviceSectors; + + // indicate what type of volume we are dealing with + if ( VolumeObjectIsHFSPlus( ) ) { + + myVHBPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer; + if (myVHBPtr->attributes & kHFSVolumeJournaledMask) { + fsckPrint(GPtr->context, hfsJournalVolCheck); + } else { + fsckPrint(GPtr->context, hfsCheckNoJnl); + } + GPtr->numExtents = kHFSPlusExtentDensity; + vcb->vcbSignature = kHFSPlusSigWord; + + // Further populate the VCB with VolumeHeader info + vcb->vcbAlBlSt = myVOPtr->embeddedOffset / 512; + vcb->vcbEmbeddedOffset = myVOPtr->embeddedOffset; + realAllocationBlockSize = myVHBPtr->blockSize; + realTotalBlocks = myVHBPtr->totalBlocks; + vcb->vcbNextCatalogID = myVHBPtr->nextCatalogID; + vcb->vcbCreateDate = myVHBPtr->createDate; + vcb->vcbAttributes = myVHBPtr->attributes & kHFSCatalogNodeIDsReused; + + if ( myVHBPtr->attributesFile.totalBlocks == 0 ) + vcb->vcbAttributesFile = NULL; /* XXX memory leak ? */ + + // Make sure the Extents B-Tree is set to use 16-bit key lengths. + // We access it before completely setting up the control block. + btcb = (BTreeControlBlock *) vcb->vcbExtentsFile->fcbBtree; + btcb->attributes |= kBTBigKeysMask; + + // catch the case where the volume allocation block count is greater than + // maximum number of device allocation blocks. - bug 2916021 + numABlks = myVOPtr->totalDeviceSectors / ( myVHBPtr->blockSize / Blk_Size ); + if ( myVHBPtr->totalBlocks > numABlks ) { + RcdError( GPtr, E_NABlks ); + err = E_NABlks; + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) { + plog( "\t%s - volume header total allocation blocks is greater than device size \n", __FUNCTION__ ); + plog( "\tvolume allocation block count %d device allocation block count %d \n", + myVHBPtr->totalBlocks, numABlks ); + } + goto ReleaseAndBail; + } + } + else if ( VolumeObjectIsHFS( ) ) { + +// fsckPrint(GPtr->context, fsckCheckingVolume); + fsckPrint(GPtr->context, hfsCheckHFS); + + GPtr->numExtents = kHFSExtentDensity; + vcb->vcbSignature = myMDBPtr->drSigWord; + maxNumberOfAllocationBlocks = 0xFFFF; + // set up next file ID, CheckBTreeKey makse sure we are under this value + vcb->vcbNextCatalogID = myMDBPtr->drNxtCNID; + vcb->vcbCreateDate = myMDBPtr->drCrDate; + + realAllocationBlockSize = myMDBPtr->drAlBlkSiz; + realTotalBlocks = myMDBPtr->drNmAlBlks; + } + + GPtr->TarBlock = blockNum; // target block + + // verify volume allocation info + // Note: i is the number of sectors per allocation block + numABlks = totalSectors; + minABlkSz = Blk_Size; // init minimum ablock size + // loop while #ablocks won't fit + for( i = 2; numABlks > maxNumberOfAllocationBlocks; i++ ) { + minABlkSz = i * Blk_Size; // jack up minimum + numABlks = totalSectors / i; // recompute #ablocks, assuming this size + } + + vcb->vcbBlockSize = realAllocationBlockSize; + numABlks = totalSectors / ( realAllocationBlockSize / Blk_Size ); + if ( VolumeObjectIsHFSPlus( ) ) { + // HFS Plus allocation block size must be power of 2 + if ( (realAllocationBlockSize < minABlkSz) || + (realAllocationBlockSize & (realAllocationBlockSize - 1)) != 0 ) + realAllocationBlockSize = 0; + } + else { + if ( (realAllocationBlockSize < minABlkSz) || + (realAllocationBlockSize > Max_ABSiz) || + ((realAllocationBlockSize % Blk_Size) != 0) ) + realAllocationBlockSize = 0; + } + + if ( realAllocationBlockSize == 0 ) { + RcdError( GPtr, E_ABlkSz ); + err = E_ABlkSz; // bad allocation block size + goto ReleaseAndBail; + } + + vcb->vcbTotalBlocks = realTotalBlocks; + 
vcb->vcbFreeBlocks = 0; + + // Only do these tests on HFS volumes, since they are either + // or, getting the VolumeHeader would have already failed. + if ( VolumeObjectIsHFS( ) ) { + UInt32 bitMapSizeInSectors; + + // Calculate the volume bitmap size + bitMapSizeInSectors = ( numABlks + kBitsPerSector - 1 ) / kBitsPerSector; // VBM size in blocks + + //¥¥ Calculate the validaty of HFS Allocation blocks, I think realTotalBlocks == numABlks + numABlks = (totalSectors - 3 - bitMapSizeInSectors) / (realAllocationBlockSize / Blk_Size); // actual # of alloc blks + + if ( realTotalBlocks > numABlks ) { + RcdError( GPtr, E_NABlks ); + err = E_NABlks; // invalid number of allocation blocks + goto ReleaseAndBail; + } + + if ( myMDBPtr->drVBMSt <= MDB_BlkN ) { + RcdError(GPtr,E_VBMSt); + err = E_VBMSt; // invalid VBM start block + goto ReleaseAndBail; + } + vcb->vcbVBMSt = myMDBPtr->drVBMSt; + + if (myMDBPtr->drAlBlSt < (myMDBPtr->drVBMSt + bitMapSizeInSectors)) { + RcdError(GPtr,E_ABlkSt); + err = E_ABlkSt; // invalid starting alloc block + goto ReleaseAndBail; + } + vcb->vcbAlBlSt = myMDBPtr->drAlBlSt; + } + +ReleaseAndBail: + if (myBlockDescriptor.buffer != NULL) + (void) ReleaseVolumeBlock(vcb, &myBlockDescriptor, kReleaseBlock); + + return( err ); +} + + +static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType ) +{ + UInt64 vHSector; + UInt64 startSector; + UInt64 altVHSector; + UInt64 hfsPlusSectors = 0; + UInt32 sectorsPerBlock; + UInt32 numSectorsToSearch; + OSErr err; + HFSPlusVolumeHeader *volumeHeader; + HFSExtentDescriptor embededExtent; + SVCB *calculatedVCB = GPtr->calculatedVCB; + VolumeObjectPtr myVOPtr; + UInt16 embedSigWord = mdb->drEmbedSigWord; + BlockDescriptor block; + + /* + * If all of the embedded volume information is zero, then assume + * this really is a plain HFS disk like it says. Otherwise, if + * you reinitialize a large HFS Plus volume as HFS, the original + * embedded volume's volume header and alternate volume header will + * still be there, and we'll try to repair the embedded volume. 
+ */ + if (embedSigWord == 0 && + mdb->drEmbedExtent.blockCount == 0 && + mdb->drEmbedExtent.startBlock == 0) + { + *volumeType = kHFSVolumeType; + return noErr; + } + + myVOPtr = GetVolumeObjectPtr( ); + *volumeType = kEmbededHFSPlusVolumeType; // Assume HFS+ + + // + // First see if it is an HFS+ volume and the relevent structures look OK + // + if ( embedSigWord == kHFSPlusSigWord ) + { + /* look for primary volume header */ + vHSector = (UInt64)mdb->drAlBlSt + + ((UInt64)(mdb->drAlBlkSiz / Blk_Size) * (UInt64)mdb->drEmbedExtent.startBlock) + 2; + + err = GetVolumeBlock(calculatedVCB, vHSector, kGetBlock, &block); + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + if ( err != noErr ) goto AssumeHFS; + + myVOPtr->primaryVHB = vHSector; + err = ValidVolumeHeader( volumeHeader ); + (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock); + if ( err == noErr ) { + myVOPtr->flags |= kVO_PriVHBOK; + return( noErr ); + } + } + + sectorsPerBlock = mdb->drAlBlkSiz / Blk_Size; + + // Search the end of the disk to see if a Volume Header is present at all + if ( embedSigWord != kHFSPlusSigWord ) + { + numSectorsToSearch = mdb->drAlBlkSiz / Blk_Size; + startSector = myVOPtr->totalDeviceSectors - 4 - numSectorsToSearch; + + err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &altVHSector ); + if ( err != noErr ) goto AssumeHFS; + + // We found the Alt VH, so this must be a damaged embeded HFS+ volume + // Now Scavenge for the Primary VolumeHeader + myVOPtr->alternateVHB = altVHSector; + myVOPtr->flags |= kVO_AltVHBOK; + startSector = mdb->drAlBlSt + (4 * sectorsPerBlock); // Start looking at 4th HFS allocation block + numSectorsToSearch = 10 * sectorsPerBlock; // search for VH in next 10 allocation blocks + + err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &vHSector ); + if ( err != noErr ) goto AssumeHFS; + + myVOPtr->primaryVHB = vHSector; + myVOPtr->flags |= kVO_PriVHBOK; + hfsPlusSectors = altVHSector - vHSector + 1 + 2 + 1; // numSectors + BB + end + + // Fix the embeded extent + embededExtent.blockCount = hfsPlusSectors / sectorsPerBlock; + embededExtent.startBlock = (vHSector - 2 - mdb->drAlBlSt ) / sectorsPerBlock; + embedSigWord = kHFSPlusSigWord; + + myVOPtr->embeddedOffset = + (embededExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size); + } + else + { + embedSigWord = mdb->drEmbedSigWord; + embededExtent.blockCount = mdb->drEmbedExtent.blockCount; + embededExtent.startBlock = mdb->drEmbedExtent.startBlock; + } + + if ( embedSigWord == kHFSPlusSigWord ) + { + startSector = 2 + mdb->drAlBlSt + + ((UInt64)embededExtent.startBlock * (mdb->drAlBlkSiz / Blk_Size)); + + err = SeekVolumeHeader( GPtr, startSector, mdb->drAlBlkSiz / Blk_Size, &vHSector ); + if ( err != noErr ) goto AssumeHFS; + + // Now replace the bad fields and mark the error + mdb->drEmbedExtent.blockCount = embededExtent.blockCount; + mdb->drEmbedExtent.startBlock = embededExtent.startBlock; + mdb->drEmbedSigWord = kHFSPlusSigWord; + mdb->drAlBlSt += vHSector - startSector; // Fix the bad field + myVOPtr->totalEmbeddedSectors = (mdb->drAlBlkSiz / Blk_Size) * mdb->drEmbedExtent.blockCount; + myVOPtr->embeddedOffset = + (mdb->drEmbedExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size); + myVOPtr->primaryVHB = vHSector; + myVOPtr->flags |= kVO_PriVHBOK; + + GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB + return( E_InvalidMDBdrAlBlSt ); + } + +AssumeHFS: + *volumeType = kHFSVolumeType; + return( noErr ); + +} /* ScavengeVolumeType */ + + +static OSErr 
SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector ) +{ + OSErr err; + HFSPlusVolumeHeader *volumeHeader; + SVCB *calculatedVCB = GPtr->calculatedVCB; + BlockDescriptor block; + + for ( *vHSector = startSector ; *vHSector < startSector + numSectors ; (*vHSector)++ ) + { + err = GetVolumeBlock(calculatedVCB, *vHSector, kGetBlock, &block); + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + if ( err != noErr ) return( err ); + + err = ValidVolumeHeader(volumeHeader); + + (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock); + if ( err == noErr ) + return( noErr ); + } + + return( fnfErr ); +} + + +#if 0 // not used at this time +static OSErr CheckWrapperExtents( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb ) +{ + OSErr err = noErr; + + // See if Norton Disk Doctor 2.0 corrupted the catalog's first extent + if ( mdb->drCTExtRec[0].startBlock >= mdb->drEmbedExtent.startBlock) + { + // Fix the field in the in-memory copy, and record the error + mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + mdb->drXTExtRec[0].blockCount; + GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB + err = RcdInvalidWrapperExtents( GPtr, E_InvalidWrapperExtents ); + } + + return err; +} +#endif + +/*------------------------------------------------------------------------------ + +Function: CreateExtentsBTreeControlBlock + +Function: Create the calculated ExtentsBTree Control Block + +Input: GPtr - pointer to scavenger global area + +Output: - 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr CreateExtentsBTreeControlBlock( SGlobPtr GPtr ) +{ + OSErr err; + SInt32 size; + UInt32 numABlks; + BTHeaderRec header; + BTreeControlBlock * btcb; + SVCB * vcb; + BlockDescriptor block; + Boolean isHFSPlus; + + // Set up + isHFSPlus = VolumeObjectIsHFSPlus( ); + GPtr->TarID = kHFSExtentsFileID; // target = extent file + GPtr->TarBlock = kHeaderNodeNum; // target block = header node + vcb = GPtr->calculatedVCB; + btcb = GPtr->calculatedExtentsBTCB; + block.buffer = NULL; + + // get Volume Header (HFS+) or Master Directory (HFS) block + err = GetVolumeObjectVHBorMDB( &block ); + if (err) goto exit; + // + // check out allocation info for the Extents File + // + if (isHFSPlus) + { + HFSPlusVolumeHeader *volumeHeader; + + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + + CopyMemory(volumeHeader->extentsFile.extents, GPtr->calculatedExtentsFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) ); + + err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents32, &numABlks); // check out extent info + + if (err) goto exit; + + if ( volumeHeader->extentsFile.totalBlocks != numABlks ) // check out the PEOF + { + RcdError( GPtr, E_ExtPEOF ); + err = E_ExtPEOF; + if (debug) + plog("Extents File totalBlocks = %u, numABlks = %u\n", volumeHeader->extentsFile.totalBlocks, numABlks); + goto exit; + } + else + { + GPtr->calculatedExtentsFCB->fcbLogicalSize = volumeHeader->extentsFile.logicalSize; // Set Extents tree's LEOF + GPtr->calculatedExtentsFCB->fcbPhysicalSize = (UInt64)volumeHeader->extentsFile.totalBlocks * + (UInt64)volumeHeader->blockSize; // Set Extents tree's PEOF + } + + // + // Set up the minimal BTreeControlBlock structure + // + + // Read the BTreeHeader from disk & also validate it's node size. 
+ err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header); + if (err) goto exit; + + btcb->maxKeyLength = kHFSPlusExtentKeyMaximumLength; // max key length + btcb->keyCompareProc = (void *)CompareExtentKeysPlus; + btcb->attributes |=kBTBigKeysMask; // HFS+ Extent files have 16-bit key length + btcb->leafRecords = header.leafRecords; + btcb->treeDepth = header.treeDepth; + btcb->rootNode = header.rootNode; + btcb->firstLeafNode = header.firstLeafNode; + btcb->lastLeafNode = header.lastLeafNode; + + btcb->nodeSize = header.nodeSize; + btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + + // Make sure the header nodes size field is correct by looking at the 1st record offset + err = CheckNodesFirstOffset( GPtr, btcb ); + if ( (err != noErr) && (btcb->nodeSize != 1024) ) // default HFS+ Extents node size is 1024 + { + btcb->nodeSize = 1024; + btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + + err = CheckNodesFirstOffset( GPtr, btcb ); + if (err) goto exit; + + GPtr->EBTStat |= S_BTH; // update the Btree header + } + } + else // Classic HFS + { + HFSMasterDirectoryBlock *alternateMDB; + + alternateMDB = (HFSMasterDirectoryBlock *) block.buffer; + + CopyMemory(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents16, sizeof(HFSExtentRecord) ); + // ExtDataRecToExtents(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents); + + + err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents16, &numABlks); /* check out extent info */ + if (err) goto exit; + + if (alternateMDB->drXTFlSize != ((UInt64)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize))// check out the PEOF + { + RcdError(GPtr,E_ExtPEOF); + err = E_ExtPEOF; + if (debug) + plog("Alternate MDB drXTFlSize = %llu, should be %llu\n", (long long)alternateMDB->drXTFlSize, (long long)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize); + goto exit; + } + else + { + GPtr->calculatedExtentsFCB->fcbPhysicalSize = alternateMDB->drXTFlSize; // set up PEOF and EOF in FCB + GPtr->calculatedExtentsFCB->fcbLogicalSize = GPtr->calculatedExtentsFCB->fcbPhysicalSize; + } + + // + // Set up the minimal BTreeControlBlock structure + // + + // Read the BTreeHeader from disk & also validate it's node size. + err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header); + if (err) goto exit; + + btcb->maxKeyLength = kHFSExtentKeyMaximumLength; // max key length + btcb->keyCompareProc = (void *)CompareExtentKeys; + btcb->leafRecords = header.leafRecords; + btcb->treeDepth = header.treeDepth; + btcb->rootNode = header.rootNode; + btcb->firstLeafNode = header.firstLeafNode; + btcb->lastLeafNode = header.lastLeafNode; + + btcb->nodeSize = header.nodeSize; + btcb->totalNodes = (GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + + // Make sure the header nodes size field is correct by looking at the 1st record offset + err = CheckNodesFirstOffset( GPtr, btcb ); + if (err) goto exit; + } + + if ( header.btreeType != kHFSBTreeType ) + { + GPtr->EBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header + } + + // + // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes. 
+ // + btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions + if ( btcb->refCon == nil ) { + err = R_NoMem; + goto exit; + } + size = (btcb->totalNodes + 7) / 8; // size of BTree bit map + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap + if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil ) + { + err = R_NoMem; + goto exit; + } + + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is + ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes;// keep track of real free nodes for progress +exit: + if ( block.buffer != NULL ) + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return (err); +} + + + +/*------------------------------------------------------------------------------ + +Function: CheckNodesFirstOffset + +Function: Minimal check verifies that the 1st offset is within bounds. If it's not + the nodeSize may be wrong. In the future this routine could be modified + to try different size values until one fits. + +------------------------------------------------------------------------------*/ +#define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize)) +static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb ) +{ + NodeRec nodeRec; + UInt16 offset; + OSErr err; + + (void) SetFileBlockSize(btcb->fcbPtr, btcb->nodeSize); + + err = GetNode( btcb, kHeaderNodeNum, &nodeRec ); + + if ( err == noErr ) + { + offset = GetRecordOffset( btcb, (NodeDescPtr)nodeRec.buffer, 0 ); + if ( (offset < sizeof (BTNodeDescriptor)) || // offset < minimum + (offset & 1) || // offset is odd + (offset >= btcb->nodeSize) ) // offset beyond end of node + { + if (debug) fprintf(stderr, "%s(%d): offset is wrong\n", __FUNCTION__, __LINE__); + err = fsBTInvalidNodeErr; + } + } + + if ( err != noErr ) + RcdError( GPtr, E_InvalidNodeSize ); + + (void) ReleaseNode(btcb, &nodeRec); + + return( err ); +} + + + +/*------------------------------------------------------------------------------ + +Function: ExtBTChk - (Extent BTree Check) + +Function: Verifies the extent BTree structure. + +Input: GPtr - pointer to scavenger global area + +Output: ExtBTChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr ExtBTChk( SGlobPtr GPtr ) +{ + OSErr err; + + // Set up + GPtr->TarID = kHFSExtentsFileID; // target = extent file + GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB + + // + // check out the BTree structure + // + + err = BTCheck(GPtr, kCalculatedExtentRefNum, NULL); + ReturnIfError( err ); // invalid extent file BTree + + // + // check out the allocation map structure + // + + err = BTMapChk( GPtr, kCalculatedExtentRefNum ); + ReturnIfError( err ); // Invalid extent BTree map + + // + // Make sure unused nodes in the B-tree are zero filled. 
+ // + err = BTCheckUnusedNodes(GPtr, kCalculatedExtentRefNum, &GPtr->EBTStat); + ReturnIfError( err ); + + // + // compare BTree header record on disk with scavenger's BTree header record + // + + err = CmpBTH( GPtr, kCalculatedExtentRefNum ); + ReturnIfError( err ); + + // + // compare BTree map on disk with scavenger's BTree map + // + + err = CmpBTM( GPtr, kCalculatedExtentRefNum ); + + return( err ); +} + + + +/*------------------------------------------------------------------------------ + +Function: BadBlockFileExtentCheck - (Check extents of bad block file) + +Function: + Verifies the extents of bad block file (kHFSBadBlockFileID) that + exist in extents Btree. + + Note that the extents for other file IDs < kHFSFirstUserCatalogNodeID + are being taken care of in the following functions: + + kHFSExtentsFileID - CreateExtentsBTreeControlBlock + kHFSCatalogFileID - CreateCatalogBTreeControlBlock + kHFSAllocationFileID - CreateExtendedAllocationsFCB + kHFSStartupFileID - CreateExtendedAllocationsFCB + kHFSAttributesFileID - CreateAttributesBTreeControlBlock + +Input: GPtr - pointer to scavenger global area + +Output: BadBlockFileExtentCheck - function result: + 0 = no error + +n = error code +------------------------------------------------------------------------------*/ + +OSErr BadBlockFileExtentCheck( SGlobPtr GPtr ) +{ + UInt32 attributes; + void *p; + OSErr result; + SVCB *vcb; + Boolean isHFSPlus; + BlockDescriptor block; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + block.buffer = NULL; + + // + // process the bad block extents (created by the disk init pkg to hide badspots) + // + vcb = GPtr->calculatedVCB; + + result = GetVolumeObjectVHBorMDB( &block ); + if ( result != noErr ) goto ExitThisRoutine; // error, couldn't get it + + p = (void *) block.buffer; + attributes = isHFSPlus == true ? ((HFSPlusVolumeHeader*)p)->attributes : ((HFSMasterDirectoryBlock*)p)->drAtrb; + + //•• Does HFS+ honor the same mask?
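+ // Note: the spared-blocks bit occupies the same position in the HFS drAtrb word and the
+ // HFS Plus attributes word, so the test below works on whichever header was fetched above.
+ // The bad block file has no catalog record; its extents live only in the extents overflow
+ // B-tree, so CheckFileExtents() is handed an all-zero starting extent record and picks the
+ // remaining extents up from that tree.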
+ if ( attributes & kHFSVolumeSparedBlocksMask ) // if any badspots + { + HFSPlusExtentRecord zeroXdr; // dummy passed to 'CheckFileExtents' + UInt32 numBadBlocks; + + ClearMemory ( zeroXdr, sizeof( HFSPlusExtentRecord ) ); + result = CheckFileExtents( GPtr, kHFSBadBlockFileID, kDataFork, NULL, (void *)zeroXdr, &numBadBlocks); // check and mark bitmap + } + +ExitThisRoutine: + if ( block.buffer != NULL ) + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return (result); +} + + +/*------------------------------------------------------------------------------ + +Function: CreateCatalogBTreeControlBlock + +Function: Create the calculated CatalogBTree Control Block + +Input: GPtr - pointer to scavenger global area + +Output: - 0 = no error + n = error code +------------------------------------------------------------------------------*/ +OSErr CreateCatalogBTreeControlBlock( SGlobPtr GPtr ) +{ + OSErr err; + SInt32 size; + UInt32 numABlks; + BTHeaderRec header; + BTreeControlBlock * btcb; + SVCB * vcb; + BlockDescriptor block; + Boolean isHFSPlus; + + // Set up + isHFSPlus = VolumeObjectIsHFSPlus( ); + GPtr->TarID = kHFSCatalogFileID; + GPtr->TarBlock = kHeaderNodeNum; + vcb = GPtr->calculatedVCB; + btcb = GPtr->calculatedCatalogBTCB; + block.buffer = NULL; + + err = GetVolumeObjectVHBorMDB( &block ); + if ( err != noErr ) goto ExitThisRoutine; // error, could't get it + // + // check out allocation info for the Catalog File + // + if (isHFSPlus) + { + HFSPlusVolumeHeader * volumeHeader; + + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + + CopyMemory(volumeHeader->catalogFile.extents, GPtr->calculatedCatalogFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) ); + + err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents32, &numABlks); + if (err) goto exit; + + if ( volumeHeader->catalogFile.totalBlocks != numABlks ) + { + RcdError( GPtr, E_CatPEOF ); + err = E_CatPEOF; + goto exit; + } + else + { + GPtr->calculatedCatalogFCB->fcbLogicalSize = volumeHeader->catalogFile.logicalSize; + GPtr->calculatedCatalogFCB->fcbPhysicalSize = (UInt64)volumeHeader->catalogFile.totalBlocks * + (UInt64)volumeHeader->blockSize; + } + + // + // Set up the minimal BTreeControlBlock structure + // + + // read the BTreeHeader from disk & also validate it's node size. + err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header); + if (err) goto exit; + + btcb->maxKeyLength = kHFSPlusCatalogKeyMaximumLength; // max key length + + /* + * Figure out the type of key string compare + * (case-insensitive or case-sensitive) + * + * To do: should enforce an "HX" volume is require for kHFSBinaryCompare. 
+ */ + if (header.keyCompareType == kHFSBinaryCompare) + { + btcb->keyCompareProc = (void *)CaseSensitiveCatalogKeyCompare; + fsckPrint(GPtr->context, hfsCaseSensitive); + } + else + { + btcb->keyCompareProc = (void *)CompareExtendedCatalogKeys; + } + btcb->keyCompareType = header.keyCompareType; + btcb->leafRecords = header.leafRecords; + btcb->nodeSize = header.nodeSize; + btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Catalog files have large, variable-sized keys + + btcb->treeDepth = header.treeDepth; + btcb->rootNode = header.rootNode; + btcb->firstLeafNode = header.firstLeafNode; + btcb->lastLeafNode = header.lastLeafNode; + + + // Make sure the header nodes size field is correct by looking at the 1st record offset + err = CheckNodesFirstOffset( GPtr, btcb ); + if ( (err != noErr) && (btcb->nodeSize != 4096) ) // default HFS+ Catalog node size is 4096 + { + btcb->nodeSize = 4096; + btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + + err = CheckNodesFirstOffset( GPtr, btcb ); + if (err) goto exit; + + GPtr->CBTStat |= S_BTH; // update the Btree header + } + } + else // HFS + { + HFSMasterDirectoryBlock *alternateMDB; + + alternateMDB = (HFSMasterDirectoryBlock *) block.buffer; + + CopyMemory( alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents16, sizeof(HFSExtentRecord) ); + // ExtDataRecToExtents(alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents); + + err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents16, &numABlks); /* check out extent info */ + if (err) goto exit; + + if (alternateMDB->drCTFlSize != ((UInt64)numABlks * (UInt64)vcb->vcbBlockSize)) // check out the PEOF + { + RcdError( GPtr, E_CatPEOF ); + err = E_CatPEOF; + goto exit; + } + else + { + GPtr->calculatedCatalogFCB->fcbPhysicalSize = alternateMDB->drCTFlSize; // set up PEOF and EOF in FCB + GPtr->calculatedCatalogFCB->fcbLogicalSize = GPtr->calculatedCatalogFCB->fcbPhysicalSize; + } + + // + // Set up the minimal BTreeControlBlock structure + // + + // read the BTreeHeader from disk & also validate it's node size. + err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header); + if (err) goto exit; + + btcb->maxKeyLength = kHFSCatalogKeyMaximumLength; // max key length + btcb->keyCompareProc = (void *) CompareCatalogKeys; + btcb->leafRecords = header.leafRecords; + btcb->nodeSize = header.nodeSize; + btcb->totalNodes = (GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + + btcb->treeDepth = header.treeDepth; + btcb->rootNode = header.rootNode; + btcb->firstLeafNode = header.firstLeafNode; + btcb->lastLeafNode = header.lastLeafNode; + + // Make sure the header nodes size field is correct by looking at the 1st record offset + err = CheckNodesFirstOffset( GPtr, btcb ); + if (err) goto exit; + } +#if 0 + plog(" Catalog B-tree is %qd bytes\n", (UInt64)btcb->totalNodes * (UInt64) btcb->nodeSize); +#endif + + if ( header.btreeType != kHFSBTreeType ) + { + GPtr->CBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header + } + + // + // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes. 
+ // + + btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions + if ( btcb->refCon == nil ) { + err = R_NoMem; + goto exit; + } + size = (btcb->totalNodes + 7) / 8; // size of BTree bit map + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap + if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil ) + { + err = R_NoMem; + goto exit; + } + + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is + ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress + + /* it should be OK at this point to get volume name and stuff it into our global */ + { + OSErr result; + UInt16 recSize; + CatalogKey key; + CatalogRecord record; + + BuildCatalogKey( kHFSRootFolderID, NULL, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &record, &recSize, NULL ); + if ( result == noErr ) { + if ( isHFSPlus ) { + size_t len; + HFSPlusCatalogThread * recPtr = &record.hfsPlusThread; + (void) utf_encodestr( recPtr->nodeName.unicode, + recPtr->nodeName.length * 2, + GPtr->volumeName, &len, sizeof(GPtr->volumeName) ); + GPtr->volumeName[len] = '\0'; + } + else { + HFSCatalogThread * recPtr = &record.hfsThread; + bcopy( &recPtr->nodeName[1], GPtr->volumeName, recPtr->nodeName[0] ); + GPtr->volumeName[ recPtr->nodeName[0] ] = '\0'; + } + fsckPrint(GPtr->context, fsckVolumeName, GPtr->volumeName); + } + } + +exit: +ExitThisRoutine: + if ( block.buffer != NULL ) + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return (err); +} + + +/*------------------------------------------------------------------------------ + +Function: CreateExtendedAllocationsFCB + +Function: Create the calculated ExtentsBTree Control Block for + kHFSAllocationFileID and kHFSStartupFileID. + +Input: GPtr - pointer to scavenger global area + +Output: - 0 = no error + n = error code +------------------------------------------------------------------------------*/ +OSErr CreateExtendedAllocationsFCB( SGlobPtr GPtr ) +{ + OSErr err = 0; + UInt32 numABlks; + SVCB * vcb; + Boolean isHFSPlus; + BlockDescriptor block; + + // Set up + isHFSPlus = VolumeObjectIsHFSPlus( ); + GPtr->TarID = kHFSAllocationFileID; + GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB + vcb = GPtr->calculatedVCB; + block.buffer = NULL; + + // + // check out allocation info for the allocation File + // + + if ( isHFSPlus ) + { + SFCB * fcb; + HFSPlusVolumeHeader *volumeHeader; + + err = GetVolumeObjectVHB( &block ); + if ( err != noErr ) + goto exit; + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + + fcb = GPtr->calculatedAllocationsFCB; + CopyMemory( volumeHeader->allocationFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) ); + + err = CheckFileExtents( GPtr, kHFSAllocationFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks); + if (err) goto exit; + + // + // The allocation file will get processed in whole allocation blocks, or + // maximal-sized cache blocks, whichever is smaller. This means the cache + // doesn't need to cope with buffers that are larger than a cache block. 
+ if (vcb->vcbBlockSize < fscache.BlockSize) + (void) SetFileBlockSize (fcb, vcb->vcbBlockSize); + else + (void) SetFileBlockSize (fcb, fscache.BlockSize); + + if ( volumeHeader->allocationFile.totalBlocks != numABlks ) + { + RcdError( GPtr, E_CatPEOF ); + err = E_CatPEOF; + goto exit; + } + else + { + fcb->fcbLogicalSize = volumeHeader->allocationFile.logicalSize; + fcb->fcbPhysicalSize = (UInt64) volumeHeader->allocationFile.totalBlocks * + (UInt64) volumeHeader->blockSize; + } + + /* while we're here, also get startup file extents... */ + fcb = GPtr->calculatedStartupFCB; + CopyMemory( volumeHeader->startupFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) ); + + err = CheckFileExtents( GPtr, kHFSStartupFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks); + if (err) goto exit; + + fcb->fcbLogicalSize = volumeHeader->startupFile.logicalSize; + fcb->fcbPhysicalSize = (UInt64) volumeHeader->startupFile.totalBlocks * + (UInt64) volumeHeader->blockSize; + } + +exit: + if (block.buffer) + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return (err); + +} + + +/*------------------------------------------------------------------------------ + +Function: CatHChk - (Catalog Hierarchy Check) + +Function: Verifies the catalog hierarchy. + +Input: GPtr - pointer to scavenger global area + +Output: CatHChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr CatHChk( SGlobPtr GPtr ) +{ + SInt16 i; + OSErr result; + UInt16 recSize; + SInt16 selCode; + UInt32 hint; + UInt32 dirCnt; + UInt32 filCnt; + SInt16 rtdirCnt; + SInt16 rtfilCnt; + SVCB *calculatedVCB; + SDPR *dprP; + SDPR *dprP1; + CatalogKey foundKey; + Boolean validKeyFound; + CatalogKey key; + CatalogRecord record; + CatalogRecord record2; + HFSPlusCatalogFolder *largeCatalogFolderP; + HFSPlusCatalogFile *largeCatalogFileP; + HFSCatalogFile *smallCatalogFileP; + HFSCatalogFolder *smallCatalogFolderP; + CatalogName catalogName; + UInt32 valence; + CatalogRecord threadRecord; + HFSCatalogNodeID parID; + Boolean isHFSPlus; + + // set up + isHFSPlus = VolumeObjectIsHFSPlus( ); + calculatedVCB = GPtr->calculatedVCB; + GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */ + GPtr->TarBlock = 0; /* no target block yet */ + + // + // position to the beginning of catalog + // + + //¥¥ Can we ignore this part by just taking advantage of setting the selCode = 0x8001; + { + BuildCatalogKey( 1, (const CatalogName *)nil, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint ); + + GPtr->TarBlock = hint; /* set target block */ + if ( result != btNotFound ) + { + RcdError( GPtr, E_CatRec ); + return( E_CatRec ); + } + } + + GPtr->DirLevel = 1; + dprP = &(GPtr->DirPTPtr)[0]; + dprP->directoryID = 1; + + dirCnt = filCnt = rtdirCnt = rtfilCnt = 0; + + result = noErr; + selCode = 0x8001; /* start with root directory */ + + // + // enumerate the entire catalog + // + while ( (GPtr->DirLevel > 0) && (result == noErr) ) + { + dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1]; + + validKeyFound = true; + record.recordType = 0; + + // get the next record + result = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recSize, &hint ); + + GPtr->TarBlock = hint; /* set target block */ + if ( result != noErr ) + { + if ( result == btNotFound ) + { + result = noErr; + validKeyFound = false; + } + else + { + result = IntError( GPtr, result ); /* error 
from BTGetRecord */ + return( result ); + } + } + selCode = 1; /* get next rec from now on */ + + GPtr->itemsProcessed++; + + // + // if same ParID ... + // + parID = isHFSPlus == true ? foundKey.hfsPlus.parentID : foundKey.hfs.parentID; + if ( (validKeyFound == true) && (parID == dprP->directoryID) ) + { + dprP->offspringIndex++; /* increment offspring index */ + + // if new directory ... + + if ( record.recordType == kHFSPlusFolderRecord ) + { + result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt + + largeCatalogFolderP = (HFSPlusCatalogFolder *) &record; + GPtr->TarID = largeCatalogFolderP->folderID; // target ID = directory ID + GPtr->CNType = record.recordType; // target CNode type = directory ID + CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus ); + + if ( dprP->directoryID > 1 ) + { + GPtr->DirLevel++; // we have a new directory level + dirCnt++; + } + if ( dprP->directoryID == kHFSRootFolderID ) // bump root dir count + rtdirCnt++; + + if ( GPtr->DirLevel > GPtr->dirPathCount ) + { + void *ptr; + + ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR)); + if (ptr == nil) + { + fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount); + return noErr; /* abort this check, but let other checks proceed */ + } + ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR))); + GPtr->dirPathCount += CMMaxDepth; + GPtr->DirPTPtr = ptr; + } + + dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1]; + dprP->directoryID = largeCatalogFolderP->folderID; + dprP->offspringIndex = 1; + dprP->directoryHint = hint; + dprP->parentDirID = foundKey.hfsPlus.parentID; + CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &dprP->directoryName, isHFSPlus ); + + for ( i = 1; i < GPtr->DirLevel; i++ ) + { + dprP1 = &(GPtr->DirPTPtr)[i - 1]; + if (dprP->directoryID == dprP1->directoryID) + { + RcdError( GPtr,E_DirLoop ); // loop in directory hierarchy + return( E_DirLoop ); + } + } + + /* + * Find thread record + */ + BuildCatalogKey( dprP->directoryID, (const CatalogName *) nil, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint ); + if ( result != noErr ) { + struct MissingThread *mtp; + + /* Report the error */ + fsckPrint(GPtr->context, E_NoThd, dprP->directoryID); + + /* HFS will exit here */ + if ( !isHFSPlus ) + return (E_NoThd); + /* + * A directory thread is missing. If we can find this + * ID on the missing-thread list then we know where the + * child entries reside and can resume our enumeration. + */ + for (mtp = GPtr->missingThreadList; mtp != NULL; mtp = mtp->link) { + if (mtp->threadID == dprP->directoryID) { + mtp->thread.recordType = kHFSPlusFolderThreadRecord; + mtp->thread.parentID = dprP->parentDirID; + CopyCatalogName(&dprP->directoryName, (CatalogName *)&mtp->thread.nodeName, isHFSPlus); + + /* Reposition to the first child of target directory */ + result = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &mtp->nextKey, + kNoHint, &foundKey, &threadRecord, &recSize, &hint); + if (result) { + return (E_NoThd); + } + selCode = 0; /* use current record instead of next */ + break; + } + } + if (selCode != 0) { + /* + * A directory thread is missing but we know this + * directory has no children (since we didn't find + * its ID on the missing-thread list above). + * + * At this point we can resume the enumeration at + * our previous position in our parent directory. 
+ */ + goto resumeAtParent; + } + } + dprP->threadHint = hint; + GPtr->TarBlock = hint; + } + + // LargeCatalogFile + else if ( record.recordType == kHFSPlusFileRecord ) + { + largeCatalogFileP = (HFSPlusCatalogFile *) &record; + GPtr->TarID = largeCatalogFileP->fileID; // target ID = file number + GPtr->CNType = record.recordType; // target CNode type = thread + CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus ); + filCnt++; + if (dprP->directoryID == kHFSRootFolderID) + rtfilCnt++; + } + + else if ( record.recordType == kHFSFolderRecord ) + { + result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt + + smallCatalogFolderP = (HFSCatalogFolder *) &record; + GPtr->TarID = smallCatalogFolderP->folderID; /* target ID = directory ID */ + GPtr->CNType = record.recordType; /* target CNode type = directory ID */ + CopyCatalogName( (const CatalogName *) &key.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */ + + if (dprP->directoryID > 1) + { + GPtr->DirLevel++; /* we have a new directory level */ + dirCnt++; + } + if (dprP->directoryID == kHFSRootFolderID) /* bump root dir count */ + rtdirCnt++; + + if ( GPtr->DirLevel > GPtr->dirPathCount ) + { + void *ptr; + + ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR)); + if (ptr == nil) + { + fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount); + return noErr; /* abort this check, but let other checks proceed */ + } + ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR))); + GPtr->dirPathCount += CMMaxDepth; + GPtr->DirPTPtr = ptr; + } + + dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1]; + dprP->directoryID = smallCatalogFolderP->folderID; + dprP->offspringIndex = 1; + dprP->directoryHint = hint; + dprP->parentDirID = foundKey.hfs.parentID; + + CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &dprP->directoryName, isHFSPlus ); + + for (i = 1; i < GPtr->DirLevel; i++) + { + dprP1 = &(GPtr->DirPTPtr)[i - 1]; + if (dprP->directoryID == dprP1->directoryID) + { + RcdError( GPtr,E_DirLoop ); /* loop in directory hierarchy */ + return( E_DirLoop ); + } + } + + BuildCatalogKey( dprP->directoryID, (const CatalogName *)0, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint ); + if (result != noErr ) + { + result = IntError(GPtr,result); /* error from BTSearch */ + return(result); + } + dprP->threadHint = hint; /* save hint for thread */ + GPtr->TarBlock = hint; /* set target block */ + } + + // HFSCatalogFile... 
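Both branches of the enumeration above grow the per-level directory table, GPtr->DirPTPtr, in CMMaxDepth-sized steps once the hierarchy runs deeper than the table, and they clear the newly added entries so the loop-detection scan never reads stale directory IDs. Below is a minimal, self-contained sketch of that growth step using standard C types; the struct, constant, and function names are illustrative stand-ins, not identifiers from this patch.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GROW_STEP 64                     /* stand-in for CMMaxDepth */

struct dir_path_rec {                    /* stand-in for SDPR: one entry per level */
	uint32_t directoryID;
	uint32_t parentDirID;
	uint32_t offspringIndex;
};

/*
 * Make sure the table can hold at least `level` entries.  Returns 0 on
 * success, -1 if memory could not be obtained.
 */
static int
grow_dir_table(struct dir_path_rec **table, uint32_t *capacity, uint32_t level)
{
	if (level <= *capacity)
		return 0;                        /* already large enough */

	uint32_t newcap = *capacity + GROW_STEP;
	struct dir_path_rec *p = realloc(*table, newcap * sizeof(*p));
	if (p == NULL)
		return -1;

	/* realloc does not clear the tail; new levels must start zeroed */
	memset(p + *capacity, 0, GROW_STEP * sizeof(*p));

	*table = p;
	*capacity = newcap;
	return 0;
}

When the real code cannot grow the table it reports E_CatDepth and returns noErr, abandoning the catalog hierarchy check while letting the other checks proceed, which is why a helper like this only signals failure instead of exiting.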
+ else if ( record.recordType == kHFSFileRecord ) + { + smallCatalogFileP = (HFSCatalogFile *) &record; + GPtr->TarID = smallCatalogFileP->fileID; /* target ID = file number */ + GPtr->CNType = record.recordType; /* target CNode type = thread */ + CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */ + filCnt++; + if (dprP->directoryID == kHFSRootFolderID) + rtfilCnt++; + } + + // Unknown/Bad record type + else + { + M_DebugStr("\p Unknown-Bad record type"); + return( 123 ); + } + } + + // + // if not same ParID or no record + // + else if ( (record.recordType == kHFSFileThreadRecord) || (record.recordType == kHFSPlusFileThreadRecord) ) /* it's a file thread, skip past it */ + { + GPtr->TarID = parID; // target ID = file number + GPtr->CNType = record.recordType; // target CNode type = thread + GPtr->CName.ustr.length = 0; // no target CName + } + + else + { +resumeAtParent: + GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */ + GPtr->CNType = record.recordType; /* target CNode type = directory */ + CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus ); // copy the string name + + // re-locate current directory + CopyCatalogName( (const CatalogName *) &dprP->directoryName, &catalogName, isHFSPlus ); + BuildCatalogKey( dprP->parentDirID, (const CatalogName *)&catalogName, isHFSPlus, &key ); + result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, dprP->directoryHint, &foundKey, &record2, &recSize, &hint ); + + if ( result != noErr ) + { + result = IntError(GPtr,result); /* error from BTSearch */ + return(result); + } + GPtr->TarBlock = hint; /* set target block */ + + + valence = isHFSPlus == true ? record2.hfsPlusFolder.valence : (UInt32)record2.hfsFolder.valence; + + if ( valence != dprP->offspringIndex -1 ) /* check its valence */ + if ( ( result = RcdValErr( GPtr, E_DirVal, dprP->offspringIndex -1, valence, dprP->parentDirID ) ) ) + return( result ); + + GPtr->DirLevel--; /* move up a level */ + + if(GPtr->DirLevel > 0) + { + dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1]; + GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */ + GPtr->CNType = record.recordType; /* target CNode type = directory */ + CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus ); + } + } + } // end while + + // + // verify directory and file counts (all nonfatal, repairable errors) + // + if (!isHFSPlus && (rtdirCnt != calculatedVCB->vcbNmRtDirs)) /* check count of dirs in root */ + if ( ( result = RcdValErr(GPtr,E_RtDirCnt,rtdirCnt,calculatedVCB->vcbNmRtDirs,0) ) ) + return( result ); + + if (!isHFSPlus && (rtfilCnt != calculatedVCB->vcbNmFls)) /* check count of files in root */ + if ( ( result = RcdValErr(GPtr,E_RtFilCnt,rtfilCnt,calculatedVCB->vcbNmFls,0) ) ) + return( result ); + + if (dirCnt != calculatedVCB->vcbFolderCount) /* check count of dirs in volume */ + if ( ( result = RcdValErr(GPtr,E_DirCnt,dirCnt,calculatedVCB->vcbFolderCount,0) ) ) + return( result ); + + if (filCnt != calculatedVCB->vcbFileCount) /* check count of files in volume */ + if ( ( result = RcdValErr(GPtr,E_FilCnt,filCnt,calculatedVCB->vcbFileCount,0) ) ) + return( result ); + + return( noErr ); + +} /* end of CatHChk */ + + + +/*------------------------------------------------------------------------------ + +Function: CreateAttributesBTreeControlBlock + +Function: Create the calculated AttributesBTree Control Block + +Input: GPtr - pointer to scavenger 
global area + +Output: - 0 = no error + n = error code +------------------------------------------------------------------------------*/ +OSErr CreateAttributesBTreeControlBlock( SGlobPtr GPtr ) +{ + OSErr err = 0; + SInt32 size; + UInt32 numABlks; + BTreeControlBlock * btcb; + SVCB * vcb; + Boolean isHFSPlus; + BTHeaderRec header; + BlockDescriptor block; + + // Set up + isHFSPlus = VolumeObjectIsHFSPlus( ); + GPtr->TarID = kHFSAttributesFileID; + GPtr->TarBlock = kHeaderNodeNum; + block.buffer = NULL; + btcb = GPtr->calculatedAttributesBTCB; + vcb = GPtr->calculatedVCB; + + // + // check out allocation info for the Attributes File + // + + if (isHFSPlus) + { + HFSPlusVolumeHeader *volumeHeader; + + err = GetVolumeObjectVHB( &block ); + if ( err != noErr ) + goto exit; + volumeHeader = (HFSPlusVolumeHeader *) block.buffer; + + CopyMemory( volumeHeader->attributesFile.extents, GPtr->calculatedAttributesFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) ); + + err = CheckFileExtents( GPtr, kHFSAttributesFileID, kDataFork, NULL, (void *)GPtr->calculatedAttributesFCB->fcbExtents32, &numABlks); + if (err) goto exit; + + if ( volumeHeader->attributesFile.totalBlocks != numABlks ) // check out the PEOF + { + RcdError( GPtr, E_CatPEOF ); + err = E_CatPEOF; + goto exit; + } + else + { + GPtr->calculatedAttributesFCB->fcbLogicalSize = (UInt64) volumeHeader->attributesFile.logicalSize; // Set Attributes tree's LEOF + GPtr->calculatedAttributesFCB->fcbPhysicalSize = (UInt64) volumeHeader->attributesFile.totalBlocks * + (UInt64) volumeHeader->blockSize; // Set Attributes tree's PEOF + } + + // + // See if we actually have an attributes BTree + // + if (numABlks == 0) + { + btcb->maxKeyLength = 0; + btcb->keyCompareProc = 0; + btcb->leafRecords = 0; + btcb->nodeSize = 0; + btcb->totalNodes = 0; + btcb->freeNodes = 0; + btcb->attributes = 0; + + btcb->treeDepth = 0; + btcb->rootNode = 0; + btcb->firstLeafNode = 0; + btcb->lastLeafNode = 0; + + // GPtr->calculatedVCB->attributesRefNum = 0; + GPtr->calculatedVCB->vcbAttributesFile = NULL; + } + else + { + // read the BTreeHeader from disk & also validate it's node size. + err = GetBTreeHeader(GPtr, GPtr->calculatedAttributesFCB, &header); + if (err) goto exit; + + btcb->maxKeyLength = kAttributeKeyMaximumLength; // max key length + btcb->keyCompareProc = (void *)CompareAttributeKeys; + btcb->leafRecords = header.leafRecords; + btcb->nodeSize = header.nodeSize; + btcb->totalNodes = ( GPtr->calculatedAttributesFCB->fcbPhysicalSize / btcb->nodeSize ); + btcb->freeNodes = btcb->totalNodes; // start with everything free + btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Attributes files have large, variable-sized keys + + btcb->treeDepth = header.treeDepth; + btcb->rootNode = header.rootNode; + btcb->firstLeafNode = header.firstLeafNode; + btcb->lastLeafNode = header.lastLeafNode; + + // + // Make sure the header nodes size field is correct by looking at the 1st record offset + // + err = CheckNodesFirstOffset( GPtr, btcb ); + if (err) goto exit; + } + } + else + { + btcb->maxKeyLength = 0; + btcb->keyCompareProc = 0; + btcb->leafRecords = 0; + btcb->nodeSize = 0; + btcb->totalNodes = 0; + btcb->freeNodes = 0; + btcb->attributes = 0; + + btcb->treeDepth = 0; + btcb->rootNode = 0; + btcb->firstLeafNode = 0; + btcb->lastLeafNode = 0; + + GPtr->calculatedVCB->vcbAttributesFile = NULL; + } + + // + // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes. 
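Before the extension record is attached just below, the attributes B-tree control block is sized from the on-disk header: the node count is the attributes file's physical size divided by the node size, every node initially counts as free, and the scavenger's node bitmap needs one bit per node rounded up to a whole byte. A small sketch of that arithmetic, with names that are illustrative rather than taken from the patch:

#include <stdint.h>
#include <assert.h>

/* Nodes that fit in the file: physical size over node size. */
static uint32_t btree_total_nodes(uint64_t physical_size, uint32_t node_size)
{
	assert(node_size != 0);
	return (uint32_t)(physical_size / node_size);
}

/* Bytes of bitmap needed to track those nodes, one bit per node. */
static uint32_t btree_map_bytes(uint32_t total_nodes)
{
	return (total_nodes + 7) / 8;
}

For example, a 1 MiB attributes file with 4 KiB nodes yields 256 nodes and a 32-byte map; freeNodes starts at the full node count, as the comment above notes.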
+ // + btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions + if ( btcb->refCon == nil ) { + err = R_NoMem; + goto exit; + } + + if (btcb->totalNodes == 0) + { + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = nil; + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = 0; + ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = 0; + } + else + { + if ( btcb->refCon == nil ) { + err = R_NoMem; + goto exit; + } + size = (btcb->totalNodes + 7) / 8; // size of BTree bit map + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap + if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil ) + { + err = R_NoMem; + goto exit; + } + + ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is + ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress + } + +exit: + if (block.buffer) + (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock); + + return (err); +} + +/* + * Function: RecordLastAttrBits + * + * Description: + * Updates the Chinese Remainder Theorem buckets with extended attribute + * information for the previous fileID stored in the global structure. + * + * Input: + * GPtr - pointer to scavenger global area + * * GPtr->lastAttrInfo.fileID - fileID of last attribute seen + * + * Output: Nothing + */ +static void RecordLastAttrBits(SGlobPtr GPtr) +{ + /* lastAttrInfo structure is initialized to zero and hence ignore + * recording information for fileID = 0. fileIDs < 16 (except for + * fileID = 2) can have extended attributes but do not have + * corresponding entry in catalog Btree. Ignore recording these + * fileIDs for Chinese Remainder Theorem buckets. Currently we only + * set extended attributes for fileID = 1 among these fileIDs + * and this can change in future (see 3984119) + */ + if ((GPtr->lastAttrInfo.fileID == 0) || + ((GPtr->lastAttrInfo.fileID < kHFSFirstUserCatalogNodeID) && + (GPtr->lastAttrInfo.fileID != kHFSRootFolderID))) { + return; + } + + if (GPtr->lastAttrInfo.hasSecurity == true) { + /* fileID has both extended attribute and ACL */ + RecordXAttrBits(GPtr, kHFSHasAttributesMask | kHFSHasSecurityMask, + GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum); + GPtr->lastAttrInfo.hasSecurity = false; + } else { + /* fileID only has extended attribute */ + RecordXAttrBits(GPtr, kHFSHasAttributesMask, + GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum); + } +} + +/* + * Function: setLastAttrAllocInfo + * + * Description: + * Set the global structure of last extended attribute with + * the allocation block information. Also set the isValid to true + * to indicate that the data is valid and should be used to verify + * allocation blocks. 
+ * + * Input: + * GPtr - pointer to scavenger global area + * totalBlocks - total blocks allocated by the attribute + * logicalSize - logical size of the attribute + * calculatedBlocks - blocks accounted by the attribute in current extent + * + * Output: Nothing + */ +static void setLastAttrAllocInfo(SGlobPtr GPtr, u_int32_t totalBlocks, + u_int64_t logicalSize, u_int32_t calculatedTotalBlocks) +{ + GPtr->lastAttrInfo.totalBlocks = totalBlocks; + GPtr->lastAttrInfo.logicalSize = logicalSize; + GPtr->lastAttrInfo.calculatedTotalBlocks = calculatedTotalBlocks; + GPtr->lastAttrInfo.isValid = true; +} + +/* + * Function: CheckLastAttrAllocation + * + * Description: + * Checks the allocation block information stored for the last + * extended attribute seen during extended attribute BTree traversal. + * Always resets the information stored for last EA allocation. + * + * Input: GPtr - pointer to scavenger global area + * + * Output: int - function result: + * zero - no error + * non-zero - error + */ +static int CheckLastAttrAllocation(SGlobPtr GPtr) +{ + int result = 0; + u_int64_t bytes; + + if (GPtr->lastAttrInfo.isValid == true) { + if (GPtr->lastAttrInfo.totalBlocks != + GPtr->lastAttrInfo.calculatedTotalBlocks) { + result = RecordBadAllocation(GPtr->lastAttrInfo.fileID, + GPtr->lastAttrInfo.attrname, kEAData, + GPtr->lastAttrInfo.totalBlocks, + GPtr->lastAttrInfo.calculatedTotalBlocks); + } else { + bytes = (u_int64_t)GPtr->lastAttrInfo.calculatedTotalBlocks * + (u_int64_t)GPtr->calculatedVCB->vcbBlockSize; + if (GPtr->lastAttrInfo.logicalSize > bytes) { + result = RecordTruncation(GPtr->lastAttrInfo.fileID, + GPtr->lastAttrInfo.attrname, kEAData, + GPtr->lastAttrInfo.logicalSize, bytes); + } + } + + /* Invalidate information in the global structure */ + GPtr->lastAttrInfo.isValid = false; + } + + return (result); +} + +/*------------------------------------------------------------------------------ +Function: CheckAttributeRecord + +Description: + This is call back function called for all leaf records in + Attribute BTree during the verify and repair stage. The basic + functionality of the function is same during verify and repair + stages except that whenever it finds corruption, the verify + stage prints message and the repair stage repairs it. In the verify + stage, this function accounts for allocation blocks used + by extent-based extended attributes and also updates the chinese + remainder theorem buckets corresponding the extended attribute + and security bit. + + 1. Only in the verify stage, if the fileID or attribute name of current + extended attribute are not same as the previous attribute, check the + allocation block counts for the previous attribute. + + 2. Only in the verify stage, If the fileID of current attribute is not + same as the previous attribute, record the previous fileID information + for Chinese Remainder Theorem. + + 3. For attribute type, + kHFSPlusAttrForkData: + --------------------- + Do all of the following during verify stage and nothing in repair + stage - + + Check the start block for extended attribute from the key. If not + zero, print error. + + Account for blocks occupied by this extent and store the allocation + information for this extent to check in future. Also update the + last attribute information in the global structure. 
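Attribute B-tree keys sort by file ID, attribute name, and start block, so every record belonging to one extended attribute is adjacent in the leaf walk. CheckAttributeRecord therefore only needs to remember the previous attribute's running totals: when the file ID or name changes it checks that the blocks accumulated from the extents match the fork's totalBlocks and that the logical size fits within the allocated bytes, and an overflow extent record is expected to begin exactly at the number of blocks already accounted for. A simplified, self-contained restatement of that flush step follows; the structure and function names are illustrative, not the patch's own.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the lastAttrInfo state kept in the globals. */
struct last_attr {
	uint32_t fileID;
	uint32_t totalBlocks;            /* from the fork-data record */
	uint32_t calculatedTotalBlocks;  /* accumulated from extents seen so far */
	uint64_t logicalSize;
	bool     valid;
};

/*
 * Flush the running totals when the walk moves on to a different attribute.
 * A block-count mismatch is a bad allocation; otherwise a logical size larger
 * than the allocated bytes indicates truncation.  The state is always reset.
 */
static int check_last_attr(struct last_attr *la, uint32_t alloc_block_size)
{
	int result = 0;

	if (!la->valid)
		return 0;

	if (la->totalBlocks != la->calculatedTotalBlocks) {
		printf("bad allocation: fileID %u has %u blocks, expected %u\n",
		       la->fileID, la->totalBlocks, la->calculatedTotalBlocks);
		result = 1;
	} else {
		uint64_t bytes = (uint64_t)la->calculatedTotalBlocks * alloc_block_size;
		if (la->logicalSize > bytes) {
			printf("truncation: fileID %u logical size %llu > %llu allocated\n",
			       la->fileID, (unsigned long long)la->logicalSize,
			       (unsigned long long)bytes);
			result = 1;
		}
	}

	la->valid = false;
	return result;
}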
+ + kHFSPlusAttrExtents: + -------------------- + If the current attribute's fileID is not same as previous fileID, or + if the previous recordType is not a valid forkData or overflow extent + record, report an error in verify stage or mark it for deletion in + repair stage. + + Do all of the following during verify stage and nothing in repair + stage - + + Check the start block for extended attribute from the key. If not + equal to the total blocks seen uptil last attribtue, print error. + + Account for blocks occupied by this extent. Update previous + attribute allocation information with blocks seen in current + extent. Also update last attribute block information in the global + structure. + + kHFSPlusAttrInlineData: + ----------------------- + Only in the verify stage, check if the start block in the key is + equal to zero. If not, print error. + + Unknown type: + ------------- + In verify stage, report error. In repair stage, mark the record + to delete. + + 4. If a record is marked for deletion, delete the record. + + 5. Before exiting from the function, always do the following - + a. Indicate if the extended attribute was an ACL + b. Update previous fileID and recordType with current information. + c. Update previous attribute name with current attribute name. + +Input: GPtr - pointer to scavenger global area + key - key for current attribute + rec - attribute record + reclen - length of the record + +Output: int - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ +int +CheckAttributeRecord(SGlobPtr GPtr, const HFSPlusAttrKey *key, const HFSPlusAttrRecord *rec, UInt16 reclen) +{ + int result = 0; + unsigned char attrname[XATTR_MAXNAMELEN+1]; + size_t attrlen; + u_int32_t blocks; + u_int32_t fileID; + struct attributeInfo *prevAttr; + Boolean isSameAttr = true; + Boolean doDelete = false; + u_int16_t dfaStage = GetDFAStage(); + + /* Assert if volume is not HFS Plus */ + assert(VolumeObjectIsHFSPlus() == true); + + prevAttr = &(GPtr->lastAttrInfo); + fileID = key->fileID; + /* Convert unicode attribute name to UTF-8 string */ + (void) utf_encodestr(key->attrName, key->attrNameLen * 2, attrname, &attrlen, sizeof(attrname)); + attrname[attrlen] = '\0'; + + /* Compare the current attribute to last attribute seen */ + if ((fileID != prevAttr->fileID) || + (strcmp((char *)attrname, (char *)prevAttr->attrname) != 0)) { + isSameAttr = false; + } + + /* We check allocation block information and record EA information for + * CRT bucket in verify stage and hence no need to do it again in + * repair stage. + */ + if (dfaStage == kVerifyStage) { + /* Different attribute - check allocation block information */ + if (isSameAttr == false) { + result = CheckLastAttrAllocation(GPtr); + if (result) { + goto update_out; + } + } + + /* Different fileID - record information in CRT bucket */ + if (fileID != prevAttr->fileID) { + RecordLastAttrBits(GPtr); + } + } + + switch (rec->recordType) { + case kHFSPlusAttrForkData: { + /* Check start block only in verify stage to avoid printing message + * in repair stage. Note that this corruption is not repairable + * currently. Also check extents only in verify stage to avoid + * false overlap extents error. 
+ */ + if (dfaStage == kVerifyStage) { + /* Start block in the key should be zero */ + if (key->startBlock != 0) { + RcdError(GPtr, E_ABlkSt); + result = E_ABlkSt; + goto err_out; + } + + /* Check the extent information and record overlapping extents, if any */ + result = CheckFileExtents (GPtr, fileID, kEAData, attrname, + rec->forkData.theFork.extents, &blocks); + if (result) { + goto update_out; + } + + /* Store allocation information to check in future */ + (void) setLastAttrAllocInfo(GPtr, rec->forkData.theFork.totalBlocks, + rec->forkData.theFork.logicalSize, blocks); + } + break; + } + + case kHFSPlusAttrExtents: { + /* Different attribute/fileID or incorrect previous record type */ + if ((isSameAttr == false) || + ((prevAttr->recordType != kHFSPlusAttrExtents) && + (prevAttr->recordType != kHFSPlusAttrForkData))) { + if (dfaStage == kRepairStage) { + /* Delete record in repair stage */ + doDelete = true; + } else { + /* Report error in verify stage */ + RcdError(GPtr, E_AttrRec); + GPtr->ABTStat |= S_AttrRec; + goto err_out; + } + } + + /* Check start block only in verify stage to avoid printing message + * in repair stage. Note that this corruption is not repairable + * currently. Also check extents only in verify stage to avoid + * false overlap extents error. + */ + if (dfaStage == kVerifyStage) { + /* startBlock in the key should be equal to total blocks + * seen uptil last attribute. + */ + if (key->startBlock != prevAttr->calculatedTotalBlocks) { + RcdError(GPtr, E_ABlkSt); + result = E_ABlkSt; + goto err_out; + } + + /* Check the extent information and record overlapping extents, if any */ + result = CheckFileExtents (GPtr, fileID, kEAData, attrname, + rec->overflowExtents.extents, &blocks); + if (result) { + goto update_out; + } + + /* Increment the blocks seen uptil now for this attribute */ + prevAttr->calculatedTotalBlocks += blocks; + } + break; + } + + case kHFSPlusAttrInlineData: { + /* Check start block only in verify stage to avoid printing message + * in repair stage. 
+ */ + if (dfaStage == kVerifyStage) { + /* Start block in the key should be zero */ + if (key->startBlock != 0) { + RcdError(GPtr, E_ABlkSt); + result = E_ABlkSt; + goto err_out; + } + } + break; + } + + default: { + /* Unknown attribute record */ + if (dfaStage == kRepairStage) { + /* Delete record in repair stage */ + doDelete = true; + } else { + /* Report error in verify stage */ + RcdError(GPtr, E_AttrRec); + GPtr->ABTStat |= S_AttrRec; + goto err_out; + } + break; + } + }; + + if (doDelete == true) { + result = DeleteBTreeRecord(GPtr->calculatedAttributesFCB, key); + DPRINTF (d_info|d_xattr, "%s: Deleting attribute %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType); + if (result) { + DPRINTF (d_error|d_xattr, "%s: Error in deleting record for %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType); + } + + /* Set flags to mark header and map dirty */ + GPtr->ABTStat |= S_BTH + S_BTM; + goto err_out; + } + +update_out: + /* Note that an ACL exists for this fileID */ + if (strcmp((char *)attrname, KAUTH_FILESEC_XATTR) == 0) { + prevAttr->hasSecurity = true; + } + + /* Always update the last recordType, fileID and attribute name before exiting */ + prevAttr->recordType = rec->recordType; + prevAttr->fileID = fileID; + (void) strlcpy((char *)prevAttr->attrname, (char *)attrname, sizeof(prevAttr->attrname)); + + goto out; + +err_out: + /* If the current record is invalid/bogus, decide whether to update + * fileID stored in global structure for future comparison based on the + * previous fileID. + * If the current bogus record's fileID is different from fileID of the + * previous good record, we do not want to account for bogus fileID in + * the Chinese Remainder Theorem when we see next good record. + * Hence reset the fileID in global structure to dummy value. Example, + * if the fileIDs are 10 15 20 and record with ID=15 is bogus, we do not + * want to account for record with ID=15. + * If the current bogus record's fileID is same as the fileID of the + * previous good record, we want to account for this fileID in the + * next good record we see after this bogus record. Hence do not + * reset the fileID to dummy value. Example, if the records have fileID + * 10 10 30 and the second record with ID=10 is bogus, we want to + * account for ID=10 when we see record with ID=30. + */ + if (prevAttr->fileID != fileID) { + prevAttr->fileID = 0; + } + +out: + return(result); +} + +/* Function: RecordXAttrBits + * + * Description: + * This function increments the prime number buckets for the associated + * prime bucket set based on the flags and btreetype to determine + * the discrepancy between the attribute btree and catalog btree for + * extended attribute data consistency. This function is based on + * Chinese Remainder Theorem. + * + * Alogrithm: + * 1. If none of kHFSHasAttributesMask or kHFSHasSecurity mask is set, + * return. + * 2. Based on btreetype and the flags, determine which prime number + * bucket should be updated. Initialize pointers accordingly. + * 3. Divide the fileID with pre-defined prime numbers. Store the + * remainder. + * 4. Increment each prime number bucket at an offset of the + * corresponding remainder with one. + * + * Input: 1. GPtr - pointer to global scavenger area + * 2. flags - can include kHFSHasAttributesMask and/or kHFSHasSecurityMask + * 3. fileid - fileID for which particular extended attribute is seen + * 4. 
btreetye - can be kHFSPlusCatalogRecord or kHFSPlusAttributeRecord + * indicates which btree prime number bucket should be incremented + * + * Output: nil + */ +void RecordXAttrBits(SGlobPtr GPtr, UInt16 flags, HFSCatalogNodeID fileid, UInt16 btreetype) +{ + PrimeBuckets *cur_attr = NULL; + PrimeBuckets *cur_sec = NULL; + + if ( ((flags & kHFSHasAttributesMask) == 0) && + ((flags & kHFSHasSecurityMask) == 0) ) { + /* No attributes exists for this fileID */ + goto out; + } + + /* Determine which bucket are we updating */ + if (btreetype == kCalculatedCatalogRefNum) { + /* Catalog BTree buckets */ + if (flags & kHFSHasAttributesMask) { + cur_attr = &(GPtr->CBTAttrBucket); + GPtr->cat_ea_count++; + } + if (flags & kHFSHasSecurityMask) { + cur_sec = &(GPtr->CBTSecurityBucket); + GPtr->cat_acl_count++; + } + } else if (btreetype == kCalculatedAttributesRefNum) { + /* Attribute BTree buckets */ + if (flags & kHFSHasAttributesMask) { + cur_attr = &(GPtr->ABTAttrBucket); + GPtr->attr_ea_count++; + } + if (flags & kHFSHasSecurityMask) { + cur_sec = &(GPtr->ABTSecurityBucket); + GPtr->attr_acl_count++; + } + } else { + /* Incorrect btreetype found */ + goto out; + } + + if (cur_attr) { + add_prime_bucket_uint32(cur_attr, fileid); + } + + if (cur_sec) { + add_prime_bucket_uint32(cur_sec, fileid); + } + +out: + return; +} + +/* Function: CompareXattrPrimeBuckets + * + * Description: + * This function compares the prime number buckets for catalog btree + * and attribute btree for the given attribute type (normal attribute + * bit or security bit). + * + * Input: 1. GPtr - pointer to global scavenger area + * 2. BitMask - indicate which attribute type should be compared. + * can include kHFSHasAttributesMask and/or kHFSHasSecurityMask + * Output: zero - buckets were compared successfully + * non-zero - buckets were not compared + */ +static int CompareXattrPrimeBuckets(SGlobPtr GPtr, UInt16 BitMask) +{ + int result = 1; + PrimeBuckets *cat; /* Catalog BTree */ + PrimeBuckets *attr; /* Attribute BTree */ + + /* Find the correct PrimeBuckets to compare */ + if (BitMask & kHFSHasAttributesMask) { + /* Compare buckets for attribute bit */ + cat = &(GPtr->CBTAttrBucket); + attr = &(GPtr->ABTAttrBucket); + } else if (BitMask & kHFSHasSecurityMask) { + /* Compare buckets for security bit */ + cat = &(GPtr->CBTSecurityBucket); + attr = &(GPtr->ABTSecurityBucket); + } else { + plog ("%s: Incorrect BitMask found.\n", __FUNCTION__); + goto out; + } + + result = compare_prime_buckets(cat, attr); + if (result) { + char catbtree[32], attrbtree[32]; + /* Unequal values found, set the error bit in ABTStat */ + if (BitMask & kHFSHasAttributesMask) { + fsckPrint(GPtr->context, E_IncorrectAttrCount); + sprintf (catbtree, "%u", GPtr->cat_ea_count); + sprintf (attrbtree, "%u", GPtr->attr_ea_count); + fsckPrint(GPtr->context, E_BadValue, attrbtree, catbtree); + GPtr->ABTStat |= S_AttributeCount; + } else { + fsckPrint(GPtr->context, E_IncorrectSecurityCount); + sprintf (catbtree, "%u", GPtr->cat_acl_count); + sprintf (attrbtree, "%u", GPtr->attr_acl_count); + fsckPrint (GPtr->context, E_BadValue, attrbtree, catbtree); + GPtr->ABTStat |= S_SecurityCount; + } + } + + result = 0; + +out: + return result; +} + +/*------------------------------------------------------------------------------ + +Function: AttrBTChk - (Attributes BTree Check) + +Function: Verifies the attributes BTree structure. 
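RecordXAttrBits and CompareXattrPrimeBuckets above rely on add_prime_bucket_uint32 and compare_prime_buckets, which are defined elsewhere in this patch, so only the bookkeeping around them is visible here. As a rough, self-contained sketch of the underlying idea, using a made-up prime set and bucket layout (the real PrimeBuckets type is not shown in this excerpt and may differ): every file ID observed during a walk is reduced modulo each prime and the remainders are tallied, so if the catalog walk and the attribute walk saw the same multiset of file IDs, every tally agrees, and any differing bucket exposes a discrepancy between the two B-trees without having to store the IDs themselves.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Illustrative prime set; the patch's real set and bucket sizes may differ. */
static const uint32_t kSketchPrimes[] = { 3, 5, 7, 11, 13 };
#define NSKETCHPRIMES (sizeof(kSketchPrimes) / sizeof(kSketchPrimes[0]))

struct sketch_buckets {
	/* one row per prime, one counter per possible remainder (largest prime
	 * used here is 13); callers zero-initialize the structure */
	uint32_t counts[NSKETCHPRIMES][13];
};

/* Tally one file ID: bump counts[i][fileid % prime_i] for every prime. */
static void add_to_buckets(struct sketch_buckets *pb, uint32_t fileid)
{
	for (size_t i = 0; i < NSKETCHPRIMES; i++)
		pb->counts[i][fileid % kSketchPrimes[i]]++;
}

/* Two walks saw the same multiset of file IDs only if all tallies agree. */
static bool buckets_match(const struct sketch_buckets *a,
                          const struct sketch_buckets *b)
{
	return memcmp(a->counts, b->counts, sizeof(a->counts)) == 0;
}

A collision (different multisets producing identical tallies in every residue class of every prime) is possible in principle, so the comparison is best read as a strong consistency heuristic; when it fails, the code above reports the differing extended attribute or ACL counts and sets the corresponding bit in ABTStat.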
+ +Input: GPtr - pointer to scavenger global area + +Output: ExtBTChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr AttrBTChk( SGlobPtr GPtr ) +{ + OSErr err; + + // + // If this volume has no attributes BTree, then skip this check + // + if (GPtr->calculatedVCB->vcbAttributesFile == NULL) + return noErr; + + // Write the status message here to avoid potential confusion to user. + fsckPrint(GPtr->context, hfsExtAttrBTCheck); + + // Set up + GPtr->TarID = kHFSAttributesFileID; // target = attributes file + GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB + + // + // check out the BTree structure + // + + err = BTCheck( GPtr, kCalculatedAttributesRefNum, (CheckLeafRecordProcPtr)CheckAttributeRecord); + ReturnIfError( err ); // invalid attributes file BTree + + // check the allocation block information about the last attribute + err = CheckLastAttrAllocation(GPtr); + ReturnIfError(err); + + // record the last fileID for Chinese Remainder Theorem comparison + RecordLastAttrBits(GPtr); + + // compare the attributes prime buckets calculated from catalog btree and attribute btree + err = CompareXattrPrimeBuckets(GPtr, kHFSHasAttributesMask); + ReturnIfError( err ); + + // compare the security prime buckets calculated from catalog btree and attribute btree + err = CompareXattrPrimeBuckets(GPtr, kHFSHasSecurityMask); + ReturnIfError( err ); + + // + // check out the allocation map structure + // + + err = BTMapChk( GPtr, kCalculatedAttributesRefNum ); + ReturnIfError( err ); // Invalid attributes BTree map + + // + // Make sure unused nodes in the B-tree are zero filled. + // + err = BTCheckUnusedNodes(GPtr, kCalculatedAttributesRefNum, &GPtr->ABTStat); + ReturnIfError( err ); + + // + // compare BTree header record on disk with scavenger's BTree header record + // + + err = CmpBTH( GPtr, kCalculatedAttributesRefNum ); + ReturnIfError( err ); + + // + // compare BTree map on disk with scavenger's BTree map + // + + err = CmpBTM( GPtr, kCalculatedAttributesRefNum ); + + return( err ); +} + + +/*------------------------------------------------------------------------------ + +Name: RcdValErr - (Record Valence Error) + +Function: Allocates a RepairOrder node and linkg it into the 'GPtr->RepairP' + list, to describe an incorrect valence count for possible repair. 
+ +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + correct - the correct valence, as computed here + incorrect - the incorrect valence as found in volume + parid - the parent id, if S_Valence error + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid ) /* the ParID, if needed */ +{ + RepairOrderPtr p; /* the new node we compile */ + SInt16 n; /* size of node we allocate */ + Boolean isHFSPlus; + char goodStr[32], badStr[32]; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + fsckPrint(GPtr->context, type); + sprintf(goodStr, "%u", correct); + sprintf(badStr, "%u", incorrect); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + + if (type == E_DirVal) /* if normal directory valence error */ + n = CatalogNameSize( &GPtr->CName, isHFSPlus); + else + n = 0; /* other errors don't need the name */ + + p = AllocMinorRepairOrder( GPtr,n ); /* get the node */ + if (p==NULL) /* quit if out of room */ + return (R_NoMem); + + p->type = type; /* save error info */ + p->correct = correct; + p->incorrect = incorrect; + p->parid = parid; + + if ( n != 0 ) /* if name needed */ + CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus ); + + GPtr->CatStat |= S_Valence; /* set flag to trigger repair */ + + return( noErr ); /* successful return */ +} + +/*------------------------------------------------------------------------------ + +Name: RcdHsFldCntErr - (Record HasFolderCount) + +Function: Allocates a RepairOrder node and linkg it into the 'GPtr->RepairP' + list, to describe folder flag missing the HasFolderCount bit + +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + correct - the folder mask, as computed here + incorrect - the folder mask, as found in volume + fid - the folder id + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +int RcdHsFldCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid ) +{ + RepairOrderPtr p; /* the new node we compile */ + char goodStr[32], badStr[32]; + fsckPrint(GPtr->context, type, fid); + sprintf(goodStr, "%#x", correct); + sprintf(badStr, "%#x", incorrect); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + + p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */ + if (p==NULL) /* quit if out of room */ + return (R_NoMem); + + p->type = type; /* save error info */ + p->correct = correct; + p->incorrect = incorrect; + p->parid = fid; + + return( noErr ); /* successful return */ +} +/*------------------------------------------------------------------------------ + +Name: RcdFCntErr - (Record Folder Count) + +Function: Allocates a RepairOrder node and linkg it into the 'GPtr->RepairP' + list, to describe an incorrect folder count for possible repair. 
+ +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + correct - the correct folder count, as computed here + incorrect - the incorrect folder count as found in volume + fid - the folder id + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +int RcdFCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid ) +{ + RepairOrderPtr p; /* the new node we compile */ + char goodStr[32], badStr[32]; + + fsckPrint(GPtr->context, type, fid); + sprintf(goodStr, "%u", correct); + sprintf(badStr, "%u", incorrect); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + + p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */ + if (p==NULL) /* quit if out of room */ + return (R_NoMem); + + p->type = type; /* save error info */ + p->correct = correct; + p->incorrect = incorrect; + p->parid = fid; + + return( noErr ); /* successful return */ +} + +/*------------------------------------------------------------------------------ + +Name: RcdMDBAllocationBlockStartErr - (Record Allocation Block Start Error) + +Function: Allocates a RepairOrder node and linking it into the 'GPtr->RepairP' + list, to describe the error for possible repair. + +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + correct - the correct valence, as computed here + incorrect - the incorrect valence as found in volume + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb ) +{ + RepairOrderPtr p; // the new node we compile + EmbededVolDescription *desc; + + RcdError( GPtr, type ); // first, record the error + + p = AllocMinorRepairOrder( GPtr, sizeof(EmbededVolDescription) ); // get the node + if ( p == nil ) return( R_NoMem ); + + p->type = type; // save error info + desc = (EmbededVolDescription *) &(p->name); + desc->drAlBlSt = mdb->drAlBlSt; + desc->drEmbedSigWord = mdb->drEmbedSigWord; + desc->drEmbedExtent.startBlock = mdb->drEmbedExtent.startBlock; + desc->drEmbedExtent.blockCount = mdb->drEmbedExtent.blockCount; + + GPtr->VIStat |= S_InvalidWrapperExtents; // set flag to trigger repair + + return( noErr ); // successful return +} + + +#if 0 // not used at this time +/*------------------------------------------------------------------------------ + +Name: RcdInvalidWrapperExtents - (Record Invalid Wrapper Extents) + +Function: Allocates a RepairOrder node and linking it into the 'GPtr->RepairP' + list, to describe the error for possible repair. 
+ +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + correct - the correct valence, as computed here + incorrect - the incorrect valence as found in volume + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static OSErr RcdInvalidWrapperExtents( SGlobPtr GPtr, OSErr type ) +{ + RepairOrderPtr p; // the new node we compile + + RcdError( GPtr, type ); // first, record the error + + p = AllocMinorRepairOrder( GPtr, 0 ); // get the node + if ( p == nil ) return( R_NoMem ); + + p->type = type; // save error info + + GPtr->VIStat |= S_BadMDBdrAlBlSt; // set flag to trigger repair + + return( noErr ); // successful return +} +#endif + + +#if 0 // We just check and fix them in SRepair.c +/*------------------------------------------------------------------------------ + +Name: RcdOrphanedExtentErr + +Function: Allocates a RepairOrder node and linkg it into the 'GPtr->RepairP' + list, to describe an locked volume name for possible repair. + +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + incorrect - the incorrect file flags as found in file record + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static OSErr RcdOrphanedExtentErr ( SGlobPtr GPtr, SInt16 type, void *theKey ) +{ + RepairOrderPtr p; /* the new node we compile */ + SInt16 n; /* size of node we allocate */ + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + RcdError( GPtr,type ); /* first, record the error */ + + if ( isHFSPlus ) + n = sizeof( HFSPlusExtentKey ); + else + n = sizeof( HFSExtentKey ); + + p = AllocMinorRepairOrder( GPtr, n ); /* get the node */ + if ( p == NULL ) /* quit if out of room */ + return( R_NoMem ); + + CopyMemory( theKey, p->name, n ); /* copy in the key */ + + p->type = type; /* save error info */ + + GPtr->EBTStat |= S_OrphanedExtent; /* set flag to trigger repair */ + + return( noErr ); /* successful return */ +} +#endif + + +/*------------------------------------------------------------------------------ + +Function: VInfoChk - (Volume Info Check) + +Function: Verifies volume level information. 
+ +Input: GPtr - pointer to scavenger global area + +Output: VInfoChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr VInfoChk( SGlobPtr GPtr ) +{ + OSErr result; + UInt16 recSize; + Boolean isHFSPlus; + UInt32 hint; + UInt64 maxClump; + SVCB *vcb; + VolumeObjectPtr myVOPtr; + CatalogRecord record; + CatalogKey foundKey; + BlockDescriptor altBlock; + BlockDescriptor priBlock; + + vcb = GPtr->calculatedVCB; + altBlock.buffer = priBlock.buffer = NULL; + isHFSPlus = VolumeObjectIsHFSPlus( ); + myVOPtr = GetVolumeObjectPtr( ); + + // locate the catalog record for the root directoryÉ + result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint ); + GPtr->TarID = kHFSCatalogFileID; /* target = catalog */ + GPtr->TarBlock = hint; /* target block = returned hint */ + if ( result != noErr ) + { + result = IntError( GPtr, result ); + return( result ); + } + + GPtr->TarID = AMDB_FNum; // target = alternate MDB or VHB + GetVolumeObjectAlternateBlockNum( &GPtr->TarBlock ); + result = GetVolumeObjectAlternateBlock( &altBlock ); + + // invalidate if we have not marked the alternate as OK + if ( isHFSPlus ) { + if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 ) + result = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) { + result = badMDBErr; + } + if ( result != noErr ) { + GPtr->VIStat = GPtr->VIStat | S_MDB; + if ( VolumeObjectIsHFS( ) ) { + WriteError( GPtr, E_MDBDamaged, 0, 0 ); + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid alternate MDB at %qd result %d \n", GPtr->TarBlock, result); + } + else { + WriteError( GPtr, E_VolumeHeaderDamaged, 0, 0 ); + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid alternate VHB at %qd result %d \n", GPtr->TarBlock, result); + } + result = noErr; + goto exit; + } + + GPtr->TarID = MDB_FNum; // target = primary MDB or VHB + GetVolumeObjectPrimaryBlockNum( &GPtr->TarBlock ); + result = GetVolumeObjectPrimaryBlock( &priBlock ); + + // invalidate if we have not marked the primary as OK + if ( isHFSPlus ) { + if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 ) + result = badMDBErr; + } + else if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) { + result = badMDBErr; + } + if ( result != noErr ) { + GPtr->VIStat = GPtr->VIStat | S_MDB; + if ( VolumeObjectIsHFS( ) ) { + WriteError( GPtr, E_MDBDamaged, 1, 0 ); + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid primary MDB at %qd result %d \n", GPtr->TarBlock, result); + } + else { + WriteError( GPtr, E_VolumeHeaderDamaged, 1, 0 ); + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid primary VHB at %qd result %d \n", GPtr->TarBlock, result); + } + result = noErr; + goto exit; + } + + // check to see that embedded HFS plus volumes still have both (alternate and primary) MDBs + if ( VolumeObjectIsEmbeddedHFSPlus( ) && + ( (myVOPtr->flags & kVO_PriMDBOK) == 0 || (myVOPtr->flags & kVO_AltMDBOK) == 0 ) ) + { + GPtr->VIStat |= S_WMDB; + WriteError( GPtr, E_MDBDamaged, 0, 0 ); + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid wrapper MDB \n"); + } + + if ( isHFSPlus ) + { + HFSPlusVolumeHeader * volumeHeader; + HFSPlusVolumeHeader * alternateVolumeHeader; + + alternateVolumeHeader = (HFSPlusVolumeHeader *) altBlock.buffer; + volumeHeader = (HFSPlusVolumeHeader *) priBlock.buffer; + + maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */ + + // check out 
creation and last mod dates + vcb->vcbCreateDate = alternateVolumeHeader->createDate; // use creation date in alt MDB + vcb->vcbModifyDate = volumeHeader->modifyDate; // don't change last mod date + vcb->vcbCheckedDate = volumeHeader->checkedDate; // don't change checked date + + // 3882639: Removed check for volume attributes in HFS Plus + vcb->vcbAttributes = volumeHeader->attributes; + + // verify allocation map ptr + if ( volumeHeader->nextAllocation < vcb->vcbTotalBlocks ) + vcb->vcbNextAllocation = volumeHeader->nextAllocation; + else + vcb->vcbNextAllocation = 0; + + // verify default clump sizes + if ( (volumeHeader->rsrcClumpSize > 0) && + (volumeHeader->rsrcClumpSize <= kMaxClumpSize) && + ((volumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) ) + vcb->vcbRsrcClumpSize = volumeHeader->rsrcClumpSize; + else if ( (alternateVolumeHeader->rsrcClumpSize > 0) && + (alternateVolumeHeader->rsrcClumpSize <= kMaxClumpSize) && + ((alternateVolumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) ) + vcb->vcbRsrcClumpSize = alternateVolumeHeader->rsrcClumpSize; + else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize) + vcb->vcbRsrcClumpSize = 4 * vcb->vcbBlockSize; + else + vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */ + + if ( vcb->vcbRsrcClumpSize > kMaxClumpSize ) + vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */ + + if ( (volumeHeader->dataClumpSize > 0) && (volumeHeader->dataClumpSize <= kMaxClumpSize) && + ((volumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) ) + vcb->vcbDataClumpSize = volumeHeader->dataClumpSize; + else if ( (alternateVolumeHeader->dataClumpSize > 0) && + (alternateVolumeHeader->dataClumpSize <= kMaxClumpSize) && + ((alternateVolumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) ) + vcb->vcbDataClumpSize = alternateVolumeHeader->dataClumpSize; + else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize) + vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize; + else + vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */ + + if ( vcb->vcbDataClumpSize > kMaxClumpSize ) + vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */ + + /* Verify next CNode ID. + * If volumeHeader->nextCatalogID < vcb->vcbNextCatalogID, probably + * nextCatalogID has wrapped around. + * If volumeHeader->nextCatalogID > vcb->vcbNextCatalogID, probably + * many files were created and deleted, followed by no new file + * creation. + */ + if ( (volumeHeader->nextCatalogID > vcb->vcbNextCatalogID) ) + vcb->vcbNextCatalogID = volumeHeader->nextCatalogID; + + //¥¥TBD location and unicode? 
volumename + // verify the volume name + result = ChkCName( GPtr, (const CatalogName*) &foundKey.hfsPlus.nodeName, isHFSPlus ); + + // verify last backup date and backup seqence number + vcb->vcbBackupDate = volumeHeader->backupDate; /* don't change last backup date */ + + // verify write count + vcb->vcbWriteCount = volumeHeader->writeCount; /* don't change write count */ + + // check out extent file clump size + if ( ((volumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) && + (volumeHeader->extentsFile.clumpSize <= maxClump) ) + vcb->vcbExtentsFile->fcbClumpSize = volumeHeader->extentsFile.clumpSize; + else if ( ((alternateVolumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) && + (alternateVolumeHeader->extentsFile.clumpSize <= maxClump) ) + vcb->vcbExtentsFile->fcbClumpSize = alternateVolumeHeader->extentsFile.clumpSize; + else + vcb->vcbExtentsFile->fcbClumpSize = + (alternateVolumeHeader->extentsFile.extents[0].blockCount * vcb->vcbBlockSize); + + // check out catalog file clump size + if ( ((volumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) && + (volumeHeader->catalogFile.clumpSize <= maxClump) ) + vcb->vcbCatalogFile->fcbClumpSize = volumeHeader->catalogFile.clumpSize; + else if ( ((alternateVolumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) && + (alternateVolumeHeader->catalogFile.clumpSize <= maxClump) ) + vcb->vcbCatalogFile->fcbClumpSize = alternateVolumeHeader->catalogFile.clumpSize; + else + vcb->vcbCatalogFile->fcbClumpSize = + (alternateVolumeHeader->catalogFile.extents[0].blockCount * vcb->vcbBlockSize); + + // check out allocations file clump size + if ( ((volumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) && + (volumeHeader->allocationFile.clumpSize <= maxClump) ) + vcb->vcbAllocationFile->fcbClumpSize = volumeHeader->allocationFile.clumpSize; + else if ( ((alternateVolumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) && + (alternateVolumeHeader->allocationFile.clumpSize <= maxClump) ) + vcb->vcbAllocationFile->fcbClumpSize = alternateVolumeHeader->allocationFile.clumpSize; + else + vcb->vcbAllocationFile->fcbClumpSize = + (alternateVolumeHeader->allocationFile.extents[0].blockCount * vcb->vcbBlockSize); + + // check out attribute file clump size + if (vcb->vcbAttributesFile) { + if ( ((volumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) && + (volumeHeader->attributesFile.clumpSize <= maxClump) && + (volumeHeader->attributesFile.clumpSize != 0)) + vcb->vcbAttributesFile->fcbClumpSize = volumeHeader->attributesFile.clumpSize; + else if ( ((alternateVolumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) && + (alternateVolumeHeader->attributesFile.clumpSize <= maxClump) && + (alternateVolumeHeader->attributesFile.clumpSize != 0)) + vcb->vcbAttributesFile->fcbClumpSize = alternateVolumeHeader->attributesFile.clumpSize; + else if (vcb->vcbCatalogFile->fcbClumpSize != 0) + // The original attribute clump may be too small, use catalog's + vcb->vcbAttributesFile->fcbClumpSize = vcb->vcbCatalogFile->fcbClumpSize; + else + vcb->vcbAttributesFile->fcbClumpSize = + alternateVolumeHeader->attributesFile.extents[0].blockCount * vcb->vcbBlockSize; + } + + CopyMemory( volumeHeader->finderInfo, vcb->vcbFinderInfo, sizeof(vcb->vcbFinderInfo) ); + + // Now compare verified Volume Header info (in the form of a vcb) with Volume Header info on disk + result = CompareVolumeHeader( GPtr, volumeHeader ); + + // check to see that embedded volume info is correct in both wrapper MDBs + 
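The clump-size handling just above follows one fall-back policy throughout VInfoChk: a value from the primary header is accepted only if it is non-zero, a whole multiple of the allocation block size, and within the applicable limit; otherwise the alternate header is tried, and failing that a default is chosen. For the resource and data fork clump sizes the limit is kMaxClumpSize and the default is four allocation blocks, dropping to a single block on very large volumes; for the special B-tree files the limit is a quarter of the volume and the last resort is the size of the file's first extent. Below is a compact sketch of the fork-clump variant only; kMaxClumpSize is replaced by a stand-in constant because its value is defined elsewhere in this patch.

#include <stdint.h>

#define SKETCH_MAX_CLUMP (64u * 1024u * 1024u)  /* stand-in for kMaxClumpSize */

/* A clump size is usable if it is non-zero, within the limit, and a whole
 * multiple of the allocation block size. */
static int clump_is_sane(uint64_t clump, uint32_t block_size, uint64_t limit)
{
	return clump != 0 && clump <= limit && (clump % block_size) == 0;
}

/* Primary header value, else alternate header value, else four allocation
 * blocks, else a single block when even that would exceed the limit. */
static uint64_t pick_fork_clump(uint64_t primary, uint64_t alternate,
                                uint32_t block_size)
{
	if (clump_is_sane(primary, block_size, SKETCH_MAX_CLUMP))
		return primary;
	if (clump_is_sane(alternate, block_size, SKETCH_MAX_CLUMP))
		return alternate;
	if (4ULL * block_size <= SKETCH_MAX_CLUMP)
		return 4ULL * block_size;
	return block_size;
}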
CheckEmbeddedVolInfoInMDBs( GPtr ); + + } + else // HFS + { + HFSMasterDirectoryBlock *mdbP; + HFSMasterDirectoryBlock *alternateMDB; + + // + // get volume name from BTree Key + // + + alternateMDB = (HFSMasterDirectoryBlock *) altBlock.buffer; + mdbP = (HFSMasterDirectoryBlock *) priBlock.buffer; + + maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */ + + // check out creation and last mod dates + vcb->vcbCreateDate = alternateMDB->drCrDate; /* use creation date in alt MDB */ + vcb->vcbModifyDate = mdbP->drLsMod; /* don't change last mod date */ + + // verify volume attribute flags + if ( (mdbP->drAtrb & VAtrb_Msk) == 0 ) + vcb->vcbAttributes = mdbP->drAtrb; + else + vcb->vcbAttributes = VAtrb_DFlt; + + // verify allocation map ptr + if ( mdbP->drAllocPtr < vcb->vcbTotalBlocks ) + vcb->vcbNextAllocation = mdbP->drAllocPtr; + else + vcb->vcbNextAllocation = 0; + + // verify default clump size + if ( (mdbP->drClpSiz > 0) && + (mdbP->drClpSiz <= maxClump) && + ((mdbP->drClpSiz % vcb->vcbBlockSize) == 0) ) + vcb->vcbDataClumpSize = mdbP->drClpSiz; + else if ( (alternateMDB->drClpSiz > 0) && + (alternateMDB->drClpSiz <= maxClump) && + ((alternateMDB->drClpSiz % vcb->vcbBlockSize) == 0) ) + vcb->vcbDataClumpSize = alternateMDB->drClpSiz; + else + vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize; + + if ( vcb->vcbDataClumpSize > kMaxClumpSize ) + vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */ + + // verify next CNode ID + if ( (mdbP->drNxtCNID > vcb->vcbNextCatalogID) && (mdbP->drNxtCNID <= (vcb->vcbNextCatalogID + 4096)) ) + vcb->vcbNextCatalogID = mdbP->drNxtCNID; + + // verify the volume name + result = ChkCName( GPtr, (const CatalogName*) &vcb->vcbVN, isHFSPlus ); + if ( result == noErr ) + if ( CmpBlock( mdbP->drVN, vcb->vcbVN, vcb->vcbVN[0] + 1 ) == 0 ) + CopyMemory( mdbP->drVN, vcb->vcbVN, kHFSMaxVolumeNameChars + 1 ); /* ...we have a good one */ + + // verify last backup date and backup seqence number + vcb->vcbBackupDate = mdbP->drVolBkUp; /* don't change last backup date */ + vcb->vcbVSeqNum = mdbP->drVSeqNum; /* don't change last backup sequence # */ + + // verify write count + vcb->vcbWriteCount = mdbP->drWrCnt; /* don't change write count */ + + // check out extent file and catalog clump sizes + if ( ((mdbP->drXTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drXTClpSiz <= maxClump) ) + vcb->vcbExtentsFile->fcbClumpSize = mdbP->drXTClpSiz; + else if ( ((alternateMDB->drXTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drXTClpSiz <= maxClump) ) + vcb->vcbExtentsFile->fcbClumpSize = alternateMDB->drXTClpSiz; + else + vcb->vcbExtentsFile->fcbClumpSize = (alternateMDB->drXTExtRec[0].blockCount * vcb->vcbBlockSize); + + if ( ((mdbP->drCTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drCTClpSiz <= maxClump) ) + vcb->vcbCatalogFile->fcbClumpSize = mdbP->drCTClpSiz; + else if ( ((alternateMDB->drCTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drCTClpSiz <= maxClump) ) + vcb->vcbCatalogFile->fcbClumpSize = alternateMDB->drCTClpSiz; + else + vcb->vcbCatalogFile->fcbClumpSize = (alternateMDB->drCTExtRec[0].blockCount * vcb->vcbBlockSize); + + // just copy Finder info for now + CopyMemory(mdbP->drFndrInfo, vcb->vcbFinderInfo, sizeof(mdbP->drFndrInfo)); + + // now compare verified MDB info with MDB info on disk + result = CmpMDB( GPtr, mdbP); + } + +exit: + if (priBlock.buffer) + (void) ReleaseVolumeBlock(vcb, &priBlock, kReleaseBlock); + if (altBlock.buffer) + (void) ReleaseVolumeBlock(vcb, 
&altBlock, kReleaseBlock); + + return (result); + +} /* end of VInfoChk */ + + +/*------------------------------------------------------------------------------ + +Function: VLockedChk - (Volume Name Locked Check) + +Function: Makes sure the volume name isn't locked. If it is locked, generate a repair order. + + This function is not called if file sharing is operating. + +Input: GPtr - pointer to scavenger global area + +Output: VInfoChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +OSErr VLockedChk( SGlobPtr GPtr ) +{ + UInt32 hint; + CatalogKey foundKey; + CatalogRecord record; + UInt16 recSize; + OSErr result; + UInt16 frFlags; + Boolean isHFSPlus; + SVCB *calculatedVCB = GPtr->calculatedVCB; + VolumeObjectPtr myVOPtr; + + myVOPtr = GetVolumeObjectPtr( ); + isHFSPlus = VolumeObjectIsHFSPlus( ); + GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */ + GPtr->TarBlock = 0; /* no target block yet */ + + // + // locate the catalog record for the root directory + // + result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint ); + + if ( result) + { + RcdError( GPtr, E_EntryNotFound ); + return( E_EntryNotFound ); + } + + // put the volume name in the VCB + if ( isHFSPlus == false ) + { + CopyMemory( foundKey.hfs.nodeName, calculatedVCB->vcbVN, sizeof(calculatedVCB->vcbVN) ); + } + else if ( myVOPtr->volumeType != kPureHFSPlusVolumeType ) + { + HFSMasterDirectoryBlock *mdbP; + BlockDescriptor block; + + block.buffer = NULL; + if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 ) + result = GetVolumeObjectPrimaryMDB( &block ); + else + result = GetVolumeObjectAlternateMDB( &block ); + if ( result == noErr ) { + mdbP = (HFSMasterDirectoryBlock *) block.buffer; + CopyMemory( mdbP->drVN, calculatedVCB->vcbVN, sizeof(mdbP->drVN) ); + } + if ( block.buffer != NULL ) + (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock ); + ReturnIfError(result); + } + else // Because we don't have the unicode converters, just fill it with a dummy name. + { + CopyMemory( "\x0dPure HFS Plus", calculatedVCB->vcbVN, sizeof(Str27) ); + } + + GPtr->TarBlock = hint; + if ( isHFSPlus ) + CopyCatalogName( (const CatalogName *)&foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus ); + else + CopyCatalogName( (const CatalogName *)&foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus ); + + if ( (record.recordType == kHFSPlusFolderRecord) || (record.recordType == kHFSFolderRecord) ) + { + frFlags = record.recordType == kHFSPlusFolderRecord ? + record.hfsPlusFolder.userInfo.frFlags : + record.hfsFolder.userInfo.frFlags; + + if ( frFlags & fNameLocked ) // name locked bit set? + RcdNameLockedErr( GPtr, E_LockedDirName, frFlags ); + } + + return( noErr ); +} + + +/*------------------------------------------------------------------------------ + +Name: RcdNameLockedErr + +Function: Allocates a RepairOrder node and linkg it into the 'GPtr->RepairP' + list, to describe an locked volume name for possible repair. 
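RcdNameLockedErr, defined just below, encodes the fix as a masked flag update: maskBit names the only bit that is allowed to change, correct carries the desired value of that bit, and incorrect preserves the flags found on disk. How the repair stage consumes such an order is outside this excerpt, but the encoding implies an application along these lines (a hedged illustration, not code from the patch):

#include <stdint.h>

/* Keep every bit outside `mask` as found on disk and take the bits inside
 * `mask` from the corrected value. */
static uint16_t apply_masked_repair(uint16_t on_disk, uint16_t correct,
                                    uint16_t mask)
{
	return (uint16_t)((on_disk & ~mask) | (correct & mask));
}

With mask set to fNameLocked and correct equal to incorrect with that bit cleared, the result simply drops the locked-name flag from the root folder's Finder flags while leaving the other bits untouched.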
+ +Input: GPtr - ptr to scavenger global data + type - error code (E_xxx), which should be >0 + incorrect - the incorrect file flags as found in file record + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static int RcdNameLockedErr( SGlobPtr GPtr, SInt16 type, UInt32 incorrect ) /* for a consistency check */ +{ + RepairOrderPtr p; /* the new node we compile */ + int n; /* size of node we allocate */ + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus( ); + RcdError( GPtr, type ); /* first, record the error */ + + n = CatalogNameSize( &GPtr->CName, isHFSPlus ); + + p = AllocMinorRepairOrder( GPtr, n ); /* get the node */ + if ( p==NULL ) /* quit if out of room */ + return ( R_NoMem ); + + CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus ); + + p->type = type; /* save error info */ + p->correct = incorrect & ~fNameLocked; /* mask off the name locked bit */ + p->incorrect = incorrect; + p->maskBit = (UInt16)fNameLocked; + p->parid = 1; + + GPtr->CatStat |= S_LockedDirName; /* set flag to trigger repair */ + + return( noErr ); /* successful return */ +} + +/*------------------------------------------------------------------------------ + +Name: RecordBadExtent + +Function: Allocates a RepairOrder for repairing bad extent. + +Input: GPtr - ptr to scavenger global data + fileID - fileID of the file with bad extent + forkType - bad extent's fork type + startBlock - start block of the bad extent record + badExtentIndex - index of bad extent entry in the extent record + +Output: 0 - no error + R_NoMem - not enough mem to allocate record +------------------------------------------------------------------------------*/ + +static int RecordBadExtent(SGlobPtr GPtr, UInt32 fileID, UInt8 forkType, + UInt32 startBlock, UInt32 badExtentIndex) +{ + RepairOrderPtr p; + Boolean isHFSPlus; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + p = AllocMinorRepairOrder(GPtr, 0); + if (p == NULL) { + return(R_NoMem); + } + + p->type = E_ExtEnt; + p->forkType = forkType; + p->correct = badExtentIndex; + p->hint = startBlock; + p->parid = fileID; + + GPtr->CatStat |= S_BadExtent; + return (0); +} + +/* + * Build a catalog node thread key. 
+ */ +__unused static void +buildthreadkey(UInt32 parentID, int std_hfs, CatalogKey *key) +{ + if (std_hfs) { + key->hfs.keyLength = kHFSCatalogKeyMinimumLength; + key->hfs.reserved = 0; + key->hfs.parentID = parentID; + key->hfs.nodeName[0] = 0; + } else { + key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength; + key->hfsPlus.parentID = parentID; + key->hfsPlus.nodeName.length = 0; + } +} + + +static void +printpath(SGlobPtr GPtr, UInt32 fileID) +{ + int result; + char path[PATH_MAX * 4]; + unsigned int pathlen = PATH_MAX * 4; + + if (fileID < kHFSFirstUserCatalogNodeID) { + switch(fileID) { + case kHFSExtentsFileID: + printf("$Extents_Overflow_File\n"); + return; + case kHFSCatalogFileID: + printf("$Catalog_File\n"); + return; + case kHFSAllocationFileID: + printf("$Allocation_Bitmap_File\n"); + return; + case kHFSAttributesFileID: + printf("$Attributes_File\n"); + return; + default: + printf("$File_ID_%d\n", fileID); + return; + } + } + + result = GetFileNamePathByID(GPtr, fileID, path, &pathlen, NULL, NULL, NULL); + if (result) { + printf ("error %d getting path for id=%u\n", result, fileID); + } + + printf("\"ROOT_OF_VOLUME%s\" (file id=%u)\n", path, fileID); +} + +void +CheckPhysicalMatch(SVCB *vcb, UInt32 startblk, UInt32 blkcount, UInt32 fileNumber, UInt8 forkType) +{ + int i; + u_int64_t blk, blk1, blk2; + u_int64_t offset; + + offset = (u_int64_t) startblk * (u_int64_t) vcb->vcbBlockSize; + + if (vcb->vcbSignature == kHFSPlusSigWord) + offset += vcb->vcbEmbeddedOffset; // offset into the wrapper + else + offset += vcb->vcbAlBlSt * 512ULL; // offset to start of volume + + blk1 = offset / gBlockSize; + blk2 = blk1 + ((blkcount * vcb->vcbBlockSize) / gBlockSize); + + for (i = 0; i < gBlkListEntries; ++i) { + blk = gBlockList[i]; + + if (blk >= blk1 && blk < blk2) { + // printf("block %d is in file %d\n", blk, fileNumber); + /* Do we need to grow the found blocks list? */ + if (gFoundBlockEntries % FOUND_BLOCKS_QUANTUM == 0) { + struct found_blocks *new_blocks; + new_blocks = realloc(gFoundBlocksList, (gFoundBlockEntries + FOUND_BLOCKS_QUANTUM) * sizeof(struct found_blocks)); + if (new_blocks == NULL) { + fprintf(stderr, "CheckPhysicalMatch: Out of memory!\n"); + return; + } + gFoundBlocksList = new_blocks; + } + gFoundBlocksList[gFoundBlockEntries].block = blk; + gFoundBlocksList[gFoundBlockEntries].fileID = fileNumber; + ++gFoundBlockEntries; + } + } +} + +static int compare_found_blocks(const void *x1_arg, const void *x2_arg) +{ + const struct found_blocks *x1 = x1_arg; + const struct found_blocks *x2 = x2_arg; + + if (x1->block < x2->block) + return -1; + else if (x1->block > x2->block) + return 1; + else { + if (x1->fileID < x2->fileID) + return -1; + else if (x1->fileID > x2->fileID) + return 1; + } + + return 0; +} + +void +dumpblocklist(SGlobPtr GPtr) +{ + int i, j; + u_int64_t block; + + /* Sort the found blocks */ + qsort(gFoundBlocksList, gFoundBlockEntries, sizeof(struct found_blocks), compare_found_blocks); + + /* + * Print out the blocks with matching files. In the case of overlapped + * extents, the same block number will be printed multiple times, with + * each file containing an overlapping extent. If overlapping extents + * come from the same file, then that path will be printed multiple times. 
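+ *
+ * For example (illustrative values only), a matched block prints as
+ *	block 123456:	"ROOT_OF_VOLUME/some/dir/file" (file id=57)
+ * and a block with no matching file prints as
+ *	block 123457:	*** NO MATCH ***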
+ */
+	for (i = 0; i < gFoundBlockEntries; ++i) {
+		block = gFoundBlocksList[i].block;
+
+		printf("block %llu:\t", (unsigned long long) block);
+		printpath(GPtr, gFoundBlocksList[i].fileID);
+
+		/* Remove block from the gBlockList */
+		for (j = 0; j < gBlkListEntries; ++j) {
+			if (gBlockList[j] == block) {
+				gBlockList[j] = gBlockList[--gBlkListEntries];
+				break;
+			}
+		}
+	}
+
+	/* Print out the blocks without matching files */
+	for (j = 0; j < gBlkListEntries; ++j) {
+		printf("block %llu:\t*** NO MATCH ***\n", (unsigned long long) gBlockList[j]);
+	}
+}
+
+/*------------------------------------------------------------------------------
+
+Function:	CheckFileExtents - (Check File Extents)
+
+Description:
+		Verifies the extent info for file data or extended attribute data.  It
+		checks the correctness of extent data.  If the extent information is
+		correct/valid, it updates the in-memory volume bitmap, the total number
+		of valid blocks for the given file, and, if overlapping extents exist,
+		adds them to the overlap extents list.  If the extent information is
+		not correct, it considers the file truncated beyond the bad extent
+		entry and reports only the total number of good blocks seen; the bad
+		extent information is recorded in a repair order so the truncation can
+		be repaired later.  It does not include the invalid extent, or any
+		extents after it, when checking the volume bitmap and hence overlapping
+		extents.  Note that currently the function returns an error if an
+		invalid extent is found for system files or for extended attributes.
+
+		For the data fork and resource fork of a file - This function checks
+		the extent record present in the catalog record as well as extent
+		overflow records, if any, for the given fileID.
+
+		For extended attribute data - This function only checks the extent record
+		passed as parameter.  If any extended attribute has overflow extents in
+		the attribute btree, this function does not look them up.  It is left
+		to the caller to check the remaining extents for the given file's
+		extended attribute.
+
+Input:
+		GPtr		-	pointer to scavenger global area
+		fileNumber	-	file number for fork/extended attribute
+		forkType	-	fork type
+						00 - kDataFork - data fork
+						01 - kEAData   - extended attribute data extent
+						ff - kRsrcFork - resource fork
+		attrname	-	if fork type is kEAData, attrname contains a pointer to the
+						name of the extended attribute whose extent is being checked;
+						else it should be NULL.  Note that the function assumes that
+						this is a NULL-terminated string.
+ extents - ptr to 1st extent record for the file + +Output: + CheckFileExtents - function result: + noErr = no error + n = error code + blocksUsed - number of allocation blocks allocated to the file +------------------------------------------------------------------------------*/ + +OSErr CheckFileExtents( SGlobPtr GPtr, UInt32 fileNumber, UInt8 forkType, + const unsigned char *attrname, const void *extents, + UInt32 *blocksUsed) +{ + UInt32 blockCount = 0; + UInt32 extentBlockCount; + UInt32 extentStartBlock; + UInt32 hint; + HFSPlusExtentKey key; + HFSPlusExtentKey extentKey; + HFSPlusExtentRecord extentRecord; + UInt16 recSize; + OSErr err = noErr; + SInt16 i; + Boolean firstRecord; + Boolean isHFSPlus; + unsigned int lastExtentIndex; + Boolean foundBadExtent; + + /* For all extended attribute extents, the attrname should not be NULL */ + if (forkType == kEAData) { + assert(attrname != NULL); + } + + isHFSPlus = VolumeObjectIsHFSPlus( ); + firstRecord = true; + foundBadExtent = false; + lastExtentIndex = GPtr->numExtents; + + while ( (extents != nil) && (err == noErr) ) + { + // checkout the extent record first + err = ChkExtRec( GPtr, fileNumber, extents, &lastExtentIndex ); + if (err != noErr) { + DPRINTF (d_info, "%s: Bad extent for fileID %u in extent %u for startblock %u\n", __FUNCTION__, fileNumber, lastExtentIndex, blockCount); + if (cur_debug_level & d_dump_record) + { + plog("Extents:\n"); + HexDump(extents, sizeof(HFSPlusExtentRecord), FALSE); + plog("\n"); + } + + /* Stop verification if bad extent is found for system file or EA */ + if ((fileNumber < kHFSFirstUserCatalogNodeID) || + (forkType == kEAData)) { + break; + } + + /* store information about bad extent in repair order */ + (void) RecordBadExtent(GPtr, fileNumber, forkType, blockCount, lastExtentIndex); + foundBadExtent = true; + err = noErr; + } + + /* Check only till the last valid extent entry reported by ChkExtRec */ + for ( i=0 ; i<lastExtentIndex ; i++ ) // now checkout the extents + { + // HFS+/HFS moving extent fields into local variables for evaluation + if ( isHFSPlus == true ) + { + extentBlockCount = ((HFSPlusExtentDescriptor *)extents)[i].blockCount; + extentStartBlock = ((HFSPlusExtentDescriptor *)extents)[i].startBlock; + } + else + { + extentBlockCount = ((HFSExtentDescriptor *)extents)[i].blockCount; + extentStartBlock = ((HFSExtentDescriptor *)extents)[i].startBlock; + } + + if ( extentBlockCount == 0 ) + break; + + if (gBlkListEntries != 0) + CheckPhysicalMatch(GPtr->calculatedVCB, extentStartBlock, extentBlockCount, fileNumber, forkType); + + err = CaptureBitmapBits(extentStartBlock, extentBlockCount); + if (err == E_OvlExt) { + err = AddExtentToOverlapList(GPtr, fileNumber, (char *)attrname, extentStartBlock, extentBlockCount, forkType); + } + + blockCount += extentBlockCount; + } + + if ( fileNumber == kHFSExtentsFileID ) // Extents file has no overflow extents + break; + + /* Found bad extent for this file, do not find any extents after + * current extent. We assume that the file is truncated at the + * bad extent entry + */ + if (foundBadExtent == true) { + break; + } + + /* For extended attributes, only check the extent passed as parameter. The + * caller will take care of checking other extents, if any, for given + * extended attribute. 
+ */ + if (forkType == kEAData) { + break; + } + + if ( firstRecord == true ) + { + firstRecord = false; + + // Set up the extent key + BuildExtentKey( isHFSPlus, forkType, fileNumber, blockCount, (void *)&key ); + + err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, &key, kNoHint, (void *) &extentKey, (void *) &extentRecord, &recSize, &hint ); + + if ( err == btNotFound ) + { + err = noErr; // no more extent records + extents = nil; + break; + } + else if ( err != noErr ) + { + err = IntError( GPtr, err ); // error from SearchBTreeRecord + return( err ); + } + } + else + { + err = GetBTreeRecord( GPtr->calculatedExtentsFCB, 1, &extentKey, extentRecord, &recSize, &hint ); + + if ( err == btNotFound ) + { + err = noErr; // no more extent records + extents = nil; + break; + } + else if ( err != noErr ) + { + err = IntError( GPtr, err ); /* error from BTGetRecord */ + return( err ); + } + + // Check same file and fork + if ( isHFSPlus ) + { + if ( (extentKey.fileID != fileNumber) || (extentKey.forkType != forkType) ) + break; + } + else + { + if ( (((HFSExtentKey *) &extentKey)->fileID != fileNumber) || (((HFSExtentKey *) &extentKey)->forkType != forkType) ) + break; + } + } + + extents = (void *) &extentRecord; + } + + *blocksUsed = blockCount; + + return( err ); +} + + +void BuildExtentKey( Boolean isHFSPlus, UInt8 forkType, HFSCatalogNodeID fileNumber, UInt32 blockNumber, void * key ) +{ + if ( isHFSPlus ) + { + HFSPlusExtentKey *hfsPlusKey = (HFSPlusExtentKey*) key; + + hfsPlusKey->keyLength = kHFSPlusExtentKeyMaximumLength; + hfsPlusKey->forkType = forkType; + hfsPlusKey->pad = 0; + hfsPlusKey->fileID = fileNumber; + hfsPlusKey->startBlock = blockNumber; + } + else + { + HFSExtentKey *hfsKey = (HFSExtentKey*) key; + + hfsKey->keyLength = kHFSExtentKeyMaximumLength; + hfsKey->forkType = forkType; + hfsKey->fileID = fileNumber; + hfsKey->startBlock = (UInt16) blockNumber; + } +} + + + +// +// Adds this extent to our OverlappedExtentList for later repair. +// +static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrname, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType ) +{ + size_t newHandleSize; + ExtentInfo extentInfo; + ExtentsTable **extentsTableH; + size_t attrlen; + + ClearMemory(&extentInfo, sizeof(extentInfo)); + extentInfo.fileID = fileNumber; + extentInfo.startBlock = extentStartBlock; + extentInfo.blockCount = extentBlockCount; + extentInfo.forkType = forkType; + /* store the name of extended attribute */ + if (forkType == kEAData) { + assert(attrname != NULL); + + attrlen = strlen(attrname) + 1; + extentInfo.attrname = malloc(attrlen); + if (extentInfo.attrname == NULL) { + return(memFullErr); + } + strlcpy(extentInfo.attrname, attrname, attrlen); + } + + // If it's uninitialized + if ( GPtr->overlappedExtents == nil ) + { + GPtr->overlappedExtents = (ExtentsTable **) NewHandleClear( sizeof(ExtentsTable) ); + extentsTableH = GPtr->overlappedExtents; + } + else + { + extentsTableH = GPtr->overlappedExtents; + + if ( ExtentInfoExists( extentsTableH, &extentInfo) == true ) + return( noErr ); + + // Grow the Extents table for a new entry. 
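+		/* The extents table is a variable-length Handle: GetHandleSize()
+		 * returns its current size and one ExtentInfo is appended per new
+		 * overlapping extent, so the handle grows by sizeof(ExtentInfo)
+		 * at a time. */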
+		newHandleSize = ( sizeof(ExtentInfo) ) + ( GetHandleSize( (Handle)extentsTableH ) );
+		SetHandleSize( (Handle)extentsTableH, newHandleSize );
+	}
+
+	// Copy the new extents into the end of the table
+	CopyMemory( &extentInfo, &((**extentsTableH).extentInfo[(**extentsTableH).count]), sizeof(ExtentInfo) );
+
+	// Update the overlap extent bit
+	GPtr->VIStat |= S_OverlappingExtents;
+
+	// Update the extent table count
+	(**extentsTableH).count++;
+
+	return( noErr );
+}
+
+
+/* Check if the given extentInfo already exists in the extents table */
+static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo)
+{
+	UInt32		i;
+	ExtentInfo	*aryExtentInfo;
+
+
+	for ( i = 0 ; i < (**extentsTableH).count ; i++ )
+	{
+		aryExtentInfo = &((**extentsTableH).extentInfo[i]);
+
+		if ( extentInfo->fileID == aryExtentInfo->fileID )
+		{
+			if ( (extentInfo->startBlock == aryExtentInfo->startBlock) &&
+				 (extentInfo->blockCount == aryExtentInfo->blockCount) &&
+				 (extentInfo->forkType == aryExtentInfo->forkType) )
+			{
+				/* startBlock, blockCount, forkType are the same.
+				 * Compare the extended attribute names, if they exist.
+				 */
+
+				/* If no attribute name exists, the two extents are the same */
+				if ((extentInfo->attrname == NULL) &&
+				    (aryExtentInfo->attrname == NULL)) {
+					return(true);
+				}
+
+				/* If only one attribute name exists, the two extents are not the same */
+				if (((extentInfo->attrname != NULL) && (aryExtentInfo->attrname == NULL)) ||
+				    ((extentInfo->attrname == NULL) && (aryExtentInfo->attrname != NULL))) {
+					return(false);
+				}
+
+				/* Both attribute names exist.  Compare the names */
+				if (!strcmp(extentInfo->attrname, aryExtentInfo->attrname)) {
+					return (true);
+				} else {
+					return (false);
+				}
+
+			}
+		}
+	}
+
+	return( false );
+}
+
+/* Function : DoesOverlap
+ *
+ * Description:
+ * This function takes a start block and the count of blocks in a
+ * given extent and compares it against the list of overlapped
+ * extents in the global structure.
+ * This is useful in finding the original files that overlap with
+ * the files found in the catalog btree check.  If a file is found
+ * overlapping, it is added to the overlap list.
+ *
+ * Input:
+ * 1. GPtr - global scavenger pointer.
+ * 2. fileID - file ID being checked.
+ * 3. attrname - name of extended attribute being checked, should be NULL for regular files
+ * 4. startBlock - start block in extent.
+ * 5. blockCount - total number of blocks in extent.
+ * 6. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
+ *
+ * Output: isOverlapped - Boolean value of true or false.
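+ *
+ * For example (illustrative numbers): an entry already in the list that
+ * covers allocation blocks [100, 110) overlaps a new extent starting at
+ * block 105, since 100 < 105 and 100 + 10 > 105; a new extent starting at
+ * block 110 would not overlap it.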
+ */ +static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType) +{ + int i; + Boolean isOverlapped = false; + ExtentInfo *curExtentInfo; + ExtentsTable **extentsTableH = GPtr->overlappedExtents; + + for (i = 0; i < (**extentsTableH).count; i++) { + curExtentInfo = &((**extentsTableH).extentInfo[i]); + /* Check extents */ + if (curExtentInfo->startBlock < startBlock) { + if ((curExtentInfo->startBlock + curExtentInfo->blockCount) > startBlock) { + isOverlapped = true; + break; + } + } else { /* curExtentInfo->startBlock >= startBlock */ + if (curExtentInfo->startBlock < (startBlock + blockCount)) { + isOverlapped = true; + break; + } + } + } /* for loop Extents Table */ + + /* Add this extent to overlap list */ + if (isOverlapped) { + AddExtentToOverlapList(GPtr, fileID, attrname, startBlock, blockCount, forkType); + } + + return isOverlapped; +} /* DoesOverlap */ + +/* Function : CheckHFSPlusExtentRecords + * + * Description: + * For all valid extents, this function calls DoesOverlap to find + * if a given extent is overlapping with another extent existing + * in the overlap list. + * + * Input: + * 1. GPtr - global scavenger pointer. + * 2. fileID - file ID being checked. + * 3. attrname - name of extended attribute being checked, should be NULL for regular files + * 4. extent - extent information to check. + * 5. forkType - type of fork being check (kDataFork, kRsrcFork, kEAData). + * + * Output: None. + */ +static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType) +{ + int i; + + /* Check for overlapping extents for all extents in given extent data */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (extent[i].startBlock == 0) { + break; + } + DoesOverlap(GPtr, fileID, attrname, extent[i].startBlock, extent[i].blockCount, forkType); + } + return; +} /* CheckHFSPlusExtentRecords */ + +/* Function : CheckHFSExtentRecords + * + * Description: + * For all valid extents, this function calls DoesOverlap to find + * if a given extent is overlapping with another extent existing + * in the overlap list. + * + * Input: + * 1. GPtr - global scavenger pointer. + * 2. fileID - file ID being checked. + * 3. extent - extent information to check. + * 4. forkType - type of fork being check (kDataFork, kRsrcFork). + * + * Output: None. + */ +static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType) +{ + int i; + + /* Check for overlapping extents for all extents in given extents */ + for (i = 0; i < kHFSExtentDensity; i++) { + if (extent[i].startBlock == 0) { + break; + } + DoesOverlap(GPtr, fileID, NULL, extent[i].startBlock, extent[i].blockCount, forkType); + } + return; +} /* CheckHFSExtentRecords */ + +/* Function: FindOrigOverlapFiles + * + * Description: + * This function is called only if btree check results in + * overlapped extents errors. The btree checks do not find + * out the original files whose extents are overlapping with one + * being reported in its check. This function finds out all the + * original files whose that are being overlapped. + * + * This function relies on comparison of extents with Overlap list + * created in verify stage. The list is also updated with the + * overlapped extents found in this function. + * + * 1. Compare extents for all the files located in volume header. + * 2. Traverse catalog btree and compare extents of all files. + * 3. 
Traverse extents btree and compare extents for all entries. + * + * Input: GPtr - pointer to global scanvenger area. + * + * Output: err - function result + * zero means success + * non-zero means failure + */ +int FindOrigOverlapFiles(SGlobPtr GPtr) +{ + OSErr err = noErr; + Boolean isHFSPlus; + + UInt16 selCode; /* select access pattern for BTree */ + UInt16 recordSize; + UInt32 hint; + + CatalogRecord catRecord; + CatalogKey catKey; + + ExtentRecord extentRecord; + ExtentKey extentKey; + + HFSPlusAttrRecord attrRecord; + HFSPlusAttrKey attrKey; + char attrName[XATTR_MAXNAMELEN]; + size_t len; + + SVCB *calculatedVCB = GPtr->calculatedVCB; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + /* Check file extents from volume header */ + if (isHFSPlus) { + /* allocation file */ + if (calculatedVCB->vcbAllocationFile) { + CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAllocationFile->fcbFileID, NULL, + calculatedVCB->vcbAllocationFile->fcbExtents32, kDataFork); + } + + /* extents file */ + if (calculatedVCB->vcbExtentsFile) { + CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID, NULL, + calculatedVCB->vcbExtentsFile->fcbExtents32, kDataFork); + } + + /* catalog file */ + if (calculatedVCB->vcbCatalogFile) { + CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID, NULL, + calculatedVCB->vcbCatalogFile->fcbExtents32, kDataFork); + } + + /* attributes file */ + if (calculatedVCB->vcbAttributesFile) { + CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAttributesFile->fcbFileID, NULL, + calculatedVCB->vcbAttributesFile->fcbExtents32, kDataFork); + } + + /* startup file */ + if (calculatedVCB->vcbStartupFile) { + CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbStartupFile->fcbFileID, NULL, + calculatedVCB->vcbStartupFile->fcbExtents32, kDataFork); + } + } else { + /* extents file */ + if (calculatedVCB->vcbExtentsFile) { + CheckHFSExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID, + calculatedVCB->vcbExtentsFile->fcbExtents16, kDataFork); + } + + /* catalog file */ + if (calculatedVCB->vcbCatalogFile) { + CheckHFSExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID, + calculatedVCB->vcbCatalogFile->fcbExtents16, kDataFork); + } + } + + /* Traverse the catalog btree */ + selCode = 0x8001; /* Get first record from BTree */ + err = GetBTreeRecord(GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint); + if (err != noErr) { + goto traverseExtents; + } + selCode = 1; /* Get next record */ + do { + if ((catRecord.recordType == kHFSPlusFileRecord) || + (catRecord.recordType == kHFSFileRecord)) { + + if (isHFSPlus) { + /* HFSPlus data fork */ + CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL, + catRecord.hfsPlusFile.dataFork.extents, kDataFork); + + /* HFSPlus resource fork */ + CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL, + catRecord.hfsPlusFile.resourceFork.extents, kRsrcFork); + } else { + /* HFS data extent */ + CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID, + catRecord.hfsFile.dataExtents, kDataFork); + + /* HFS resource extent */ + CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID, + catRecord.hfsFile.rsrcExtents, kRsrcFork); + } + } + + /* Access the next record */ + err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint ); + } while (err == noErr); + +traverseExtents: + /* Traverse the extents btree */ + selCode = 0x8001; /* Get first record from BTree */ + err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, 
&extentKey, &extentRecord, &recordSize, &hint); + if (err != noErr) { + goto traverseAttribute; + } + selCode = 1; /* Get next record */ + do { + if (isHFSPlus) { + CheckHFSPlusExtentRecords(GPtr, extentKey.hfsPlus.fileID, NULL, + extentRecord.hfsPlus, extentKey.hfsPlus.forkType); + } else { + CheckHFSExtentRecords(GPtr, extentKey.hfs.fileID, extentRecord.hfs, + extentKey.hfs.forkType); + } + + /* Access the next record */ + err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint); + } while (err == noErr); + +traverseAttribute: + /* Extended attributes are only supported in HFS Plus */ + if (!isHFSPlus) { + goto out; + } + + /* Traverse the attribute btree */ + selCode = 0x8001; /* Get first record from BTree */ + /* Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). This function only uses recordType + * field from inline attribute record. + */ + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint); + if (err != noErr) { + goto out; + } + selCode = 1; /* Get next record */ + do { + if (attrRecord.recordType == kHFSPlusAttrForkData) { + (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName)); + attrName[len] = '\0'; + + CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.forkData.theFork.extents, kEAData); + } else if (attrRecord.recordType == kHFSPlusAttrExtents) { + (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName)); + attrName[len] = '\0'; + + CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.overflowExtents.extents, kEAData); + } + + /* Access the next record + * Warning: Attribute record of type kHFSPlusAttrInlineData may be + * truncated on read! (4425232). This function only uses recordType + * field from inline attribute record. + */ + err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint); + } while (err == noErr); + +out: + if (err == btNotFound) { + err = noErr; + } + return err; +} /* FindOrigOverlapFiles */ + +/* Function: PrintOverlapFiles + * + * Description: Print the information about all unique overlapping files. + * 1. Sort the overlap extent in increasing order of fileID + * 2. For every unique fileID, prefix the string with fileID and find the + * filename/path based on fileID. + * If fileID > kHFSFirstUserCatalogNodeID, find path to file + * Else, find name of the system file. + * 3. Print the new string. + * Note that the path is printed only for HFS Plus volumes and not for + * plain HFS volumes. This is done by not allocating buffer for finding + * file path. + * + * Input: + * GPtr - Global scavenger structure pointer. 
+ * + * Output: + * nothing (void) + */ +void PrintOverlapFiles (SGlobPtr GPtr) +{ + OSErr err; + ExtentsTable **extentsTableH; + ExtentInfo *extentInfo; + unsigned int numOverlapExtents; + unsigned int buflen, filepathlen; + char *filepath = NULL; + UInt32 lastID = 0; + Boolean printMsg; + Boolean isHFSPlus; + int i; + + isHFSPlus = VolumeObjectIsHFSPlus(); + + extentsTableH = GPtr->overlappedExtents; + numOverlapExtents = (**extentsTableH).count; + + /* Sort the list according to file ID */ + qsort((**extentsTableH).extentInfo, numOverlapExtents, sizeof(ExtentInfo), + CompareExtentFileID); + + buflen = PATH_MAX * 4; + /* Allocate buffer to read data */ + if (isHFSPlus) { + filepath = malloc (buflen); + } + + for (i = 0; i < numOverlapExtents; i++) { + extentInfo = &((**extentsTableH).extentInfo[i]); + + /* Skip the same fileID */ + if (lastID == extentInfo->fileID) { + continue; + } + + lastID = extentInfo->fileID; + printMsg = false; + + if (filepath) { + filepathlen = buflen; + if (extentInfo->fileID >= kHFSFirstUserCatalogNodeID) { + /* Lookup the file path */ + err = GetFileNamePathByID (GPtr, extentInfo->fileID, filepath, &filepathlen, NULL, NULL, NULL); + } else { + /* Get system filename */ + err = GetSystemFileName (extentInfo->fileID, filepath, &filepathlen); + } + + if (err == noErr) { + /* print fileID, filepath */ + fsckPrint(GPtr->context, E_OvlExt, extentInfo->fileID, filepath); + printMsg = true; + } + + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\textentType=0x%x, startBlock=0x%x, blockCount=0x%x, attrName=%s\n", + extentInfo->forkType, extentInfo->startBlock, extentInfo->blockCount, extentInfo->attrname); + } + } + + if (printMsg == false) { + /* print only fileID */ + fsckPrint(GPtr->context, E_OvlExtID, extentInfo->fileID); + } + } + + if (filepath) { + free (filepath); + } + + return; +} /* PrintOverlapFiles */ + +/* Function: CompareExtentFileID + * + * Description: Compares the fileID from two ExtentInfo and return the + * comparison result. (since we have to arrange in ascending order) + * + * Input: + * first and second - void pointers to ExtentInfo structure. + * + * Output: + * >0 if first > second + * =0 if first == second + * <0 if first < second + */ +static int CompareExtentFileID(const void *first, const void *second) +{ + return (((ExtentInfo *)first)->fileID - + ((ExtentInfo *)second)->fileID); +} /* CompareExtentFileID */ + +/* Function: journal_replay + * + * Description: Replay journal on a journaled HFS+ volume. This function + * returns success if the volume is not journaled or the journal was not + * dirty. If there was any error in replaying the journal, a non-zero value + * is returned. + * + * Output: + * 0 - success, non-zero - failure. 
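+ *
+ * The replay is requested through a sysctl on the hfs VFS entry
+ * (CTL_VFS, vfc_typenum, HFS_REPLAY_JOURNAL) whose final MIB element is an
+ * open file descriptor for the block device, e.g. (illustrative device path):
+ *
+ *	retval = journal_replay("/dev/disk0s2");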
+ */ +//int journal_replay(SGlobPtr gptr) +int journal_replay(const char *block_device) +{ + int retval = 0; + struct vfsconf vfc; + int mib[4]; + int jfd; + + jfd = open(block_device, O_RDWR); + if (jfd == -1) { + retval = errno; + if (debug) + fplog(stderr, "Unable to open block device %s: %s", block_device, strerror(errno)); + goto out; + } + + retval = getvfsbyname("hfs", &vfc); + if (retval) { + goto out; + } + + mib[0] = CTL_VFS; + mib[1] = vfc.vfc_typenum; + mib[2] = HFS_REPLAY_JOURNAL; + mib[3] = jfd; + retval = sysctl(mib, 4, NULL, NULL, NULL, 0); + if (retval) { + retval = errno; + } + (void)close(jfd); + +out: + return retval; +} + diff --git a/fsck_hfs/dfalib/SVerify2.c b/fsck_hfs/dfalib/SVerify2.c new file mode 100644 index 0000000..4280b9c --- /dev/null +++ b/fsck_hfs/dfalib/SVerify2.c @@ -0,0 +1,1811 @@ +/* + * Copyright (c) 1999-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: SVerify2.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved. +*/ + +#include <sys/ioctl.h> +#include <sys/disk.h> + +#include "BTree.h" +#include "BTreePrivate.h" + +#include "Scavenger.h" + + +// Prototypes for internal subroutines +static int BTKeyChk( SGlobPtr GPtr, NodeDescPtr nodeP, BTreeControlBlock *btcb ); + + +/*------------------------------------------------------------------------------ + +Routine: ChkExtRec (Check Extent Record) + +Function: Checks out a generic extent record. + +Input: GPtr - pointer to scavenger global area. + extP - pointer to extent data record. + + +Output: lastExtentIndex - In normal case, it is set to the maximum number of + extents (3 or 8) for given file system. If the + function finds bad extent, it is set to the index + of the bad extent entry found. 
+ ChkExtRec - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ +OSErr ChkExtRec ( SGlobPtr GPtr, UInt32 fileID, const void *extents , unsigned int *lastExtentIndex ) +{ + short i; + Boolean isHFSPlus; + UInt32 numABlks; + UInt32 maxNABlks; + UInt32 extentBlockCount; + UInt32 extentStartBlock; + + maxNABlks = GPtr->calculatedVCB->vcbTotalBlocks; + numABlks = 1; + isHFSPlus = VolumeObjectIsHFSPlus( ); + + /* initialize default output for extent index */ + *lastExtentIndex = GPtr->numExtents; + + for ( i=0 ; i<GPtr->numExtents ; i++ ) + { + if ( isHFSPlus ) + { + extentBlockCount = ((HFSPlusExtentDescriptor *)extents)[i].blockCount; + extentStartBlock = ((HFSPlusExtentDescriptor *)extents)[i].startBlock; + } + else + { + extentBlockCount = ((HFSExtentDescriptor *)extents)[i].blockCount; + extentStartBlock = ((HFSExtentDescriptor *)extents)[i].startBlock; + } + + if ( extentStartBlock >= maxNABlks ) + { + *lastExtentIndex = i; + RcdError( GPtr, E_ExtEnt ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tCheckExtRecord: id=%u %d:(%u,%u), maxBlocks=%u (startBlock > maxBlocks)\n", + fileID, i, extentStartBlock, extentBlockCount, maxNABlks); + } + return( E_ExtEnt ); + } + /* Check if end of extent is beyond end of disk */ + if ( extentBlockCount >= (maxNABlks - extentStartBlock) ) + { + *lastExtentIndex = i; + RcdError( GPtr, E_ExtEnt ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tCheckExtRecord: id=%u %d:(%u,%u), maxBlocks=%u (blockCount > (maxBlocks - startBlock))\n", + fileID, i, extentStartBlock, extentBlockCount, maxNABlks); + } + return( E_ExtEnt ); + } + /* This condition is not checked for standard HFS volumes as it is valid + * to have extent with allocation block number 0 on standard HFS. + */ + if ( isHFSPlus && + ((extentStartBlock == 0) && (extentBlockCount != 0))) + { + *lastExtentIndex = i; + RcdError( GPtr, E_ExtEnt ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tCheckExtRecord: id=%u %d:(%u,%u), (startBlock == 0)\n", + fileID, i, extentStartBlock, extentBlockCount); + } + return( E_ExtEnt ); + + } + if ((extentStartBlock != 0) && (extentBlockCount == 0)) + { + *lastExtentIndex = i; + RcdError( GPtr, E_ExtEnt ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tCheckExtRecord: id=%u %d:(%u,%u), (blockCount == 0)\n", + fileID, i, extentStartBlock, extentBlockCount); + } + return( E_ExtEnt ); + } + if ( numABlks == 0 ) + { + if ( extentBlockCount != 0 ) + { + *lastExtentIndex = i; + RcdError( GPtr, E_ExtEnt ); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog ("\tCheckExtRecord: id=%u %d:(%u,%u), (blockCount != 0)\n", + fileID, i, extentStartBlock, extentBlockCount); + } + return( E_ExtEnt ); + } + } + numABlks = extentBlockCount; + } + + return( noErr ); +} + + +/*------------------------------------------------------------------------------ + +Routine: BTCheck - (BTree Check) + +Function Description: + Checks out the internal structure of a Btree file. The BTree + structure is enunumerated top down starting from the root node. + + A structure to store the current traversal state of each Btree level + is used. The function traverses Btree top to down till it finds + a leaf node - where it calls checkLeafRecord function for every + leaf record (if specified). The function then starts traversing + down from the next index node at previous BTree level. 
If all + index nodes in given BTree level are traversed top to down, + it starts traversing the next index node in a previous BTree level - + until it hits the root node. + + Btree traversal: + The tree is traversed in depth-first traversal - i.e. we recursively + traverse the children of a node before visiting its sibling. + For the btree shown below, this function will traverse as follows: + root B C E I H D G F + + (root node)----- + | B | + ----- + | + (node B)------------- + | C | D | F | + ------------- + / (node\ \ + (node C)------------- D)----- -------- (node F) + | E | I | H | | G | | leaf | + ------------- ----- -------- + / / \ | + -------- -------- -------- -------- + | leaf | | leaf | | leaf | | leaf | + -------- -------- -------- -------- + (node E) (node I) (node H) (node G) + +Input: + GPtr - pointer to scavenger global area + refNum - file refnum + checkLeafRecord - pointer to function that should be + called for every leaf record. + + +Output: BTCheck - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ + +int +BTCheck(SGlobPtr GPtr, short refNum, CheckLeafRecordProcPtr checkLeafRecord) +{ + OSErr result; + short i; + short keyLen; + UInt32 nodeNum; + short numRecs; /* number of records in current node */ + short index; /* index to current index record in index node */ + UInt16 recSize; + UInt8 parKey[ kMaxKeyLength + 2 + 2 ]; /* parent key for comparison */ + Boolean hasParKey = false; + UInt8 *dataPtr; + STPR *tprP; /* pointer to store BTree traversal state */ + STPR *parentP; + KeyPtr keyPtr; + BTHeaderRec *header; + NodeRec node; + NodeDescPtr nodeDescP; + UInt16 *statusFlag = NULL; + UInt32 leafRecords = 0; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( refNum ); + + node.buffer = NULL; + + // Set up + if ( refNum == kCalculatedCatalogRefNum ) + statusFlag = &(GPtr->CBTStat); + else if ( refNum == kCalculatedExtentRefNum ) + statusFlag = &(GPtr->EBTStat); + else if ( refNum == kCalculatedAttributesRefNum ) + statusFlag = &(GPtr->ABTStat); + else { + /* BTCheck is currently called only with the above three options. 
+ * Initialize status flag correctly if we call BTCheck with other + * options + */ + result = E_BadValue; + goto exit; + } + + GPtr->TarBlock = 0; + + /* + * Check out BTree header node + */ + result = GetNode( calculatedBTCB, kHeaderNodeNum, &node ); + if ( result != noErr ) + { + if ( result == fsBTInvalidNodeErr ) /* hfs_swap_BTNode failed */ + { + RcdError( GPtr, E_BadNode ); + result = E_BadNode; + } + node.buffer = NULL; + goto exit; + } + + nodeDescP = node.buffer; + + result = AllocBTN( GPtr, refNum, 0 ); + if (result) goto exit; /* node already allocated */ + + /* Check node kind */ + if ( nodeDescP->kind != kBTHeaderNode ) + { + RcdError( GPtr, E_BadHdrN ); + result = E_BadHdrN; + goto exit; + } + /* Check total records allowed in header node */ + if ( nodeDescP->numRecords != Num_HRecs ) + { + RcdError( GPtr, E_BadHdrN ); + result = E_BadHdrN; + goto exit; + } + /* Check node height */ + if ( nodeDescP->height != 0 ) + { + RcdError( GPtr, E_NHeight ); + result = E_NHeight; + goto exit; + } + + /* + * check out BTree Header record + */ + header = (BTHeaderRec*) ((Byte*)nodeDescP + sizeof(BTNodeDescriptor)); + recSize = GetRecordSize( (BTreeControlBlock *)calculatedBTCB, (BTNodeDescriptor *)nodeDescP, 0 ); + + /* Check header size */ + if ( recSize != sizeof(BTHeaderRec) ) + { + RcdError( GPtr, E_LenBTH ); + result = E_LenBTH; + goto exit; + } + /* Check tree depth */ + if ( header->treeDepth > BTMaxDepth ) + { + RcdError( GPtr, E_BTDepth ); + goto RebuildBTreeExit; + } + calculatedBTCB->treeDepth = header->treeDepth; + + /* Check validity of root node number */ + if ( header->rootNode >= calculatedBTCB->totalNodes || + (header->treeDepth != 0 && header->rootNode == kHeaderNodeNum) ) + { + if (debug) + plog("Header root node %u, calculated total nodes %u, tree depth %u, header node num %u\n", + header->rootNode, calculatedBTCB->totalNodes, + header->treeDepth, kHeaderNodeNum); + + RcdError( GPtr, E_BTRoot ); + goto RebuildBTreeExit; + } + calculatedBTCB->rootNode = header->rootNode; + + /* Check if tree depth or root node are zero */ + if ( (calculatedBTCB->treeDepth == 0) || (calculatedBTCB->rootNode == 0) ) + { + /* If both are zero, empty BTree */ + if ( calculatedBTCB->treeDepth != calculatedBTCB->rootNode ) + { + RcdError( GPtr, E_BTDepth ); + goto RebuildBTreeExit; + } + } + + /* + * Check the extents for the btree. + * HFS+ considers it an error for a node to be split across + * extents, on a journaled filesystem. + * + * If debug is set, then it continues examining the tree; otherwise, + * it exits with a rebuilt error. 
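+ *
+ * Concretely, the check below requires every extent's blockCount to be a
+ * whole multiple of (nodeSize / vcbBlockSize), so that no b-tree node can
+ * straddle an extent boundary.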
+ */ + if (CheckIfJournaled(GPtr, true) && + header->nodeSize > calculatedBTCB->fcbPtr->fcbVolume->vcbBlockSize) { + /* If it's journaled, it's HFS+ */ + HFSPlusExtentRecord *extp = &calculatedBTCB->fcbPtr->fcbExtents32; + int i; + int blocksPerNode = header->nodeSize / calculatedBTCB->fcbPtr->fcbVolume->vcbBlockSize; // How many blocks in a node + UInt32 totalBlocks = 0; + + /* + * First, go through the first 8 extents + */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if (((*extp)[i].blockCount % blocksPerNode) != 0) { + result = errRebuildBtree; + *statusFlag |= S_RebuildBTree; + fsckPrint(GPtr->context, E_BTreeSplitNode, calculatedBTCB->fcbPtr->fcbFileID); + if (debug == 0) { + goto exit; + } else { + plog("Improperly split node in file id %u, offset %u (extent #%d), Extent <%u, %u>\n", calculatedBTCB->fcbPtr->fcbFileID, totalBlocks, i, (*extp)[i].startBlock, (*extp)[i].blockCount); + } + } + totalBlocks += (*extp)[i].blockCount; + + } + /* + * Now, iterate through the extents overflow file if necessary. + * Style note: This is in a block so I can have local variables. + * It used to have a conditional, but that wasn't needed. + */ + { + int err; + BTreeIterator iterator = { 0 }; + FSBufferDescriptor btRecord = { 0 }; + HFSPlusExtentKey *key = (HFSPlusExtentKey*)&iterator.key; + HFSPlusExtentRecord extRecord = { 0 }; + UInt16 recordSize; + UInt32 fileID = calculatedBTCB->fcbPtr->fcbFileID; + static const int kDataForkType = 0; + + BuildExtentKey( true, kDataForkType, fileID, 0, (void*)key ); + btRecord.bufferAddress = &extRecord; + btRecord.itemCount = 1; + btRecord.itemSize = sizeof(extRecord); + + while (noErr == (err = BTIterateRecord(GPtr->calculatedExtentsFCB, kBTreeNextRecord, &iterator, &btRecord, &recordSize))) { + if (key->fileID != fileID || + key->forkType != kDataForkType) { + break; + } + for (i = 0; i < kHFSPlusExtentDensity; i++) { + if ((extRecord[i].blockCount % blocksPerNode) != 0) { + result = errRebuildBtree; + *statusFlag |= S_RebuildBTree; + fsckPrint(GPtr->context, E_BTreeSplitNode, fileID); + if (debug == 0) { + goto exit; + } else { + plog("Improperly split node in file id %u, startBlock %u, index %d (offset %u), extent <%u, %u>\n", fileID, key->startBlock, i, totalBlocks, extRecord[i].startBlock, extRecord[i].blockCount); + } + } + totalBlocks += extRecord[i].blockCount; + } + memset(&extRecord, 0, sizeof(extRecord)); + } + } + } + +#if 0 + plog( "\nB-Tree header rec: \n" ); + plog( " treeDepth = %d \n", header->treeDepth ); + plog( " rootNode = %d \n", header->rootNode ); + plog( " leafRecords = %d \n", header->leafRecords ); + plog( " firstLeafNode = %d \n", header->firstLeafNode ); + plog( " lastLeafNode = %d \n", header->lastLeafNode ); + plog( " totalNodes = %d \n", header->totalNodes ); + plog( " freeNodes = %d \n", header->freeNodes ); +#endif + + if (calculatedBTCB->rootNode == 0) { + // Empty btree, no need to continue + goto exit; + } + /* + * Set up tree path record for root level + */ + GPtr->BTLevel = 1; + /* BTPTPtr is an array of structure which stores the state + * of the btree traversal based on the current BTree level. + * It helps to traverse to parent node from a child node. + * tprP points to the correct offset to read/write. 
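+ * The array is indexed by (GPtr->BTLevel - 1), with level 1 being the root;
+ * each entry records the node number, the last record index visited, and
+ * the expected left and right sibling node numbers.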
+ */ + tprP = &(*GPtr->BTPTPtr)[0]; + tprP->TPRNodeN = calculatedBTCB->rootNode; + tprP->TPRRIndx = -1; /* last index accessed in a node */ + tprP->TPRLtSib = 0; + tprP->TPRRtSib = 0; + + /* + * Now enumerate the entire BTree + */ + while ( GPtr->BTLevel > 0 ) + { + tprP = &(*GPtr->BTPTPtr)[GPtr->BTLevel -1]; + nodeNum = tprP->TPRNodeN; + index = tprP->TPRRIndx; + + GPtr->TarBlock = nodeNum; + + (void) ReleaseNode(calculatedBTCB, &node); + result = GetNode( calculatedBTCB, nodeNum, &node ); + if ( result != noErr ) + { + if ( result == fsBTInvalidNodeErr ) /* hfs_swap_BTNode failed */ + { + RcdError( GPtr, E_BadNode ); + result = E_BadNode; + } + node.buffer = NULL; + if (debug) + { + /* Try to continue checking other nodes. + * + * Decrement the current btree level as we want to access + * the right sibling index record, if any, of our parent. + */ + GPtr->BTLevel--; + continue; + } + goto exit; + } + nodeDescP = node.buffer; + + /* + * Check out and allocate the node if its the first time its been seen + */ + if ( index < 0 ) + { +#if 0 // + // this will print out our leaf node order + if ( nodeDescP->kind == kBTLeafNode ) + { + static int myCounter = 0; + if ( myCounter > 19 ) + { + myCounter = 0; + plog( "\n " ); + } + plog( "%d ", nodeNum ); + + myCounter++; + } +#endif + + /* Allocate BTree node */ + result = AllocBTN( GPtr, refNum, nodeNum ); + if ( result ) + { + /* node already allocated can be fixed if it is an index node */ + goto RebuildBTreeExit; + } + + /* Check keys in the node */ + result = BTKeyChk( GPtr, nodeDescP, calculatedBTCB ); + if ( result ) + { + /* we should be able to fix any E_KeyOrd error or any B-Tree key */ + /* errors with an index node. */ + if ( E_KeyOrd == result || nodeDescP->kind == kBTIndexNode ) + { + *statusFlag |= S_RebuildBTree; + result = errRebuildBtree; + } + else + { + goto exit; + } + } + + /* Check backward link of this node */ + if ( nodeDescP->bLink != tprP->TPRLtSib ) + { + result = E_SibLk; + RcdError( GPtr, E_SibLk ); + if (debug) + printf("Node %d's back link is 0x%x; expected 0x%x\n" + " disk offset = 0x%llx, size = 0x%x\n", + nodeNum, nodeDescP->bLink, tprP->TPRLtSib, + ((Buf_t *)(node.blockHeader))->Offset, ((Buf_t *)(node.blockHeader))->Length); + if (!debug) + goto RebuildBTreeExit; + } + if ( tprP->TPRRtSib == -1 ) + { + tprP->TPRRtSib = nodeNum; /* set Rt sibling for later verification */ + } + else + { + /* Check forward link for this node */ + if ( nodeDescP->fLink != tprP->TPRRtSib ) + { + result = E_SibLk; + RcdError( GPtr, E_SibLk ); + if (debug) + printf("Node %d's forward link is 0x%x; expected 0x%x\n" + " disk offset = 0x%llx, size = 0x%x\n", + nodeNum, nodeDescP->fLink, tprP->TPRRtSib, + ((Buf_t *)(node.blockHeader))->Offset, ((Buf_t *)(node.blockHeader))->Length); + if (!debug) + goto RebuildBTreeExit; + } + } + + /* Check node kind - it should either be index node or leaf node */ + if ( (nodeDescP->kind != kBTIndexNode) && (nodeDescP->kind != kBTLeafNode) ) + { + result = E_NType; + RcdError( GPtr, E_NType ); + if (!debug) goto exit; + } + /* Check if the height of this node is correct based on calculated + * tree depth and current btree level of the traversal + */ + if ( nodeDescP->height != calculatedBTCB->treeDepth - GPtr->BTLevel + 1 ) + { + result = E_NHeight; + RcdError( GPtr, E_NHeight ); + if (!debug) goto RebuildBTreeExit; + } + + if (result && (cur_debug_level & d_dump_node)) + { + plog("Node %u:\n", node.blockNum); + HexDump(node.buffer, node.blockSize, TRUE); + GPtr->BTLevel--; + continue; + } + + /* If we 
saved the first key in the parent (index) node in past, use it to compare + * with the key of the first record in the current node. This check should + * be performed for all nodes except the root node. + */ + if ( hasParKey == true ) + { + GetRecordByIndex( (BTreeControlBlock *)calculatedBTCB, nodeDescP, 0, &keyPtr, &dataPtr, &recSize ); + if ( CompareKeys( (BTreeControlBlockPtr)calculatedBTCB, (BTreeKey *)parKey, keyPtr ) != 0 ) + { + if (debug) + { + plog("Index key doesn't match first node key\n"); + if (cur_debug_level & d_dump_record) + { + plog("Found (child; node %u):\n", tprP->TPRNodeN); + HexDump(keyPtr, CalcKeySize(calculatedBTCB, keyPtr), FALSE); + plog("Expected (parent; node %u):\n", tprP[-1].TPRNodeN); + HexDump(parKey, CalcKeySize(calculatedBTCB, (BTreeKey *)parKey), FALSE); + } + } + RcdError( GPtr, E_IKey ); + *statusFlag |= S_RebuildBTree; + result = errRebuildBtree; + } + } + if ( nodeDescP->kind == kBTIndexNode ) + { + if ( ( result = CheckForStop( GPtr ) ) ) + goto exit; + } + + GPtr->itemsProcessed++; + } + + numRecs = nodeDescP->numRecords; + + /* + * for an index node ... + */ + if ( nodeDescP->kind == kBTIndexNode ) + { + index++; /* on to next index record */ + if ( index >= numRecs ) + { + /* We have traversed children of all index records in this index node. + * Decrement the current btree level to access right sibling index record + * of previous btree level + */ + GPtr->BTLevel--; + continue; /* No more records */ + } + + /* Store current index for current Btree level */ + tprP->TPRRIndx = index; + /* Store current pointer as parent for next traversal */ + parentP = tprP; + /* Increase the current Btree level because we traverse top to down */ + GPtr->BTLevel++; + + /* Validate current btree traversal level */ + if ( GPtr->BTLevel > BTMaxDepth ) + { + RcdError( GPtr, E_BTDepth ); + goto RebuildBTreeExit; + } + /* Get the btree traversal state for current btree level */ + tprP = &(*GPtr->BTPTPtr)[GPtr->BTLevel -1]; + + /* Get index record in the current btree level at offset index in the given node */ + GetRecordByIndex( (BTreeControlBlock *)calculatedBTCB, nodeDescP, + index, &keyPtr, &dataPtr, &recSize ); + + nodeNum = *(UInt32*)dataPtr; + /* Current node number should not be header node number or greater than total nodes */ + if ( (nodeNum == kHeaderNodeNum) || (nodeNum >= calculatedBTCB->totalNodes) ) + { + RcdError( GPtr, E_IndxLk ); + goto RebuildBTreeExit; + } + + /* + * Make a copy of the parent's key so we can compare it + * with the child's key later. + */ + keyLen = ( calculatedBTCB->attributes & kBTBigKeysMask ) + ? 
keyPtr->length16 + sizeof(UInt16) + : keyPtr->length8 + sizeof(UInt8); + CopyMemory(keyPtr, parKey, keyLen); + hasParKey = true; + + /* Store current node number for the child node */ + tprP->TPRNodeN = nodeNum; + /* Initialize index to records for the child node */ + tprP->TPRRIndx = -1; + + tprP->TPRLtSib = 0; /* left sibling */ + if ( index > 0 ) + { + /* Get node number for the previous index record in current index node */ + GetRecordByIndex( (BTreeControlBlock *)calculatedBTCB, nodeDescP, index-1, &keyPtr, &dataPtr, &recSize ); + + nodeNum = *(UInt32*)dataPtr; + /* node number should not be header node number or greater than total nodes */ + if ( (nodeNum == kHeaderNodeNum) || (nodeNum >= calculatedBTCB->totalNodes) ) + { + RcdError( GPtr, E_IndxLk ); + goto RebuildBTreeExit; + } + /* Store this as left sibling node */ + tprP->TPRLtSib = nodeNum; + } + else + { + if ( parentP->TPRLtSib != 0 ) + tprP->TPRLtSib = tprP->TPRRtSib; /* Fill in the missing link */ + } + + tprP->TPRRtSib = 0; /* right sibling */ + if ( index < (numRecs -1) ) + { + /* Get node number for the next index record in current index node */ + GetRecordByIndex( (BTreeControlBlock *)calculatedBTCB, nodeDescP, index+1, &keyPtr, &dataPtr, &recSize ); + + nodeNum = *(UInt32*)dataPtr; + /* node number should not be header node number or greater than total nodes */ + if ( (nodeNum == kHeaderNodeNum) || (nodeNum >= calculatedBTCB->totalNodes) ) + { + RcdError( GPtr, E_IndxLk ); + goto RebuildBTreeExit; + } + /* Store this as right sibling node */ + tprP->TPRRtSib = nodeNum; + } + else + { + if ( parentP->TPRRtSib != 0 ) + tprP->TPRRtSib = -1; /* Link to be filled in later */ + } + } + + /* + * For a leaf node ... + */ + else + { + /* If left sibling link is zero, this is first leaf node */ + if ( tprP->TPRLtSib == 0 ) + calculatedBTCB->firstLeafNode = nodeNum; + /* If right sibling link is zero, this is last leaf node */ + if ( tprP->TPRRtSib == 0 ) + calculatedBTCB->lastLeafNode = nodeNum; + leafRecords += nodeDescP->numRecords; + + if (checkLeafRecord != NULL) { + /* For total number of records in this leaf node, get each record sequentially + * and call function to check individual leaf record through the + * function pointer passed by the caller + */ + for (i = 0; i < nodeDescP->numRecords; i++) { + GetRecordByIndex(calculatedBTCB, nodeDescP, i, &keyPtr, &dataPtr, &recSize); + result = checkLeafRecord(GPtr, keyPtr, dataPtr, recSize); + if (result) goto exit; + } + } + /* Decrement the current btree level as we want to access + * the right sibling index record, if any, of our parent. + */ + GPtr->BTLevel--; + continue; + } + } /* end while */ + + calculatedBTCB->leafRecords = leafRecords; + +exit: + if (result == noErr && (*statusFlag & S_RebuildBTree)) + result = errRebuildBtree; + if (node.buffer != NULL) + (void) ReleaseNode(calculatedBTCB, &node); + + return( result ); + +RebuildBTreeExit: + /* force a B-Tree file rebuild */ + *statusFlag |= S_RebuildBTree; + result = errRebuildBtree; + goto exit; + +} /* end of BTCheck */ + + + +/*------------------------------------------------------------------------------ + +Routine: BTMapChk - (BTree Map Check) + +Function: Checks out the structure of a BTree allocation map. 
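+			The map begins with the map record in the header node (record index 2)
+			and continues through map nodes chained via their fLink fields until
+			(totalNodes + 7) / 8 bytes of map have been covered.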
+ +Input: GPtr - pointer to scavenger global area + fileRefNum - refnum of BTree file + +Output: BTMapChk - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +int BTMapChk( SGlobPtr GPtr, short fileRefNum ) +{ + OSErr result; + UInt16 recSize; + SInt32 mapSize; + UInt32 nodeNum; + SInt16 recIndx; + NodeRec node; + NodeDescPtr nodeDescP; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( fileRefNum ); + + result = noErr; + nodeNum = 0; /* Start with header node */ + node.buffer = NULL; + recIndx = 2; + mapSize = ( calculatedBTCB->totalNodes + 7 ) / 8; /* size in bytes */ + + /* + * Enumerate the map structure starting with the map record in the header node + */ + while ( mapSize > 0 ) + { + GPtr->TarBlock = nodeNum; + + if (node.buffer != NULL) + (void) ReleaseNode(calculatedBTCB, &node); + result = GetNode( calculatedBTCB, nodeNum, &node ); + if ( result != noErr ) + { + if ( result == fsBTInvalidNodeErr ) /* hfs_swap_BTNode failed */ + { + RcdError( GPtr, E_BadNode ); + result = E_BadNode; + } + return( result ); + } + + nodeDescP = node.buffer; + + /* Check out the node if its not the header node */ + + if ( nodeNum != 0 ) + { + result = AllocBTN( GPtr, fileRefNum, nodeNum ); + if (result) goto exit; /* Error, node already allocated? */ + + if ( nodeDescP->kind != kBTMapNode ) + { + RcdError( GPtr, E_BadMapN ); + if (debug) + plog("Expected map node, got type %d\n", nodeDescP->kind); + result = E_BadMapN; + goto exit; + } + if ( nodeDescP->numRecords != Num_MRecs ) + { + RcdError( GPtr, E_BadMapN ); + if (debug) + plog("Expected %d records in node, found %d\n", Num_MRecs, nodeDescP->numRecords); + result = E_BadMapN; + goto exit; + } + if ( nodeDescP->height != 0 ) + RcdError( GPtr, E_NHeight ); + } + + // Move on to the next map node + recSize = GetRecordSize( (BTreeControlBlock *)calculatedBTCB, (BTNodeDescriptor *)nodeDescP, recIndx ); + mapSize -= recSize; /* Adjust remaining map size */ + + recIndx = 0; /* Map record is now record 0 */ + nodeNum = nodeDescP->fLink; + if (nodeNum == 0) + break; + + } /* end while */ + + + if ( (nodeNum != 0) || (mapSize > 0) ) + { + RcdError( GPtr, E_MapLk); + result = E_MapLk; /* bad map node linkage */ + } +exit: + if (node.buffer != NULL) + (void) ReleaseNode(calculatedBTCB, &node); + + return( result ); + +} /* end BTMapChk */ + + + +/*------------------------------------------------------------------------------ + +Routine: BTCheckUnusedNodes + +Function: Examines all unused nodes and makes sure they are filled with zeroes. + If there are any unused nodes which are not zero filled, bit mask + S_UnusedNodesNotZero is set in output btStat; the function result + is zero in this case. 
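+			A node is treated as unused when its bit is clear in the in-memory
+			B-tree bitmap (BTCBMPtr); each such node is read raw, without byte
+			swapping, and scanned one UInt32 at a time for nonzero data.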
+ +Input: GPtr - pointer to scavenger global area + fileRefNum - refnum of BTree file + +Output: *btStat - bit mask S_UnusedNodesNotZero + BTCheckUnusedNodes - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +int BTCheckUnusedNodes(SGlobPtr GPtr, short fileRefNum, UInt16 *btStat) +{ + BTreeControlBlock *btcb = GetBTreeControlBlock(fileRefNum); + unsigned char *bitmap = (unsigned char *) ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr; + unsigned char mask = 0x80; + OSErr err; + UInt32 nodeNum; + BlockDescriptor node; + + node.buffer = NULL; + + for (nodeNum = 0; nodeNum < btcb->totalNodes; ++nodeNum) + { + if ((*bitmap & mask) == 0) + { + UInt32 i; + UInt32 bufferSize; + UInt32 *buffer; + + /* Read the raw node, without going through hfs_swap_BTNode. */ + err = btcb->getBlockProc(btcb->fcbPtr, nodeNum, kGetBlock, &node); + if (err) + { + if (debug) plog("Couldn't read node #%u\n", nodeNum); + return err; + } + + /* + * Make sure node->blockSize bytes at address node->buffer are zero. + */ + buffer = (UInt32 *) node.buffer; + bufferSize = node.blockSize / sizeof(UInt32); + + for (i = 0; i < bufferSize; ++i) + { + if (buffer[i]) + { + *btStat |= S_UnusedNodesNotZero; + GPtr->TarBlock = nodeNum; + fsckPrint(GPtr->context, E_UnusedNodeNotZeroed, nodeNum); + + if (!debug) + { + /* Stop now; repair will zero all unused nodes. */ + goto done; + } + + /* No need to check the rest of this node. */ + break; + } + } + + /* Release the node without going through hfs_swap_BTNode. */ + (void) btcb->releaseBlockProc(btcb->fcbPtr, &node, kReleaseBlock); + node.buffer = NULL; + } + + /* Move to the next bit in the bitmap. */ + mask >>= 1; + if (mask == 0) + { + mask = 0x80; + ++bitmap; + } + } +done: + if (node.buffer) + { + (void) btcb->releaseBlockProc(btcb->fcbPtr, &node, kReleaseBlock); + } + + return 0; +} /* end BTCheckUnusedNodes */ + + + +/*------------------------------------------------------------------------------ + +Routine: CmpBTH - (Compare BTree Header) + +Function: Compares the scavenger BTH info with the BTH on disk. 
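+			The comparison covers leafRecords, treeDepth, rootNode, firstLeafNode,
+			lastLeafNode, nodeSize, maxKeyLength, totalNodes and freeNodes; any
+			mismatch sets S_BTH in the corresponding B-tree status word so that
+			the header can be repaired.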
+ +Input: GPtr - pointer to scavenger global area + fileRefNum - file refnum + +Output: CmpBTH - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +OSErr CmpBTH( SGlobPtr GPtr, SInt16 fileRefNum ) +{ + OSErr err; + BTHeaderRec bTreeHeader; + BTreeControlBlock *calculatedBTCB = GetBTreeControlBlock( fileRefNum ); + SInt16 *statP; + SFCB * fcb; + short isBTHDamaged = 0; + short printMsg = 0; + + switch (fileRefNum) { + case kCalculatedCatalogRefNum: + statP = (SInt16 *)&GPtr->CBTStat; + fcb = GPtr->calculatedCatalogFCB; + break; + case kCalculatedExtentRefNum: + statP = (SInt16 *)&GPtr->EBTStat; + fcb = GPtr->calculatedExtentsFCB; + break; + case kCalculatedAttributesRefNum: + statP = (SInt16 *)&GPtr->ABTStat; + fcb = GPtr->calculatedAttributesFCB; + break; + default: + return (-1); + }; + + /* + * Get BTree header record from disk + */ + GPtr->TarBlock = 0; // Set target node number + + err = GetBTreeHeader(GPtr, fcb, &bTreeHeader ); + ReturnIfError( err ); + + if (calculatedBTCB->leafRecords != bTreeHeader.leafRecords) { + char goodStr[32], badStr[32]; + + printMsg = 1; + fsckPrint(GPtr->context, E_LeafCnt); + sprintf(goodStr, "%ld", (long)calculatedBTCB->leafRecords); + sprintf(badStr, "%ld", (long)bTreeHeader.leafRecords); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + } + + if ( calculatedBTCB->treeDepth != bTreeHeader.treeDepth ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid tree depth - calculated %d header %d \n", + calculatedBTCB->treeDepth, bTreeHeader.treeDepth); + isBTHDamaged = 1; + } else if ( calculatedBTCB->rootNode != bTreeHeader.rootNode ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid root node - calculated %d header %d \n", + calculatedBTCB->rootNode, bTreeHeader.rootNode); + isBTHDamaged = 1; + } else if ( calculatedBTCB->firstLeafNode != bTreeHeader.firstLeafNode ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid first leaf node - calculated %d header %d \n", + calculatedBTCB->firstLeafNode, bTreeHeader.firstLeafNode); + isBTHDamaged = 1; + } else if ( calculatedBTCB->lastLeafNode != bTreeHeader.lastLeafNode ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid last leaf node - calculated %d header %d \n", + calculatedBTCB->lastLeafNode, bTreeHeader.lastLeafNode); + isBTHDamaged = 1; + } else if ( calculatedBTCB->nodeSize != bTreeHeader.nodeSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid node size - calculated %d header %d \n", + calculatedBTCB->nodeSize, bTreeHeader.nodeSize); + isBTHDamaged = 1; + } else if ( calculatedBTCB->maxKeyLength != bTreeHeader.maxKeyLength ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid max key length - calculated %d header %d \n", + calculatedBTCB->maxKeyLength, bTreeHeader.maxKeyLength); + isBTHDamaged = 1; + } else if ( calculatedBTCB->totalNodes != bTreeHeader.totalNodes ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid total nodes - calculated %d header %d \n", + calculatedBTCB->totalNodes, bTreeHeader.totalNodes); + isBTHDamaged = 1; + } else if ( calculatedBTCB->freeNodes != bTreeHeader.freeNodes ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog("\tinvalid free nodes - calculated %d header %d \n", + calculatedBTCB->freeNodes, bTreeHeader.freeNodes); + isBTHDamaged = 1; + } + + if (isBTHDamaged || printMsg) { + *statP = *statP | S_BTH; + if 
(isBTHDamaged) { + fsckPrint(GPtr->context, E_InvalidBTreeHeader); + } + } + return( noErr ); +} + + + +/*------------------------------------------------------------------------------ + +Routine: CmpBlock + +Function: Compares two data blocks for equality. + +Input: Blk1Ptr - pointer to 1st data block. + Blk2Ptr - pointer to 2nd data block. + len - size of the blocks (in bytes) + +Output: CmpBlock - result code + 0 = equal + 1 = not equal +------------------------------------------------------------------------------*/ + +OSErr CmpBlock( void *block1P, void *block2P, size_t length ) +{ + Byte *blk1Ptr = block1P; + Byte *blk2Ptr = block2P; + + while ( length-- ) + if ( *blk1Ptr++ != *blk2Ptr++ ) + return( -1 ); + + return( noErr ); + +} + + + +/*------------------------------------------------------------------------------ + +Routine: CmpBTM - (Compare BTree Map) + +Function: Compares the scavenger BTM with the BTM on disk. + +Input: GPtr - pointer to scavenger global area + fileRefNum - file refnum + +Output: CmpBTM - function result: + 0 = no error + n = error +------------------------------------------------------------------------------*/ + +int CmpBTM( SGlobPtr GPtr, short fileRefNum ) +{ + OSErr result; + UInt16 recSize; + SInt32 mapSize; + SInt32 size; + UInt32 nodeNum; + short recIndx; + char *p; + char *sbtmP; + UInt8 * dataPtr; + NodeRec node; + NodeDescPtr nodeDescP; + BTreeControlBlock *calculatedBTCB; + UInt16 *statP; + + result = noErr; + calculatedBTCB = GetBTreeControlBlock( fileRefNum ); + + switch (fileRefNum) { + case kCalculatedCatalogRefNum: + statP = &GPtr->CBTStat; + break; + case kCalculatedExtentRefNum: + statP = &GPtr->EBTStat; + break; + case kCalculatedAttributesRefNum: + statP = &GPtr->ABTStat; + break; + default: + return (-1); + }; + + nodeNum = 0; /* start with header node */ + node.buffer = NULL; + recIndx = 2; + recSize = size = 0; + mapSize = (calculatedBTCB->totalNodes + 7) / 8; /* size in bytes */ + sbtmP = ((BTreeExtensionsRec*)calculatedBTCB->refCon)->BTCBMPtr; + dataPtr = NULL; + + /* + * Enumerate BTree map records starting with map record in header node + */ + while ( mapSize > 0 ) + { + GPtr->TarBlock = nodeNum; + + if (node.buffer != NULL) + (void) ReleaseNode(calculatedBTCB, &node); + + result = GetNode( calculatedBTCB, nodeNum, &node ); + if (result) goto exit; /* error, could't get map node */ + + nodeDescP = node.buffer; + + recSize = GetRecordSize( (BTreeControlBlock *)calculatedBTCB, (BTNodeDescriptor *)nodeDescP, recIndx ); + dataPtr = GetRecordAddress( (BTreeControlBlock *)calculatedBTCB, (BTNodeDescriptor *)nodeDescP, recIndx ); + + size = ( recSize > mapSize ) ? 
mapSize : recSize; + + result = CmpBlock( sbtmP, dataPtr, size ); + if ( result != noErr ) + { + *statP = *statP | S_BTM; /* didn't match, mark it damaged */ + RcdError(GPtr, E_BadMapN); + result = 0; /* mismatch isn't fatal; let us continue */ + goto exit; + } + + recIndx = 0; /* map record is now record 0 */ + mapSize -= size; /* adjust remaining map size */ + sbtmP = sbtmP + size; + nodeNum = nodeDescP->fLink; /* next node number */ + if (nodeNum == 0) + break; + + } /* end while */ + + /* + * Make sure the unused portion of the last map record is zero + */ + for ( p = (Ptr)dataPtr + size ; p < (Ptr)dataPtr + recSize ; p++ ) + if ( *p != 0 ) + *statP = *statP | S_BTM; /* didn't match, mark it damaged */ + +exit: + if (node.buffer != NULL) + (void) ReleaseNode(calculatedBTCB, &node); + + return( result ); + +} /* end CmpBTM */ + + +/*------------------------------------------------------------------------------ + +Routine: BTKeyChk - (BTree Key Check) + +Function: Checks out the key structure within a Btree node. + +Input: GPtr - pointer to scavenger global area + NodePtr - pointer to target node + BTCBPtr - pointer to BTreeControlBlock + +Output: BTKeyChk - function result: + 0 = no error + n = error code +------------------------------------------------------------------------------*/ +extern HFSPlusCatalogKey gMetaDataDirKey; + +static int BTKeyChk( SGlobPtr GPtr, NodeDescPtr nodeP, BTreeControlBlock *btcb ) +{ + SInt16 index; + UInt16 dataSize; + UInt16 keyLength; + UInt16 prevKeyLength = 0; + KeyPtr keyPtr; + UInt8 *dataPtr; + KeyPtr prevkeyP = nil; + unsigned sizeofKeyLength; + int result = noErr; + + if (btcb->attributes & kBTBigKeysMask) + sizeofKeyLength = 2; + else + sizeofKeyLength = 1; + + if ( nodeP->numRecords == 0 ) + { + if ( (nodeP->fLink == 0) && (nodeP->bLink == 0) ) + { + RcdError( GPtr, E_BadNode ); + return( E_BadNode ); + } + } + else + { + /* + * Loop on number of records in node + */ + for ( index = 0; index < nodeP->numRecords; index++) + { + GetRecordByIndex( (BTreeControlBlock *)btcb, nodeP, (UInt16) index, &keyPtr, &dataPtr, &dataSize ); + + if (btcb->attributes & kBTBigKeysMask) + keyLength = keyPtr->length16; + else + keyLength = keyPtr->length8; + + if ( keyLength > btcb->maxKeyLength ) + { + RcdError( GPtr, E_KeyLen ); + return( E_KeyLen ); + } + + if ( prevkeyP != nil ) + { + if ( CompareKeys( (BTreeControlBlockPtr)btcb, prevkeyP, keyPtr ) >= 0 ) + { + /* + * When the HFS+ MetaDataDirKey is out of order we mark + * the result code so that it can be deleted later. 
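+ * The returned value is offset by 1000 (E_KeyOrd + 1000) so the caller can
+ * distinguish this special case from an ordinary out-of-order key.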
+ */ + if ((btcb->maxKeyLength == kHFSPlusCatalogKeyMaximumLength) && + (CompareKeys(btcb, prevkeyP, (KeyPtr)&gMetaDataDirKey) == 0)) + { + if (fsckGetVerbosity(GPtr->context) > 0) + plog("Problem: b-tree key for \"HFS+ Private Data\" directory is out of order.\n"); + return( E_KeyOrd + 1000 ); + } + else + { + RcdError( GPtr, E_KeyOrd ); + plog("Records %d and %d (0-based); offsets 0x%04X and 0x%04X\n", index-1, index, (long)prevkeyP - (long)nodeP, (long)keyPtr - (long)nodeP); + result = E_KeyOrd; + } + } + } + prevkeyP = keyPtr; + prevKeyLength = keyLength; + } + } + + if (result == E_KeyOrd) + { + if (cur_debug_level & d_dump_record) + { + for (index = 0; index < nodeP->numRecords; ++index) + { + GetRecordByIndex( (BTreeControlBlock *)btcb, nodeP, (UInt16) index, &keyPtr, &dataPtr, &dataSize ); + + if (btcb->attributes & kBTBigKeysMask) + keyLength = keyPtr->length16; + else + keyLength = keyPtr->length8; + + plog("Record %d (offset 0x%04X):\n", index, (long)keyPtr - (long)nodeP); + HexDump(keyPtr, keyLength + sizeofKeyLength, FALSE); + plog("--\n"); + HexDump(dataPtr, dataSize, FALSE); + plog("\n"); + } + } + + if (cur_debug_level & d_dump_node) + { + plog("Node:\n"); + HexDump(nodeP, btcb->nodeSize, TRUE); + } + } + + return( result ); +} + + + +/*------------------------------------------------------------------------------ + +Routine: ChkCName (Check Catalog Name) + +Function: Checks out a generic catalog name. + +Input: GPtr - pointer to scavenger global area. + CNamePtr - pointer to CName. + +Output: ChkCName - function result: + 0 = CName is OK + E_CName = invalid CName +------------------------------------------------------------------------------*/ + +OSErr ChkCName( SGlobPtr GPtr, const CatalogName *name, Boolean unicode ) +{ + UInt32 length; + OSErr err = noErr; + + length = CatalogNameLength( name, unicode ); + + if ( unicode ) + { + if ( (length == 0) || (length > kHFSPlusMaxFileNameChars) ) + err = E_CName; + } + else + { + if ( (length == 0) || (length > kHFSMaxFileNameChars) ) + err = E_CName; + } + + return( err ); +} + + +/*------------------------------------------------------------------------------ + +Routine: CmpMDB - (Compare Master Directory Block) + +Function: Compares the scavenger MDB info with the MDB on disk. + +Input: GPtr - pointer to scavenger global area + +Output: CmpMDB - function result: + 0 = no error + n = error + GPtr->VIStat - S_MDB flag set in VIStat if MDB is damaged. 
+------------------------------------------------------------------------------*/ + +int CmpMDB( SGlobPtr GPtr, HFSMasterDirectoryBlock * mdbP) +{ + short i; + SFCB * fcbP; + SVCB * vcb; + short printMsg = 0; + short isMDBDamaged = 0; + + // Set up + GPtr->TarID = MDB_FNum; + vcb = GPtr->calculatedVCB; + + /* + * compare VCB info with MDB + */ + if ( mdbP->drSigWord != vcb->vcbSignature ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drSigWord \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drCrDate != vcb->vcbCreateDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drCrDate \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drLsMod != vcb->vcbModifyDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drLsMod \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drAtrb != (UInt16)vcb->vcbAttributes ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drAtrb \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drVBMSt != vcb->vcbVBMSt ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drVBMSt \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drNmAlBlks != vcb->vcbTotalBlocks ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drNmAlBlks \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drClpSiz != vcb->vcbDataClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drClpSiz \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drAlBlSt != vcb->vcbAlBlSt ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drAlBlSt \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drNxtCNID != vcb->vcbNextCatalogID ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drNxtCNID \n" ); + isMDBDamaged = 1; + } + if ( CmpBlock( mdbP->drVN, vcb->vcbVN, mdbP->drVN[0]+1 ) ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drVN \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drVolBkUp != vcb->vcbBackupDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drVolBkUp \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drVSeqNum != vcb->vcbVSeqNum ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drVSeqNum \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drWrCnt != vcb->vcbWriteCount ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drWrCnt \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drXTClpSiz != vcb->vcbExtentsFile->fcbClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drXTClpSiz \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drCTClpSiz != vcb->vcbCatalogFile->fcbClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drCTClpSiz \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drNmRtDirs != vcb->vcbNmRtDirs ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drNmRtDirs \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drFilCnt != vcb->vcbFileCount ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drFilCnt \n" ); + isMDBDamaged = 1; + } + if ( mdbP->drDirCnt != vcb->vcbFolderCount ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drDirCnt \n" ); + isMDBDamaged = 1; + } + if ( CmpBlock(mdbP->drFndrInfo, vcb->vcbFinderInfo, 32 ) ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid MDB drFndrInfo \n" ); + isMDBDamaged = 1; + } + 
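+ /* The field comparisons above only set isMDBDamaged and are reported collectively at
+ * the end of this routine; the extent and catalog file allocation checks below call
+ * WriteError( GPtr, E_MDBDamaged, ... ) as soon as a mismatch is found.
+ */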
+ /* + * compare extent file allocation info with MDB + */ + fcbP = vcb->vcbExtentsFile; /* compare PEOF for extent file */ + if ( mdbP->drXTFlSize != fcbP->fcbPhysicalSize ) + { + printMsg = 1; + WriteError ( GPtr, E_MDBDamaged, 3, 0 ); + } + for ( i = 0; i < GPtr->numExtents; i++ ) + { + if ( (mdbP->drXTExtRec[i].startBlock != fcbP->fcbExtents16[i].startBlock) || + (mdbP->drXTExtRec[i].blockCount != fcbP->fcbExtents16[i].blockCount) ) + { + printMsg = 1; + WriteError ( GPtr, E_MDBDamaged, 4, 0 ); + } + } + + /* + * compare catalog file allocation info with MDB + */ + fcbP = vcb->vcbCatalogFile; /* compare PEOF for catalog file */ + if ( mdbP->drCTFlSize != fcbP->fcbPhysicalSize ) + { + printMsg = 1; + WriteError ( GPtr, E_MDBDamaged, 5, 0 ); + } + for ( i = 0; i < GPtr->numExtents; i++ ) + { + if ( (mdbP->drCTExtRec[i].startBlock != fcbP->fcbExtents16[i].startBlock) || + (mdbP->drCTExtRec[i].blockCount != fcbP->fcbExtents16[i].blockCount) ) + { + printMsg = 1; + WriteError ( GPtr, E_MDBDamaged, 6, 0 ); + } + } + + if (isMDBDamaged || printMsg) { + GPtr->VIStat = GPtr->VIStat | S_MDB; + if (isMDBDamaged) + WriteError ( GPtr, E_MDBDamaged, 1, 0 ); + } + return( noErr ); + +} /* end CmpMDB */ + + + +/*------------------------------------------------------------------------------ + +Routine: CompareVolumeHeader - (Compare VolumeHeader Block) + +Function: Compares the scavenger VolumeHeader info with the VolumeHeader on disk. + +Input: GPtr - pointer to scavenger global area + +Output: CmpMDB - function result: + 0 = no error + n = error + GPtr->VIStat - S_MDB flag set in VIStat if MDB is damaged. +------------------------------------------------------------------------------*/ + +OSErr CompareVolumeHeader( SGlobPtr GPtr, HFSPlusVolumeHeader *volumeHeader ) +{ + SInt16 i; + SVCB *vcb; + SFCB *fcbP; + UInt32 hfsPlusIOPosOffset; + UInt32 goodValue, badValue; + char goodStr[32], badStr[32]; + short isVHDamaged; + short printMsg; + + vcb = GPtr->calculatedVCB; + GPtr->TarID = MDB_FNum; + + hfsPlusIOPosOffset = vcb->vcbEmbeddedOffset; + + goodValue = badValue = 0; + isVHDamaged = 0; + printMsg = 0; + + // CatHChk will flag valence errors and display the good and bad values for + // our file and folder counts. It will set S_Valence in CatStat when this + // problem is detected. We do NOT want to flag the error here in that case + // since the volume header counts cannot be trusted and it will lead to + // confusing messages. 
+ if ( volumeHeader->fileCount != vcb->vcbFileCount && + (GPtr->CatStat & S_Valence) == 0 ) { + fsckPrint(GPtr->context, E_FilCnt); + sprintf(goodStr, "%u", vcb->vcbFileCount); + sprintf(badStr, "%u", volumeHeader->fileCount); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + printMsg = 1; + } + + if ( volumeHeader->folderCount != vcb->vcbFolderCount && + (GPtr->CatStat & S_Valence) == 0 ) { + fsckPrint(GPtr->context, E_DirCnt); + sprintf(goodStr, "%u", vcb->vcbFolderCount); + sprintf(badStr, "%u", volumeHeader->folderCount); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + + printMsg = 1; + } + + if (volumeHeader->freeBlocks != vcb->vcbFreeBlocks) { + fsckPrint(GPtr->context, E_FreeBlocks); + sprintf(goodStr, "%u", vcb->vcbFreeBlocks); + sprintf(badStr, "%u", volumeHeader->freeBlocks); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + printMsg = 1; + } + + if ( volumeHeader->catalogFile.clumpSize != vcb->vcbCatalogFile->fcbClumpSize ) { + fsckPrint(GPtr->context, E_InvalidClumpSize); + sprintf(goodStr, "%u", vcb->vcbCatalogFile->fcbClumpSize); + sprintf(badStr, "%u", volumeHeader->catalogFile.clumpSize); + fsckPrint(GPtr->context, E_BadValue, goodStr, badStr); + printMsg = 1; + } + + if ( volumeHeader->signature != kHFSPlusSigWord && + volumeHeader->signature != kHFSXSigWord) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB signature \n" ); + isVHDamaged = 1; + } + /* From HFS Plus Volume Format Specification (TN1150), "It is acceptable + * for a bit in encodingsBitmap to be set even though no names on the + * volume use that encoding". Therefore we do not report extra bits set in + * on-disk encodingsBitmap as error but will repair it silently if any other + * repairs are made. We complain about extra bits cleared in + * on-disk encodingsBitmap when compared to calculated encodingsBitmap. 
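+ * For example, calculated=0x01 with disk=0x03 passes the check below (the extra bit set
+ * on disk is tolerated), while calculated=0x03 with disk=0x01 fails it, because
+ * (disk & calculated) != calculated.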
+ */ + if ( (volumeHeader->encodingsBitmap & vcb->vcbEncodingsBitmap) + != vcb->vcbEncodingsBitmap ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB encodingsBitmap, disk=0x%qx calculated=0x%qx \n", volumeHeader->encodingsBitmap, vcb->vcbEncodingsBitmap ); + isVHDamaged = 1; + } + if ( (UInt16) (hfsPlusIOPosOffset/512) != vcb->vcbAlBlSt ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB AlBlSt \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->createDate != vcb->vcbCreateDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB createDate \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->modifyDate != vcb->vcbModifyDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB modifyDate \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->backupDate != vcb->vcbBackupDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB backupDate \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->checkedDate != vcb->vcbCheckedDate ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB checkedDate \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->rsrcClumpSize != vcb->vcbRsrcClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB rsrcClumpSize (VH=%u, vcb=%u)\n", volumeHeader->rsrcClumpSize, vcb->vcbRsrcClumpSize); + isVHDamaged = 1; + } + if ( volumeHeader->dataClumpSize != vcb->vcbDataClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB dataClumpSize \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->nextCatalogID != vcb->vcbNextCatalogID && + (volumeHeader->attributes & kHFSCatalogNodeIDsReused) == 0) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB nextCatalogID \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->writeCount != vcb->vcbWriteCount ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB writeCount \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->nextAllocation != vcb->vcbNextAllocation ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB nextAllocation \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->totalBlocks != vcb->vcbTotalBlocks ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB totalBlocks \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->blockSize != vcb->vcbBlockSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB blockSize \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->attributes != vcb->vcbAttributes ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB attributes \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->extentsFile.clumpSize != vcb->vcbExtentsFile->fcbClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB extentsFile.clumpSize \n" ); + isVHDamaged = 1; + } + if ( volumeHeader->allocationFile.clumpSize != vcb->vcbAllocationFile->fcbClumpSize ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB allocationFile.clumpSize \n" ); + isVHDamaged = 1; + } + if ( (vcb->vcbAttributesFile != NULL) && + (volumeHeader->attributesFile.clumpSize != vcb->vcbAttributesFile->fcbClumpSize )) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB attributesFile.clumpSize \n" ); + isVHDamaged = 1; + } + if ( CmpBlock( volumeHeader->finderInfo, vcb->vcbFinderInfo, 
sizeof(vcb->vcbFinderInfo) ) ) { + if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) + plog( "\tinvalid VHB finderInfo \n" ); + isVHDamaged = 1; + } + + /* + * compare extent file allocation info with VolumeHeader + */ + fcbP = vcb->vcbExtentsFile; + if ( (UInt64)volumeHeader->extentsFile.totalBlocks * (UInt64)vcb->vcbBlockSize != fcbP->fcbPhysicalSize ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 3, 0 ); + } + for ( i=0; i < GPtr->numExtents; i++ ) + { + if ( (volumeHeader->extentsFile.extents[i].startBlock != fcbP->fcbExtents32[i].startBlock) || + (volumeHeader->extentsFile.extents[i].blockCount != fcbP->fcbExtents32[i].blockCount) ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 4, 0 ); + } + } + + /* + * compare catalog file allocation info with MDB + */ + fcbP = vcb->vcbCatalogFile; /* compare PEOF for catalog file */ + if ( (UInt64)volumeHeader->catalogFile.totalBlocks * (UInt64)vcb->vcbBlockSize != fcbP->fcbPhysicalSize ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 5, 0 ); + } + for ( i=0; i < GPtr->numExtents; i++ ) + { + if ( (volumeHeader->catalogFile.extents[i].startBlock != fcbP->fcbExtents32[i].startBlock) || + (volumeHeader->catalogFile.extents[i].blockCount != fcbP->fcbExtents32[i].blockCount) ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 6, 0 ); + } + } + + + /* + * compare bitmap file allocation info with MDB + */ + fcbP = vcb->vcbAllocationFile; + if ( (UInt64)volumeHeader->allocationFile.totalBlocks * (UInt64)vcb->vcbBlockSize != fcbP->fcbPhysicalSize ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 7, 0 ); + } + for ( i=0; i < GPtr->numExtents; i++ ) + { + if ( (volumeHeader->allocationFile.extents[i].startBlock != fcbP->fcbExtents32[i].startBlock) || + (volumeHeader->allocationFile.extents[i].blockCount != fcbP->fcbExtents32[i].blockCount) ) + { + printMsg = 1; + WriteError ( GPtr, E_VolumeHeaderDamaged, 8, 0 ); + } + } + + if (isVHDamaged || printMsg) { + GPtr->VIStat = GPtr->VIStat | S_MDB; + if (isVHDamaged) + WriteError ( GPtr, E_VolumeHeaderDamaged, 2, 0 ); + } + + return( noErr ); +} + diff --git a/fsck_hfs/dfalib/Scavenger.h b/fsck_hfs/dfalib/Scavenger.h new file mode 100644 index 0000000..e0ffcce --- /dev/null +++ b/fsck_hfs/dfalib/Scavenger.h @@ -0,0 +1,1451 @@ +/* + * Copyright (c) 1999-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Scavenger.h */ + +#ifndef __SCAVENGER__ +#define __SCAVENGER__ + +#define pascal + +#include "SRuntime.h" +#include "BTree.h" +#include "BTreePrivate.h" +#include "CheckHFS.h" +#include "BTreeScanner.h" +#include "hfs_endian.h" +#include "../fsck_debug.h" +#include "../fsck_messages.h" +#include "../fsck_hfs_msgnums.h" +#include "../fsck_msgnums.h" +#include "../fsck_hfs.h" + +#include <assert.h> +#include <sys/xattr.h> +#include <sys/acl.h> +#include <sys/kauth.h> +#include <sys/errno.h> +#include <sys/syslimits.h> +#include <sys/param.h> +#include <sys/sysctl.h> +#include <sys/mount.h> +#include <hfs/hfs_mount.h> + +#ifdef __cplusplus +extern "C" { +#endif + + +#define kFSCKMountVersion 0x6673636B /* 'fsck' made changes */ + +enum { + Log2BlkLo = 9, // number of left shifts to convert bytes to block.lo + Log2BlkHi = 23 // number of right shifts to convert bytes to block.hi +}; + +enum { + kNoHint = 0 +}; + + +// +// Misc constants +// + +/* IO size for reading or writing disk blocks */ +#define DISK_IOSIZE 32768 + +#define kMaxReScan 3 /* max times to re-scan volume on repair success */ + +#define kBTreeHeaderUserBytes 128 + +#define kBusErrorValue 0x50FF8001 + +//•• Danger! This should not be hard coded +#define kMaxClumpSize 0x100000 /* max clump size is 1MB (2048 btree nodes) */ + +#define MDB_FNum 1 /* file number representing the MDB */ +#define AMDB_FNum -1 /* file number representing the alternate MDB */ +#define VBM_FNum 2 /* file number representing the volume bit map */ +#define MDB_BlkN 2 /* logical block number for the MDB */ + +#define kCalculatedExtentRefNum ( 0 ) +#define kCalculatedCatalogRefNum ( 1*sizeof(SFCB) ) +#define kCalculatedAllocationsRefNum ( 2*sizeof(SFCB) ) +#define kCalculatedAttributesRefNum ( 3*sizeof(SFCB) ) +#define kCalculatedStartupRefNum ( 4*sizeof(SFCB) ) +#define kCalculatedRepairRefNum ( 5*sizeof(SFCB) ) + +#define Max_ABSiz 0x7FFFFE00 /* max allocation block size (multiple of 512) */ +#define Blk_Size 512 /* size of a logical block */ +#define kHFSBlockSize 512 /* HFS block size */ + +// only the lower 7 bits are considered to be invalid, all others are valid -djb +#define VAtrb_Msk 0x007F /* volume attribute mask - invalid bits */ +#define VAtrb_DFlt 0x0100 /* default volume attribute flags */ +#define VAtrb_Cons 0x0100 /* volume consistency flag */ +#define kHFSCatalogNodeIDsReused 0x1000 + + +/* + * File type and creator for TextEdit documents + */ +enum { + kTextFileType = 0x54455854, /* 'TEXT' */ + kTextFileCreator = 0x74747874, /* 'ttxt' */ +}; + +/* + * Alias type and creator for directory hard links + */ +enum { + kHFSAliasType = 0x66647270, /* 'fdrp' */ + kHFSAliasCreator = 0x4D414353 /* 'MACS' */ +}; + +/*------------------------------------------------------------------------------ + BTree data structures +------------------------------------------------------------------------------*/ + +/* misc BTree constants */ + +#define BTMaxDepth 8 /* max tree depth */ +#define Num_HRecs 3 /* number of records in BTree Header node */ +#define Num_MRecs 1 /* number of records in BTree Map node */ + + + +// DFA extensions to the HFS/HFS+ BTreeControlBlock +typedef struct BTreeExtensionsRec +{ + Ptr BTCBMPtr; // pointer to scavenger BTree bit map + UInt32 BTCBMSize; // size of the bitmap, bytes + BTreeControlBlock *altBTCB; // BTCB DFA builds up + UInt32 realFreeNodeCount; // Number of real free nodes, taken from disk, for more accurate progress information +} BTreeExtensionsRec; + + + +/* + * Scavenger
BTree Path Record (STPR) + */ +typedef struct STPR { + UInt32 TPRNodeN; /* node number */ + SInt16 TPRRIndx; /* record index */ + SInt16 unused; /* not used - makes debugging easier */ + UInt32 TPRLtSib; /* node number of left sibling node */ + UInt32 TPRRtSib; /* node number of right sibling node */ + } STPR, *STPRPtr; + +typedef STPR SBTPT[BTMaxDepth]; /* BTree path table */ + +#define LenSBTPT ( sizeof(STPR) * BTMaxDepth ) /* length of BTree Path Table */ + + + + +/*------------------------------------------------------------------------------ + CM (Catalog Manager) data structures + ------------------------------------------------------------------------------*/ + +// +// Misc constants +// +#define CMMaxDepth 100 /* max catalog depth (Same as Finder 7.0) */ + +#define fNameLocked 4096 + +union CatalogName { + Str31 pstr; + HFSUniStr255 ustr; +}; +typedef union CatalogName CatalogName; + +// +// Scavenger Directory Path Record (SDPR) +// +typedef struct SDPR { + UInt32 directoryID; // directory ID + UInt32 offspringIndex; // offspring index + UInt32 directoryHint; // BTree hint for directory record + long threadHint; // BTree hint for thread record + HFSCatalogNodeID parentDirID; // parent directory ID + CatalogName directoryName; // directory CName +} SDPR; + +enum { +// kInvalidMRUCacheKey = -1L, /* flag to denote current MRU cache key is invalid*/ + kDefaultNumMRUCacheBlocks = 16 /* default number of blocks in each cache*/ +}; + + +/* + * UTCacheReadIP and UTCacheWriteIP cacheOption + */ + +enum { + noCacheBit = 5, /* don't cache this please */ + noCacheMask = 0x0020, + rdVerifyBit = 6, /* read verify */ + rdVerifyMask = 0x0040 +}; + + +/*------------------------------------------------------------------------------ + Low-level File System Error codes +------------------------------------------------------------------------------*/ + +/* The DCE bits are defined as follows (for the word of flags): */ + +enum +{ + Is_AppleTalk = 0, + Is_Agent = 1, // future use + FollowsNewRules = 2, // New DRVR Rules Bit + Is_Open = 5, + Is_Ram_Based = 6, + Is_Active = 7, + Read_Enable = 8, + Write_Enable = 9, + Control_Enable = 10, + Status_Enable = 11, + Needs_Goodbye = 12, + Needs_Time = 13, + Needs_Lock = 14, + + Is_AppleTalk_Mask = 1 << Is_AppleTalk, + Is_Agent_Mask = 1 << Is_Agent, + FollowsRules_Mask = 1 << FollowsNewRules, + Is_Open_Mask = 1 << Is_Open, + Is_Ram_Based_Mask = 1 << Is_Ram_Based, + Is_Active_Mask = 1 << Is_Active, + Read_Enable_Mask = 1 << Read_Enable, + Write_Enable_Mask = 1 << Write_Enable, + Control_Enable_Mask = 1 << Control_Enable, + Status_Enable_Mask = 1 << Status_Enable, + Needs_Goodbye_Mask = 1 << Needs_Goodbye, + Needs_Time_Mask = 1 << Needs_Time, + Needs_Lock_Mask = 1 << Needs_Lock +}; + +enum { + cdInternalErr = -1312, // internal CheckDisk error + cdVolumeNotFoundErr = -1313, // could not find volume (could be offline) + cdCannotReadErr = -1314, // unable to read from disk + cdCannotWriteErr = -1315, // unable to write to disk + cdNotHFSVolumeErr = -1316, // not an HFS disk + cdUnrepairableErr = -1317, // volume needs major repairs that CheckDisk cannot fix + cdRepairFailedErr = -1318, // repair failed + cdUserCanceledErr = -1319, // user interrupt + cdVolumeInUseErr = -1320, // volume modified by another app + cdNeedsRepairsErr = -1321, // volume needs repairs (see repairInfo for additional info) + cdReMountErr = -1322, // Cannot remount volume + cdUnknownProcessesErr = -1323, // Volume cannot be unmounted and unknown processes are running + cdDamagedWrapperErr =
-1324, // HFS Wrapper damaged error. + cdIncompatibleOSErr = -1325, // Current OS version is incompatible + cdMemoryFullErr = -1326 // not enough memory to check disk +}; + + +enum { + fsDSIntErr = -127 /* non-hardware Internal file system error */ +}; + +// Repair Info - additional info returned when a repair is attempted +enum { + kFileSharingEnabled = 0x00000001, + kDiskIsLocked = 0x00000002, + kDiskIsBoot = 0x00000004, + kDiskHasOpenFiles = 0x00000008, + kVolumeHadOverlappingExtents = 0x00000010, // repairLevelSomeDataLoss + kVolumeClean = 0x00000020, + + kRepairsWereMade = 0x80000000 +}; + +// Input parameters to CheckDisk +enum +{ + ignoreRunningProcessesMask = 0x00000001, // Assumes caller has shut down processes + checkDiskVersionMask = 0x00000004 // Will just return back the version in repairInfo. +}; + +// Message types, so the user can treat and display accordingly +enum { + kStatusMessage = 0x0000, + kTitleMessage = 0x0001, + kErrorMessage = 0x0002 +}; + +// <10> Current stage of CheckDisk passed to cancel proc. +// File System is marked busy during kRepairStage, so WaitNextEvent and I/O cannot be done during this stage. + +enum { + kHFSStage = 0, + kRepairStage, + kVerifyStage, + kAboutToRepairStage +}; + +// Resource ID of 'STR ' resource containing the name of the folder in which to create aliases to damaged files. +enum { + rDamagedFilesDirSTRid = -20886 +}; + +// Type of volume +enum { + kUnknownVolumeType = 0, + kHFSVolumeType, + kEmbededHFSPlusVolumeType, + kPureHFSPlusVolumeType +}; + + +enum { + kStatusLines = 131, + kFirstError = 500, + + kHighLevelInfo = 1100, + kBasicInfo = 1200, + kErrorInfo = 1202, + + kErrorBase = -1310 +}; + + +/*------------------------------------------------------------------------------ + Minor Repair Interface (records compiled during scavenge, later repaired) + Note that not all repair types use all of these fields.
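+ Repair orders are obtained via AllocMinorRepairOrder() (declared below) and chained
+ off SGlob.MinorRepairsP for processing during the later repair stage.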
+ -----------------------------------------------------------------------------*/ + + typedef struct RepairOrder /* a node describing a needed minor repair */ + { + struct RepairOrder *link; /* link to next node, or NULL */ + SInt16 type; /* type of error, as an error code (E_DirVal etc) */ + SInt16 forkType; /* which file fork */ + UInt64 correct; /* correct valence */ + UInt64 incorrect; /* valence as found in volume (for consistency chk) */ + UInt32 maskBit; /* incorrect bit */ + UInt32 hint; /* B-tree node hint */ + UInt32 parid; /* parent ID */ + unsigned char name[1]; /* dir or file name */ + } RepairOrder, *RepairOrderPtr; + + + typedef struct EmbededVolDescription + { + SInt16 drAlBlSt; + UInt16 drEmbedSigWord; + HFSExtentDescriptor drEmbedExtent; + } EmbededVolDescription; + + +// define the correct drive queue structure +typedef struct ExtendedDrvQueue +{ + char dQVolumeLocked; + char dQDiskInDrive; + char dQUsedInternally; + char dQDiskIsSingleSided; + QElemPtr qLink; + short qType; + short dQDrive; + short dQRefNum; + short dQFSID; + short dQDrvSz; + short dQDrvSz2; +}ExtendedDrvQueue; + + +/*------------------------------------------------------------------------------ + Scavenger Global Area - (SGlob) +------------------------------------------------------------------------------*/ +typedef struct MissingThread +{ + struct MissingThread *link; /* link to next node, or NULL */ + UInt32 threadID; + HFSPlusCatalogKey nextKey; + HFSPlusCatalogThread thread; +} MissingThread; + +#define kDataFork 0 +#define kRsrcFork (-1) +#define kEAData 1 + +struct ExtentInfo { + HFSCatalogNodeID fileID; + UInt32 startBlock; + UInt32 blockCount; + UInt32 newStartBlock; + char * attrname; + UInt8 forkType; + /* didRepair stores the result of moving of overlap extent and is used + * to decide which disk blocks (original blocks or blocks allocated for + * for new extent location) should be marked used and free. + */ + Boolean didRepair; +}; +typedef struct ExtentInfo ExtentInfo; + +struct ExtentsTable { + UInt32 count; + ExtentInfo extentInfo[1]; +}; +typedef struct ExtentsTable ExtentsTable; + + +struct FileIdentifier { + Boolean hasThread; + HFSCatalogNodeID fileID; + HFSCatalogNodeID parID; // Used for files on HFS volumes without threads + Str31 name; // Used for files on HFS volumes without threads +}; +typedef struct FileIdentifier FileIdentifier; + +struct FileIdentifierTable { + UInt32 count; + FileIdentifier fileIdentifier[1]; +}; +typedef struct FileIdentifierTable FileIdentifierTable; + +/* Universal Extent Key */ + +union ExtentKey { + HFSExtentKey hfs; + HFSPlusExtentKey hfsPlus; +}; +typedef union ExtentKey ExtentKey; +/* Universal extent descriptor */ + +union ExtentDescriptor { + HFSExtentDescriptor hfs; + HFSPlusExtentDescriptor hfsPlus; +}; +typedef union ExtentDescriptor ExtentDescriptor; +/* Universal extent record */ + +union ExtentRecord { + HFSExtentRecord hfs; + HFSPlusExtentRecord hfsPlus; +}; +typedef union ExtentRecord ExtentRecord; +/* Universal catalog key */ + +union CatalogKey { + HFSCatalogKey hfs; + HFSPlusCatalogKey hfsPlus; +}; +typedef union CatalogKey CatalogKey; +/* Universal catalog data record */ + +union CatalogRecord { + SInt16 recordType; + HFSCatalogFolder hfsFolder; + HFSCatalogFile hfsFile; + HFSCatalogThread hfsThread; + HFSPlusCatalogFolder hfsPlusFolder; + HFSPlusCatalogFile hfsPlusFile; + HFSPlusCatalogThread hfsPlusThread; +}; +typedef union CatalogRecord CatalogRecord; + +/* + Key for records in the attributes file. 
Fields are compared in the order: + cnid, attributeName, startBlock +*/ + +struct AttributeKey { + UInt16 keyLength; /* must set kBTBigKeysMask and kBTVariableIndexKeysMask in BTree header's attributes */ + UInt16 pad; + HFSCatalogNodeID cnid; /* file or folder ID */ + UInt32 startBlock; /* block # relative to start of attribute */ + UInt16 attrNameLen; /* number of unicode characters */ + UInt16 attrName[127]; /* attribute name (Unicode) */ +}; +typedef struct AttributeKey AttributeKey; +enum { + kAttributeKeyMaximumLength = sizeof(AttributeKey) - sizeof(UInt16), + kAttributeKeyMinimumLength = kAttributeKeyMaximumLength - 127 * sizeof(UInt16) + sizeof(UInt16) +}; + +struct HIOParam { + QElemPtr qLink; /*queue link in header*/ + short qType; /*type byte for safety check*/ + short ioTrap; /*FS: the Trap*/ + Ptr ioCmdAddr; /*FS: address to dispatch to*/ + void* ioCompletion; /*completion routine addr (0 for synch calls)*/ + OSErr ioResult; /*result code*/ + StringPtr ioNamePtr; /*ptr to Vol:FileName string*/ + short ioVRefNum; /*volume refnum (DrvNum for Eject and MountVol)*/ + short ioRefNum; + SInt8 ioVersNum; + SInt8 ioPermssn; + Ptr ioMisc; + Ptr ioBuffer; + long ioReqCount; + long ioActCount; + short ioPosMode; + long ioPosOffset; +}; +typedef struct HIOParam HIOParam; + +typedef HIOParam * HIOParamPtr; + + +struct FCBArray { + UInt32 length; /* first word is FCB part length*/ + SFCB fcb[1]; /* fcb array*/ +}; +typedef struct FCBArray FCBArray; + +/* + UserCancel callback routine + + Input: + progress: number from 1 to 100 indicating current progress + progressChanged: boolean flag that is true if progress number has been updated + context: pointer to context data (if any) that the caller passed to CheckDisk + + Output: + return true if the user wants to cancel the CheckDisk operation + */ + +typedef int (*UserCancelProcPtr)(UInt16 progress, UInt16 secondsRemaining, Boolean progressChanged, UInt16 stage, void *context, int passno); + + +#if 0 + + //-- User Cancel Proc + typedef UniversalProcPtr UserCancelUPP; + + enum { + uppUserCancelProcInfo = kPascalStackBased + | RESULT_SIZE(kTwoByteCode) + | STACK_ROUTINE_PARAMETER(1, kTwoByteCode) + | STACK_ROUTINE_PARAMETER(2, kTwoByteCode) + | STACK_ROUTINE_PARAMETER(3, kTwoByteCode) + | STACK_ROUTINE_PARAMETER(4, kTwoByteCode) + | STACK_ROUTINE_PARAMETER(5, kFourByteCode) + }; + + #define NewUserCancelProc(userRoutine) \ + (UserCancelUPP) NewRoutineDescriptor((ProcPtr)(userRoutine), uppUserCancelProcInfo, GetCurrentArchitecture()) + + #define CallUserCancelProc(userRoutine, progress, secondsRemaining, progressChanged, stage, context, p) \ + CallUniversalProc((UniversalProcPtr)(userRoutine), uppUserCancelProcInfo, (progress), (secondsRemaining), (progressChanged), (stage), (context), (p)) + +#else /* not CFM */ + + typedef UserCancelProcPtr UserCancelUPP; + + #define NewUserCancelProc(userRoutine) \ + ((UserCancelUPP) (userRoutine)) + + #define CallUserCancelProc(userRoutine, progress, secondsRemaining, progressChanged, stage, context, p) \ + (*(userRoutine))((progress), (secondsRemaining), (progressChanged), (stage), (context), (p)) + +#endif + + +/* + UserMessage callback routine + + Input: + message: message from CheckDisk + messageType: type of message + context: pointer to context data (if any) that the caller passed to CheckDisk + + Output: + return true if the user wants to cancel the CheckDisk operation + */ + + +typedef pascal void (*UserMessageProcPtr)(StringPtr message, SInt16 messageType, void *context); + +#if 0 + + //-- User 
Message Proc + typedef UniversalProcPtr UserMessageUPP; + + enum { + uppUserMessageProcInfo = kPascalStackBased + | STACK_ROUTINE_PARAMETER(1, kFourByteCode) + | STACK_ROUTINE_PARAMETER(2, kTwoByteCode) + | STACK_ROUTINE_PARAMETER(3, kFourByteCode) + }; + + #define NewUserMessageProc(userRoutine) \ + (UserMessageUPP) NewRoutineDescriptor((ProcPtr)(userRoutine), uppUserMessageProcInfo, GetCurrentArchitecture()) + + #define CallUserMessageProc(userRoutine, message, messageType, context) \ + CallUniversalProc((UniversalProcPtr)(userRoutine), uppUserMessageProcInfo, (message), (messageType), (context)) + +#else /* not CFM */ + + typedef UserMessageProcPtr UserMessageUPP; + + #define NewUserMessageProc(userRoutine) \ + ((UserMessageUPP) (userRoutine)) + + #define CallUserMessageProc(userRoutine, message, messageType, context) \ + (*(userRoutine))((message), (messageType), (context)) + +#endif + +/* 3843779 Structure to determine consistency of attribute data and + * corresponding bit in catalog record. Based on Chinese Remainder + * Theorem + */ +typedef struct PrimeBuckets { + UInt32 n32[32]; + UInt32 n27[27]; + UInt32 n25[25]; + UInt32 n7[7]; + UInt32 n11[11]; + UInt32 n13[13]; + UInt32 n17[17]; + UInt32 n19[19]; + UInt32 n23[23]; + UInt32 n29[29]; + UInt32 n31[31]; +} PrimeBuckets; + +/* Record last attribute ID checked, used in CheckAttributeRecord, initialized in ScavSetup */ +typedef struct attributeInfo { + Boolean isValid; + Boolean hasSecurity; + int16_t recordType; + u_int32_t fileID; + unsigned char attrname[XATTR_MAXNAMELEN+1]; + u_int32_t totalBlocks; + u_int32_t calculatedTotalBlocks; + u_int64_t logicalSize; +} attributeInfo; + +/* + VolumeObject encapsulates all information about the multiple volume anchor blocks (VHB and MDB) + on HFS and HFS+ volumes. An HFS volume will have two MDBs (primary and alternate HFSMasterDirectoryBlock), + a pure HFS+ volume will have two VHBs (primary and alternate HFSPlusVolumeHeader), and a wrapped HFS+ + volume will have two MDBs and two VHBs. +*/ + +/* values for VolumeObject.flags */ +enum { + kVO_Inited = 0x00000001, // this structure has been initialized + kVO_PriVHBOK = 0x00000002, // the primary Volume Header Block is valid + kVO_AltVHBOK = 0x00000004, // the alternate Volume Header Block is valid + kVO_PriMDBOK = 0x00000008, // the primary Master Directory Block is valid + kVO_AltMDBOK = 0x00000010, // the alternate Master Directory Block is valid +}; + +typedef struct VolumeObject { + UInt32 flags; + SVCB * vcbPtr; // pointer to VCB used for this volume + UInt32 volumeType; // (kHFSVolumeType or kEmbededHFSPlusVolumeType or kPureHFSPlusVolumeType) + UInt32 embeddedOffset; // offset (in bytes) of embedded HFS+ volume into HFS wrapper volume + // NOTE - UInt32 is OK since we don't support HFS Wrappers on TB volumes + UInt32 sectorSize; // size of a sector for this device + UInt64 totalDeviceSectors; // total number of sectors for this volume (from GetDeviceSize) + UInt64 totalEmbeddedSectors; // total number of sectors for embedded volume + // location of all possible volume anchor blocks (MDB and VHB) on this volume. These locations + // are the sector offset into the volume. Only wrapped HFS+ volumes use all 4 of these.
+ UInt64 primaryVHB; // not used for HFS volumes + UInt64 alternateVHB; // not used for HFS volumes + UInt64 primaryMDB; // not used for pure HFS+ volumes + UInt64 alternateMDB; // not used for pure HFS+ volumes +} VolumeObject, *VolumeObjectPtr; + + +typedef struct SGlob { + void * scavStaticPtr; // pointer to static structure allocated in ScavSetUp + SInt16 DrvNum; // drive number of target drive + SInt16 RepLevel; // repair level, 1 = minor repair, 2 = major repair + SInt16 ScavRes; // scavenge result code + OSErr ErrCode; // error code + OSErr IntErr; // internal error code + UInt16 VIStat; // scavenge status flags for volume info + UInt16 ABTStat; // scavenge status flags for Attributes BTree + UInt16 EBTStat; // scavenge status flags for extent BTree + UInt16 CBTStat; // scavenge status flags for catalog BTree + UInt32 CatStat; // scavenge status flags for catalog file + UInt16 VeryMinorErrorsStat; // scavenge status flags for very minor errors + UInt16 JStat; // scavange status flags for journal errors + UInt16 PrintStat; // info about messages that should be displayed only once + DrvQElPtr DrvPtr; // pointer to driveQ element for target drive + UInt32 TarID; // target ID (CNID of data structure being verified) + UInt64 TarBlock; // target block/node number being verified + SInt16 BTLevel; // current BTree enumeration level + SBTPT *BTPTPtr; // BTree path table pointer + SInt16 DirLevel; // current directory enumeration level + SDPR *DirPTPtr; // directory path table pointer (pointer to array of SDPR) + uint32_t dirPathCount; // number of SDPR entries allocated in directory path table + SInt16 CNType; // current CNode type + UInt32 ParID; // current parent DirID + CatalogName CName; // current CName + RepairOrderPtr MinorRepairsP; // ptr to list of problems for later repair + MissingThread *missingThreadList; + Ptr FCBAPtr; // pointer to scavenger FCB array + UInt32 **validFilesList; // List of valid HFS file IDs + + ExtentsTable **overlappedExtents; // List of overlapped extents + FileIdentifierTable **fileIdentifierTable; // List of files for post processing + + UInt32 inputFlags; // Caller can specify some DFA behaviors + + UInt32 volumeFeatures; // bit vector of volume and OS features + Boolean usersAreConnected; // true if user are connected + Boolean fileSharingOn; // true if file sharing is on + UInt32 altBlockLocation; + Boolean checkingWrapper; + SInt16 numExtents; // Number of memory resident extents. 
3 or 8 + OSErr volumeErrorCode; + + UserCancelUPP userCancelProc; + UserMessageUPP userMessageProc; + void *userContext; + + UInt64 onePercent; + UInt64 itemsToProcess; + UInt64 itemsProcessed; + UInt64 lastProgress; + long startTicks; + UInt16 secondsRemaining; + + long lastTickCount; + + + SVCB *calculatedVCB; + SFCB *calculatedExtentsFCB; + SFCB *calculatedCatalogFCB; + SFCB *calculatedAllocationsFCB; + SFCB *calculatedAttributesFCB; + SFCB *calculatedStartupFCB; + SFCB *calculatedRepairFCB; + BTreeControlBlock *calculatedExtentsBTCB; + BTreeControlBlock *calculatedCatalogBTCB; + BTreeControlBlock *calculatedRepairBTCB; + BTreeControlBlock *calculatedAttributesBTCB; + + Boolean cleanUnmount; + Boolean guiControl; + fsck_ctx_t context; + int chkLevel; + int repairLevel; + int rebuildOptions; // options to indicate type of btree(s) to rebuild + Boolean minorRepairErrors; // indicates some minor repairs failed + Boolean minorRepairFalseSuccess; // indicates minor repair function is returning false success, do not delete from the list + int canWrite; // we can safely write to the block device + int writeRef; // file descriptor with write access on the volume + int lostAndFoundMode; // used when creating lost+found directory + int liveVerifyState; // indicates if live verification is being done or not + BTScanState scanState; + int scanCount; /* Number of times fsck_hfs has looped */ + + unsigned char volumeName[256]; /* volume name in ASCII or UTF-8 */ + char deviceNode[256]; /* device node in ASCII */ + + /* Extended attribute check related stuff */ + uint32_t cat_ea_count; /* number of catalog records that have attribute bit set */ + uint32_t cat_acl_count; /* number of catalog records that have security bit set */ + uint32_t attr_ea_count; /* number of unique fileID attributes found in attribute btree */ + uint32_t attr_acl_count; /* number of acls found in attribute btree */ + PrimeBuckets CBTAttrBucket; /* prime number buckets for Attribute bit in Catalog btree */ + PrimeBuckets CBTSecurityBucket; /* prime number buckets for Security bit in Catalog btree */ + PrimeBuckets ABTAttrBucket; /* prime number buckets for Attribute bit in Attribute btree */ + PrimeBuckets ABTSecurityBucket; /* prime number buckets for Security bit in Attribute btree */ + attributeInfo lastAttrInfo; /* Record last attribute ID checked, used in CheckAttributeRecord, initialized in ScavSetup */ + UInt16 securityAttrName[XATTR_MAXNAMELEN]; /* Store security attribute name in UTF16, to avoid frequent conversion */ + size_t securityAttrLen; + + /* File Hard Links related stuff */ + uint32_t filelink_priv_dir_id; + + /* Directory Hard Links related stuff */ + uint32_t dirlink_priv_dir_id; + uint32_t dirlink_priv_dir_valence; + uint32_t calculated_dirinodes; + uint32_t calculated_dirlinks; + + /* Journal file ID's */ + uint32_t journal_file_id; + uint32_t jib_file_id; +} SGlob, *SGlobPtr; + + +enum +{ + supportsTrashVolumeCacheFeatureMask = 1, + supportsHFSPlusVolsFeatureMask = 2, + volumeIsMountedMask = 4 +}; + +/* scavenger flags */ + +/* volume info status flags (contents of VIStat) */ + +#define S_MDB 0x8000 // MDB/VHB damaged +#define S_AltMDB 0x4000 // Unused /* alternate MDB damaged */ +#define S_VBM 0x2000 // volume bit map damaged +#define S_WMDB 0x1000 // wrapper MDB is damaged +#define S_OverlappingExtents 0x0800 // Overlapping extents found +#define S_BadMDBdrAlBlSt 0x0400 // Invalid drAlBlSt field in MDB +#define S_InvalidWrapperExtents 0x0200 // Invalid catalog extent start in MDB + +/* BTree status 
flags (contents of EBTStat, CBTStat and ABTStat) */ + +#define S_BTH 0x8000 /* BTree header damaged */ +#define S_BTM 0x4000 /* BTree map damaged */ +#define S_Indx 0x2000 // Unused /* index structure damaged */ +#define S_Leaf 0x1000 // Unused /* leaf structure damaged */ +#define S_Orphan 0x0800 // orphaned file +#define S_OrphanedExtent 0x0400 // orphaned extent +#define S_ReservedNotZero 0x0200 // the flags or reserved fields are not zero +#define S_RebuildBTree 0x0100 // similar to S_Indx, S_Leaf, but if one is bad we stop checking and the other may also be bad. +#define S_ReservedBTH 0x0080 // fields in the BTree header should be zero but are not +#define S_AttributeCount 0x0040 // incorrect number of xattr in attribute btree in comparison with attribute bit in catalog btree +#define S_SecurityCount 0x0020 // incorrect number of security xattrs in attribute btree in comparison with security bit in catalog btree +#define S_AttrRec 0x0010 // orphaned/unknown record in attribute BTree +#define S_ParentHierarchy 0x0008 // bad parent hierarchy, could not lookup parent directory record +#define S_UnusedNodesNotZero 0x0004 /* Unused B-tree nodes are not filled with zeroes */ + +/* catalog file status flags (contents of CatStat) */ + +#define S_IllName 0x00008000 /* illegal name found */ +#define S_Valence 0x00004000 /* a directory valence is out of sync */ +#define S_FThd 0x00002000 /* dangling file thread records exist */ +#define S_DFCorruption 0x00001000 /* disappearing folder corruption detected */ +#define S_NoDir 0x00000800 /* missing directory record */ +#define S_LockedDirName 0x00000400 // locked dir name +#define S_MissingThread 0x00000200 /* missing thread record */ +#define S_UnlinkedFile 0x00000100 /* orphaned link node */ +#define S_LinkCount 0x00000080 /* data node link count needs repair */ +#define S_Permissions 0x00000040 /* BSD permissions need repair */ +#define S_FileAllocation 0x00000020 /* peof or leof needs adjustment */ +#define S_BadExtent 0x00000010 /* invalid extent */ +#define S_LinkErrRepair 0x00000008 /* repairable file/directory hard link corruption detected */ +#define S_LinkErrNoRepair 0x00000004 /* un-repairable file/directory hard link corruptions detected */ +#define S_FileHardLinkChain 0x00000002 /* incorrect number of file hard links, doubly linked list chain needs repair */ +#define S_DirHardLinkChain 0x00000001 /* incorrect number of directory hard links, doubly linked list chain needs repair */ + +/* VeryMinorErrorsStat */ + +#define S_BloatedThreadRecordFound 0x8000 // 2210409, excessively large thread record found + +/* user file status flags (contents of FilStat) */ + +//#define S_LockedName 0x4000 // locked file name + +/* Journal status flag (contents of JStat) */ +#define S_BadJournal 0x8000 /* Bad journal content */ +#define S_DirtyJournal 0x4000 /* Journal is dirty (needs to be replayed) */ + +/* Print status flag (contents of PrintStat) */ +#define S_DamagedDir 0x8000 /* message for M_LookDamagedDir already printed */ +#define S_SymlinkCreate 0x4000 /* message for E_SymlinkCreate already printed */ + +/*------------------------------------------------------------------------------ + ScavCtrl Interface +------------------------------------------------------------------------------*/ + +// Command Codes (commands to ScavControl) +enum +{ + scavInitialize = 1, // Start initial volume check + scavVerify, // Start verify operation + scavRepair, // Start repair operation + scavTerminate, // Cleanup after scavenge +}; + + +// Repair Levels +enum
+{ + repairLevelNoProblemsFound = 0, + repairLevelRepairIfOtherErrorsExist, // Bloated thread records, ... + repairLevelVeryMinorErrors, // Missing Custom Icon, Locked Directory name,..., Errors that don't need fixing from CheckDisk (Installer), Non Volume corruption bugs. + repairLevelVolumeRecoverable, // Minor Volume corruption exists + repairLevelSomeDataLoss, // Overlapping extents, some data loss but no scavenging will get it back + repairLevelWillCauseDataLoss, // Missing leaf nodes, repair will lose nodes without scavenging (proceed at your own risk, check disk with other utils) + repairLevelCatalogBtreeRebuild, // Catalog Btree is damaged, repair may lose some data + repairLevelUnrepairable // DFA cannot repair volume +}; + + +/* Status messages written to summary */ +enum { + M_FirstMessage = 1, + M_LastMessage = 29 +}; + + +/* Internal DFA error codes */ +enum { + errRebuildBtree = -1001 /* BTree requires rebuilding. */ +}; + + +enum { /* extendFileContigMask = 0x0002*/ + kEFContigBit = 1, /* force contiguous allocation*/ + kEFContigMask = 0x02, + kEFAllBit = 0, /* allocate all requested bytes or none*/ + kEFAllMask = 0x01, + kEFNoClumpBit = 2, /* Don't round up requested size to multiple of clump size*/ + kEFNoClumpMask = 0x04, /* TruncateFile option flags*/ + kEFNoExtOvflwBit = 3, /* Don't use extents overflow file */ + kEFNoExtOvflwMask = 0x08, + + kTFTrunExtBit = 0, /* truncate to the extent containing new PEOF*/ + kTFTrunExtMask = 1 +}; + + + +// Encoding vs. Index +// +// For runtime table lookups and for the volume encoding bitmap we +// need to map some encodings to keep them in a reasonable range. +// + +enum { + kTextEncodingMacRoman = 0L, + kTextEncodingMacFarsi = 0x8C, /* Like MacArabic but uses Farsi digits*/ + /* The following use script code 7, smCyrillic*/ + kTextEncodingMacUkrainian = 0x98, /* The following use script code 32, smUnimplemented*/ + + kIndexMacUkrainian = 48, // MacUkrainian encoding is 152 + kIndexMacFarsi = 49 // MacFarsi encoding is 140 +}; + +#define MapEncodingToIndex(e) \ + ( (e) < 48 ? (e) : ( (e) == kTextEncodingMacUkrainian ? kIndexMacUkrainian : ( (e) == kTextEncodingMacFarsi ? kIndexMacFarsi : kTextEncodingMacRoman) ) ) + +#define MapIndexToEncoding(i) \ + ( (i) == kIndexMacFarsi ? kTextEncodingMacFarsi : ( (i) == kIndexMacUkrainian ?
kTextEncodingMacUkrainian : (i) ) ) + +#define ValidMacEncoding(e) \ + ( ((e) < 39) || ((e) == kTextEncodingMacFarsi) || ((e) == kTextEncodingMacUkrainian) ) + + + + +extern void WriteMsg( SGlobPtr GPtr, short messageID, short messageType ); +extern void WriteError( SGlobPtr GPtr, short msgID, UInt32 tarID, UInt64 tarBlock ); +extern short CheckPause( void ); + +/* ------------------------------- From SControl.c ------------------------------- */ + +void ScavCtrl( SGlobPtr GPtr, UInt32 ScavOp, short *ScavRes ); + +extern short CheckForStop( SGlobPtr GPtr ); + + +/* ------------------------------- From SRepair.c -------------------------------- */ + +extern OSErr RepairVolume( SGlobPtr GPtr ); + +extern int FixDFCorruption( const SGlobPtr GPtr, RepairOrderPtr DFOrderP ); + +extern OSErr ProcessFileExtents( SGlobPtr GPtr, SFCB *fcb, UInt8 forkType, UInt16 flags, Boolean isExtentsBTree, Boolean *hasOverflowExtents, UInt32 *blocksUsed ); + +/* Function to get return file path/name given an ID */ +extern OSErr GetSystemFileName(UInt32 fileID, char *filename, unsigned int *filenamelen); +extern OSErr GetFileNamePathByID(SGlobPtr GPtr, UInt32 fileID, char *fullPath, unsigned int *fullPathLen, char *fileName, unsigned int *fileNameLen, u_int16_t *status); +#define FNAME_BUF2SMALL 0x001 /* filename buffer was too small */ +#define FNAME_BIGNAME 0x002 /* filename is greater than NAME_MAX bytes */ +#define FPATH_BUF2SMALL 0x010 /* path buffer was too small */ +#define FPATH_BIGNAME 0x020 /* intermediate component in path is greater than NAME_MAX bytes */ +#define F_RESERVE_FILEID 0x100 /* file ID was less than kHFSFirstUserCatalogNodeID */ + +/* ------------------------------- From SUtils.c --------------------------------- */ + +extern int AllocBTN( SGlobPtr GPtr, short FilRefN, UInt32 NodeNum ); + +extern int IntError( SGlobPtr GPtr, OSErr ErrCode ); + +extern void RcdError( SGlobPtr GPtr, OSErr ErrCode ); + +extern RepairOrderPtr AllocMinorRepairOrder( SGlobPtr GPtr, size_t extraBytes ); + +extern int IsDuplicateRepairOrder(SGlobPtr GPtr, RepairOrderPtr orig); + +extern void DeleteRepairOrder(SGlobPtr GPtr, RepairOrderPtr orig); + +extern void SetDFAStage( UInt32 stage ); +extern UInt32 GetDFAGlobals( void ); + +extern void InitializeVolumeObject( SGlobPtr GPtr ); +extern void CheckEmbeddedVolInfoInMDBs( SGlobPtr GPtr ); +extern VolumeObjectPtr GetVolumeObjectPtr( void ); +extern OSErr GetVolumeObjectVHB( BlockDescriptor * theBlockDescPtr ); +extern void GetVolumeObjectBlockNum( UInt64 * theBlockNumPtr ); +extern OSErr GetVolumeObjectAlternateBlock( BlockDescriptor * theBlockDescPtr ); +extern OSErr GetVolumeObjectPrimaryBlock( BlockDescriptor * theBlockDescPtr ); +extern void GetVolumeObjectAlternateBlockNum( UInt64 * theBlockNumPtr ); +extern void GetVolumeObjectPrimaryBlockNum( UInt64 * theBlockNumPtr ); +extern OSErr GetVolumeObjectAlternateMDB( BlockDescriptor * theBlockDescPtr ); +extern OSErr GetVolumeObjectPrimaryMDB( BlockDescriptor * theBlockDescPtr ); +extern OSErr GetVolumeObjectVHBorMDB( BlockDescriptor * theBlockDescPtr ); +extern void PrintName( int theCount, const UInt8 *theNamePtr, Boolean isUnicodeString ); +extern void PrintVolumeObject( void ); +extern Boolean VolumeObjectIsValid( void ); +extern Boolean VolumeObjectIsHFSPlus( void ); +extern Boolean VolumeObjectIsHFS( void ); +extern Boolean VolumeObjectIsEmbeddedHFSPlus( void ); +extern Boolean VolumeObjectIsPureHFSPlus( void ); +extern Boolean VolumeObjectIsHFSX(SGlobPtr); + +extern void InvalidateCalculatedVolumeBitMap( 
SGlobPtr GPtr ); + +extern OSErr GetVolumeFeatures( SGlobPtr GPtr ); + +OSErr FlushAlternateVolumeControlBlock( SVCB *vcb, Boolean isHFSPlus ); + +extern void ConvertToHFSPlusExtent(const HFSExtentRecord oldExtents, HFSPlusExtentRecord newExtents); + +void add_prime_bucket_uint32(PrimeBuckets *cur, uint32_t num); + +void add_prime_bucket_uint64(PrimeBuckets *cur, uint64_t num); + +int compare_prime_buckets(PrimeBuckets *bucket1, PrimeBuckets *bucket2); + +/* ------------------------------- From CatalogCheck.c -------------------------------- */ + +extern OSErr CheckCatalogBTree( SGlobPtr GPtr ); // catalog btree check + +extern OSErr CheckFolderCount( SGlobPtr GPtr ); // Compute folderCount + +extern int RecordBadAllocation(UInt32 parID, unsigned char * filename, UInt32 forkType, UInt32 oldBlkCnt, UInt32 newBlkCnt); + +extern int RecordTruncation(UInt32 parID, unsigned char * filename, UInt32 forkType, UInt64 oldSize, UInt64 newSize); + +/* ------------------------------- From SVerify1.c -------------------------------- */ + +extern OSErr CatFlChk( SGlobPtr GPtr ); // catalog file check + +extern OSErr CatHChk( SGlobPtr GPtr ); // catalog hierarchy check + +extern OSErr ExtBTChk( SGlobPtr GPtr ); // extent btree check + +extern OSErr BadBlockFileExtentCheck( SGlobPtr GPtr ); // bad block file extent check + +extern OSErr AttrBTChk( SGlobPtr GPtr ); // attributes btree check + +extern OSErr IVChk( SGlobPtr GPtr ); + +/* Operation type for CheckForClean */ +enum { + kCheckVolume, // check if volume is clean/dirty + kMarkVolumeDirty, // mark the volume dirty + kMarkVolumeClean // mark the volume clean +}; +extern int CheckForClean( SGlobPtr GPtr, UInt8 operation, Boolean *modified ); + +extern int CheckIfJournaled(SGlobPtr GPtr, Boolean journal_bit_only); + +typedef struct fsckJournalInfo { + int jnlfd; // File descriptor for journal device + off_t jnlOffset; // Offset of journal on journal device + off_t jnlSize; // Size of journal on same + char *name; // Name of journal device +} fsckJournalInfo_t; + +extern int IsJournalEmpty(SGlobPtr, fsckJournalInfo_t *); + +extern OSErr VInfoChk( SGlobPtr GPtr ); + +extern OSErr VLockedChk( SGlobPtr GPtr ); + +extern void BuildExtentKey( Boolean isHFSPlus, UInt8 forkType, HFSCatalogNodeID fileNumber, UInt32 blockNumber, void * key ); + +extern OSErr OrphanedFileCheck( SGlobPtr GPtr, Boolean *problemsFound ); + +extern int cmpLongs (const void *a, const void *b); + +extern int CheckAttributeRecord(SGlobPtr GPtr, const HFSPlusAttrKey *key, const HFSPlusAttrRecord *rec, UInt16 reclen); + +extern void RecordXAttrBits(SGlobPtr GPtr, UInt16 flags, HFSCatalogNodeID fileid, UInt16 btreetype); + +extern int FindOrigOverlapFiles(SGlobPtr GPtr); + +extern void PrintOverlapFiles (SGlobPtr GPtr); + +/* ------------------------------- From SVerify2.c -------------------------------- */ + +typedef int (* CheckLeafRecordProcPtr)(SGlobPtr GPtr, void *key, void *record, UInt16 recordLen); + +extern int BTCheck(SGlobPtr GPtr, short refNum, CheckLeafRecordProcPtr checkLeafRecord); + +extern int BTMapChk( SGlobPtr GPtr, short FilRefN ); + +extern OSErr ChkCName( SGlobPtr GPtr, const CatalogName *name, Boolean unicode ); // check catalog name + +extern OSErr CmpBTH( SGlobPtr GPtr, SInt16 fileRefNum ); + +extern int CmpBTM( SGlobPtr GPtr, short FilRefN ); + +extern int CmpMDB( SGlobPtr GPtr, HFSMasterDirectoryBlock * mdbP); + +extern int CmpVBM( SGlobPtr GPtr ); + +extern OSErr CmpBlock( void *block1P, void *block2P, size_t length ); /* same as 'memcmp', but EQ/NEQ only */ + 
+extern OSErr ChkExtRec ( SGlobPtr GPtr, UInt32 fileID, const void *extents , unsigned int *lastExtentIndex); + +extern int BTCheckUnusedNodes(SGlobPtr GPtr, short fileRefNum, UInt16 *btStat); + + +/* -------------------------- From SRebuildBTree.c ------------------------- */ + +extern OSErr RebuildBTree( SGlobPtr theSGlobPtr, int FileID ); + + +/* -------------------------- From SCatalog.c ------------------------- */ + +extern OSErr UpdateFolderCount( SVCB *vcb, + HFSCatalogNodeID pid, + const CatalogName *name, + SInt16 newType, + UInt32 hint, + SInt16 valenceDelta ); + +/* ------------------------------- From SExtents.c -------------------------------- */ +OSErr ZeroFileBlocks( SVCB *vcb, SFCB *fcb, UInt32 startingSector, UInt32 numberOfSectors ); + +OSErr MapFileBlockC ( + SVCB *vcb, // volume that file resides on + SFCB *fcb, // FCB of file + UInt32 numberOfBytes, // number of contiguous bytes desired + UInt64 sectorOffset, // starting offset within file (in 512-byte sectors) + UInt64 *startSector, // first 512-byte volume sector (NOT an allocation block) + UInt32 *availableBytes); // number of contiguous bytes (up to numberOfBytes) + +OSErr DeallocateFile(SVCB *vcb, CatalogRecord * fileRec); + +OSErr ExtendFileC ( + SVCB *vcb, // volume that file resides on + SFCB *fcb, // FCB of file to truncate + UInt32 sectorsToAdd, // number of sectors to allocate + UInt32 flags, // EFContig and/or EFAll + UInt32 *actualSectorsAdded); // number of bytes actually allocated + +OSErr FlushExtentFile( SVCB *vcb ); + +void ExtDataRecToExtents( + const HFSExtentRecord oldExtents, + HFSPlusExtentRecord newExtents); + +OSErr UpdateExtentRecord ( + const SVCB *vcb, + SFCB *fcb, + const HFSPlusExtentKey *extentFileKey, + HFSPlusExtentRecord extentData, + UInt32 extentBTreeHint); + +OSErr ReleaseExtents( + SVCB *vcb, + const HFSPlusExtentRecord extentRecord, + UInt32 *numReleasedAllocationBlocks, + Boolean *releasedLastExtent); + +OSErr CheckFileExtents( SGlobPtr GPtr, UInt32 fileNumber, UInt8 forkType, const unsigned char *xattrName, + const void *extents, UInt32 *blocksUsed ); +OSErr GetBTreeHeader( SGlobPtr GPtr, SFCB* fcb, BTHeaderRec *header ); +OSErr CompareVolumeBitMap( SGlobPtr GPtr, SInt32 whichBuffer ); +OSErr CompareVolumeHeader( SGlobPtr GPtr, HFSPlusVolumeHeader *vh ); +OSErr CreateExtentsBTreeControlBlock( SGlobPtr GPtr ); +OSErr CreateCatalogBTreeControlBlock( SGlobPtr GPtr ); +OSErr CreateAttributesBTreeControlBlock( SGlobPtr GPtr ); +OSErr CreateExtendedAllocationsFCB( SGlobPtr GPtr ); + + +OSErr CacheWriteInPlace( SVCB *vcb, UInt32 fileRefNum, HIOParam *iopb, UInt64 currentPosition, + UInt32 maximumBytes, UInt32 *actualBytes ); + + +/* Generic B-tree call back routines */ +OSStatus GetBlockProc (SFCB *filePtr, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block); +OSStatus ReleaseBlockProc (SFCB *filePtr, BlockDescPtr blockPtr, ReleaseBlockOptions options); +OSStatus SetEndOfForkProc (SFCB *filePtr, FSSize minEOF, FSSize maxEOF); +OSStatus SetBlockSizeProc (SFCB *filePtr, ByteCount blockSize, ItemCount minBlockCount); + +void DFA_PrepareInputName(ConstStr31Param name, Boolean isHFSPlus, CatalogName *catalogName); + +extern UInt32 CatalogNameSize( const CatalogName *name, Boolean isHFSPlus); + +void SetupFCB( SVCB *vcb, SInt16 refNum, UInt32 fileID, UInt32 fileClumpSize ); + + +extern void CalculateItemCount( SGlob *GPtr, UInt64 *itemCount, UInt64 *onePercent ); + + + +// Macros +extern BTreeControlBlock* GetBTreeControlBlock( short refNum ); +#define 
GetBTreeControlBlock(refNum) ((BTreeControlBlock*) ResolveFCB((refNum))->fcbBtree) + +/* The following macro marks a VCB as dirty by setting the upper 8 bits of the flags*/ +EXTERN_API_C( void ) +MarkVCBDirty (SVCB * vcb); + +#define MarkVCBDirty(vcb) ((void) (vcb->vcbFlags |= 0xFF00)) +EXTERN_API_C( void ) +MarkVCBClean (SVCB * vcb); + +#define MarkVCBClean(vcb) ((void) (vcb->vcbFlags &= 0x00FF)) +EXTERN_API_C( Boolean ) +IsVCBDirty (SVCB * vcb); + +#define IsVCBDirty(vcb) ((Boolean) ((vcb->vcbFlags & 0xFF00) != 0)) + + +extern pascal void M_Debugger(void); +extern pascal void M_DebugStr(ConstStr255Param debuggerMsg); +#if ( DEBUG_BUILD ) + #define M_Debuger() Debugger() + #define M_DebugStr( debuggerMsg ) DebugStr( debuggerMsg ) +#else + #define M_Debuger() + #define M_DebugStr( debuggerMsg ) +#endif + + +/* Test for error and return if error occurred*/ +EXTERN_API_C( void ) +ReturnIfError (OSErr result); + +#define ReturnIfError(result) if ( (result) != noErr ) return (result); else ; +/* Test for passed condition and return if true*/ +EXTERN_API_C( void ) +ReturnErrorIf (Boolean condition, + OSErr result); + +#define ReturnErrorIf(condition, error) if ( (condition) ) return( (error) ); +/* Exit function on error*/ +EXTERN_API_C( void ) +ExitOnError (OSErr result); + +#define ExitOnError( result ) if ( ( result ) != noErr ) goto ErrorExit; else ; + +/* Return the low 16 bits of a 32 bit value, pinned if too large*/ +EXTERN_API_C( UInt16 ) +LongToShort (UInt32 l); + +#define LongToShort( l ) l <= (UInt32)0x0000FFFF ? ((UInt16) l) : ((UInt16) 0xFFFF) + + +EXTERN_API_C( UInt32 ) +GetDFAStage (void); + +EXTERN_API_C(OSErr) +DeleteCatalogNode(SVCB *vcb, UInt32 pid, const CatalogName * name, UInt32 hint, Boolean for_rename); + +EXTERN_API_C(OSErr) +GetCatalogNode(SVCB *vcb, UInt32 pid, const CatalogName * name, UInt32 hint, CatalogRecord *data); + +EXTERN_API_C( SInt32 ) +CompareCatalogKeys (HFSCatalogKey * searchKey, + HFSCatalogKey * trialKey); + +EXTERN_API_C( SInt32 ) +CompareExtendedCatalogKeys (HFSPlusCatalogKey * searchKey, + HFSPlusCatalogKey * trialKey); +EXTERN_API_C( SInt32 ) +CaseSensitiveCatalogKeyCompare (HFSPlusCatalogKey * searchKey, + HFSPlusCatalogKey * trialKey); + +EXTERN_API_C( SInt32 ) +CompareExtentKeys (const HFSExtentKey * searchKey, + const HFSExtentKey * trialKey); + +EXTERN_API_C( SInt32 ) +CompareExtentKeysPlus (const HFSPlusExtentKey * searchKey, + const HFSPlusExtentKey * trialKey); +EXTERN_API_C( SInt32 ) +CompareAttributeKeys (const AttributeKey * searchKey, const AttributeKey * trialKey); +EXTERN_API( SFCB* ) +ResolveFCB (short fileRefNum); + +EXTERN_API_C( OSErr ) +ValidVolumeHeader (HFSPlusVolumeHeader * volumeHeader); + + +/* Old B-tree Manager API (going away soon!) 
*/ + +EXTERN_API_C( OSErr ) +SearchBTreeRecord (SFCB *fcb, + const void * key, + UInt32 hint, + void * foundKey, + void * data, + UInt16 * dataSize, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +GetBTreeRecord (SFCB *fcb, + SInt16 selectionIndex, + void * key, + void * data, + UInt16 * dataSize, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +InsertBTreeRecord (SFCB *fcb, + const void * key, + const void * data, + UInt16 dataSize, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +DeleteBTreeRecord (SFCB *fcb, + const void * key); + +EXTERN_API_C( OSErr ) +ReplaceBTreeRecord (SFCB *fcb, + const void * key, + UInt32 hint, + void * newData, + UInt16 dataSize, + UInt32 * newHint); + +EXTERN_API_C( void ) +InitBTreeHeader (UInt32 fileSize, + UInt32 clumpSize, + UInt16 nodeSize, + UInt16 recordCount, + UInt16 keySize, + UInt32 attributes, + UInt32 * mapNodes, + void * buffer); + +EXTERN_API_C( OSErr ) +UpdateFreeCount (SVCB * vcb); + + +EXTERN_API_C(Boolean) +NodesAreContiguous( SFCB *fcb, + UInt32 nodeSize); + + + +UInt32 GetTimeUTC(void); +UInt32 GetTimeLocal(Boolean forHFS); + +OSErr FlushVolumeControlBlock( SVCB *vcb ); + +pascal short ResolveFileRefNum(SFCB * fileCtrlBlockPtr); + +extern UInt32 CatalogNameLength( const CatalogName *name, Boolean isHFSPlus); + +extern void CopyCatalogName( const CatalogName *srcName, CatalogName *dstName, Boolean isHFSPLus); + +extern void UpdateCatalogName( ConstStr31Param srcName, Str31 destName); + +extern void BuildCatalogKey( HFSCatalogNodeID parentID, const CatalogName *name, Boolean isHFSPlus, + CatalogKey *key); + +extern void UpdateVolumeEncodings( SVCB *volume, TextEncoding encoding); + + +OSErr BlockAllocate (SVCB *vcb, UInt32 startingBlock, UInt32 blocksRequested, UInt32 blocksMaximum, + Boolean forceContiguous, UInt32 *actualStartBlock, UInt32 *actualNumBlocks); +OSErr BlockDeallocate ( SVCB *vcb, UInt32 firstBlock, UInt32 numBlocks); +UInt32 DivideAndRoundUp( UInt32 numerator, UInt32 denominator); +OSErr BlockFindAll(SFCB *fcb, UInt32 needed); + +OSErr InitializeBlockCache ( UInt32 blockSize, UInt32 blockCount ); + +void SetFCBSPtr( Ptr value ); +Ptr GetFCBSPtr( void ); + + +/* + * UTF-8 conversion routines + */ +extern int utf_decodestr(const unsigned char *, size_t, u_int16_t *, size_t *, size_t); +extern int utf_encodestr(const u_int16_t *, size_t, unsigned char *, size_t *, size_t); + +/* + * HardLink checking routines + */ +extern int HardLinkCheckBegin(SGlobPtr gp, void** cookie); +extern void HardLinkCheckEnd(void * cookie); +extern void CaptureHardLink(void * cookie, const HFSPlusCatalogFile *file); +extern int CheckHardLinks(void *cookie); + +extern void hardlink_add_bucket(PrimeBuckets *bucket, uint32_t inode_id, uint32_t cur_link_id); +extern int inode_check(SGlobPtr, PrimeBuckets *, CatalogRecord *, CatalogKey *, Boolean); +extern void record_link_badchain(SGlobPtr, Boolean); +extern int record_link_badflags(SGlobPtr, uint32_t, Boolean, uint32_t, uint32_t); +extern int record_inode_badflags(SGlobPtr, uint32_t, Boolean, uint32_t, uint32_t, Boolean); +extern int record_dirlink_badownerflags(SGlobPtr, uint32_t, uint8_t, uint8_t, int); +extern int record_link_badfinderinfo(SGlobPtr, uint32_t, Boolean); + +extern int get_first_link_id(SGlobPtr gptr, CatalogRecord *inode_rec, uint32_t inode_id, Boolean isdir, uint32_t *first_link_id); +extern int filelink_hash_inode(UInt32 inode_id, UInt32 linkCount); + +/* + * Directory Hard Link checking routines + */ +extern int dirhardlink_init(SGlobPtr gptr); +extern int dirhardlink_check(SGlobPtr 
gptr); + +extern OSErr GetCatalogRecordByID(SGlobPtr GPtr, UInt32 file_id, Boolean isHFSPlus, CatalogKey *key, CatalogRecord *rec, uint16_t *recsize); + +struct HardLinkInfo; +extern int RepairHardLinkChains(SGlobPtr, Boolean); + +/* + * Volume Bitmap checking routines + */ +extern int BitMapCheckBegin(SGlobPtr g); +extern int BitMapCheckEnd(void); +extern int CaptureBitmapBits(UInt32 startBit, UInt32 bitCount); +extern int ReleaseBitmapBits(UInt32 startBit, UInt32 bitCount); +extern int CheckVolumeBitMap(SGlobPtr g, Boolean repair); +extern void UpdateFreeBlockCount(SGlobPtr g); +extern int AllocateContigBitmapBits (SVCB *vcb, UInt32 numBlocks, UInt32 *actualStartBlock); +extern int IsTrimSupported(void); +extern void TrimFreeBlocks(SGlobPtr g); + +/* + * Variables and routines to support mapping a physical block number to a + * file path + */ +struct found_blocks { + u_int64_t block; + u_int32_t fileID; + u_int32_t padding; +}; +#define FOUND_BLOCKS_QUANTUM 30 +extern int gBlkListEntries; +extern u_int64_t *gBlockList; +extern int gFoundBlockEntries; +extern struct found_blocks *gFoundBlocksList; +extern long gBlockSize; +void CheckPhysicalMatch(SVCB *vcb, UInt32 startblk, UInt32 blkcount, UInt32 fileNumber, UInt8 forkType); +void dumpblocklist(SGlobPtr GPtr); + +#ifdef __cplusplus +}; +#endif + +#endif /* __SCAVENGER__ */ diff --git a/fsck_hfs/dfalib/VolumeBitmapCheck.c b/fsck_hfs/dfalib/VolumeBitmapCheck.c new file mode 100644 index 0000000..fd1074c --- /dev/null +++ b/fsck_hfs/dfalib/VolumeBitmapCheck.c @@ -0,0 +1,1490 @@ +/* + * Copyright (c) 2000-2002, 2004-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Summary for in-memory volume bitmap: + * A binary search tree is used to store bitmap segments that are + * partially full. If a segment does not exist in the tree, it + * can be assumed to be in the following state: + * 1. Full if the coresponding segment map bit is set + * 2. 
Empty (implied) + */ + +#include "Scavenger.h" + +#include <sys/disk.h> + +#include <bitstring.h> + +#define bit_dealloc(p) free(p) + +#define _VBC_DEBUG_ 0 + +enum { + kBitsPerByte = 8, + kBitsPerWord = 32, + kBitsPerSegment = 1024, + kBytesPerSegment = kBitsPerSegment / kBitsPerByte, + kWordsPerSegment = kBitsPerSegment / kBitsPerWord, + + kBitsWithinWordMask = kBitsPerWord-1, + kBitsWithinSegmentMask = kBitsPerSegment-1, + + kBMS_NodesPerPool = 450, + kBMS_PoolMax = 2000 +}; + + +#define kAllBitsSetInWord 0xFFFFFFFFu +#define kMSBBitSetInWord 0x80000000u + +enum { + kSettingBits = 1, + kClearingBits = 2, + kTestingBits = 3 +}; + +#define kEmptySegment 0 +#define kFullSegment 1 + +int gBitMapInited = 0; + +/* + * Bitmap segments that are full are marked in + * the gFullSegmentList (a bit string). + */ +bitstr_t* gFullSegmentList; +UInt32 gBitsMarked; +UInt32 gTotalBits; +UInt32 gTotalSegments; +UInt32* gFullBitmapSegment; /* points to a FULL bitmap segment*/ +UInt32* gEmptyBitmapSegment; /* points to an EMPTY bitmap segment*/ + +/* + * Bitmap Segment (BMS) Tree node + * Bitmap segments that are partially full are + * saved in the BMS Tree. + */ +typedef struct BMS_Node { + struct BMS_Node *left; + struct BMS_Node *right; + UInt32 segment; + UInt32 bitmap[kWordsPerSegment]; +} BMS_Node; + +BMS_Node *gBMS_Root; /* root of BMS tree */ +BMS_Node *gBMS_FreeNodes; /* list of free BMS nodes */ +BMS_Node *gBMS_PoolList[kBMS_PoolMax]; /* list of BMS node pools */ +int gBMS_PoolCount; /* count of pools allocated */ + +/* Bitmap operations routines */ +static int FindContigClearedBitmapBits (SVCB *vcb, UInt32 numBlocks, UInt32 *actualStartBlock); + +/* Segment Tree routines (binary search tree) */ +static int BMS_InitTree(void); +static int BMS_DisposeTree(void); +static BMS_Node * BMS_Lookup(UInt32 segment); +static BMS_Node * BMS_Insert(UInt32 segment, int segmentType); +static BMS_Node * BMS_Delete(UInt32 segment); +static void BMS_GrowNodePool(void); + +#if _VBC_DEBUG_ +static void BMS_PrintTree(BMS_Node * root); +static void BMS_MaxDepth(BMS_Node * root, int depth, int *maxdepth); +#endif + +/* + * Initialize our volume bitmap data structures + */ +int BitMapCheckBegin(SGlobPtr g) +{ + Boolean isHFSPlus; + + if (gBitMapInited) + return (0); + + isHFSPlus = VolumeObjectIsHFSPlus( ); + + gFullBitmapSegment = (UInt32 *)malloc(kBytesPerSegment); + memset((void *)gFullBitmapSegment, 0xff, kBytesPerSegment); + + gEmptyBitmapSegment = (UInt32 *)malloc(kBytesPerSegment); + memset((void *)gEmptyBitmapSegment, 0x00, kBytesPerSegment); + + gTotalBits = g->calculatedVCB->vcbTotalBlocks; + gTotalSegments = (gTotalBits / kBitsPerSegment); + if (gTotalBits % kBitsPerSegment) + ++gTotalSegments; + + gFullSegmentList = bit_alloc(gTotalSegments); + bit_nclear(gFullSegmentList, 0, gTotalSegments - 1); + + BMS_InitTree(); + gBitMapInited = 1; + gBitsMarked = 0; + + if (isHFSPlus) { + UInt16 alignBits; + + /* + * Allocate the VolumeHeader in the volume bitmap. + * Since the VH is the 3rd sector in we may need to + * add some alignment allocation blocks before it. 
+ */ + if (g->calculatedVCB->vcbBlockSize == 512) + alignBits = 2; + else if (g->calculatedVCB->vcbBlockSize == 1024) + alignBits = 1; + else + alignBits = 0; + + (void) CaptureBitmapBits(0, 1 + alignBits); + + if (g->calculatedVCB->vcbBlockSize == 512) + alignBits = 1; + else + alignBits = 0; + + (void) CaptureBitmapBits(gTotalBits - 1 - alignBits, 1 + alignBits); + } + + return (0); +} + +/* debugging stats */ +int gFullSegments = 0; +int gSegmentNodes = 0; + +int BitMapCheckEnd(void) +{ + if (gBitMapInited) { +#if _VBC_DEBUG_ + int maxdepth = 0; + + BMS_MaxDepth(gBMS_Root, 0, &maxdepth); + plog(" %d full segments, %d segment nodes (max depth was %d nodes)\n", + gFullSegments, gSegmentNodes, maxdepth); +#endif + free(gFullBitmapSegment); + gFullBitmapSegment = NULL; + + free(gEmptyBitmapSegment); + gEmptyBitmapSegment = NULL; + + bit_dealloc(gFullSegmentList); + gFullSegmentList = NULL; + + BMS_DisposeTree(); + gBitMapInited = 0; + } + return (0); +} + +/* Function: GetSegmentBitmap + * + * Description: Return bitmap segment corresponding to given startBit. + * + * 1. Calculate the segment number for given bit. + * 2. If the segment exists in full segment list, + * If bitOperation is to clear bits, + * a. Remove segment from full segment list. + * b. Insert a full segment in the bitmap tree. + * Else return pointer to dummy full segment + * 3. If segment found in tree, it is partially full. Return it. + * 4. If (2) and (3) are not true, it is a empty segment. + * If bitOperation is to set bits, + * a. Insert empty segment in the bitmap tree. + * Else return pointer to dummy empty segment. + * + * Input: + * 1. startBit - bit number (block number) to lookup + * 2. buffer - pointer to return pointer to bitmap segment + * 3. bitOperation - intent for new segment + * kSettingBits - caller wants to set bits + * kClearingBits - caller wants to clear bits + * kTestingBits - caller wants to test bits. + * + * Output: + * 1. buffer - pointer to desired segment + * returns zero on success, -1 on failure. + */ +static int GetSegmentBitmap(UInt32 startBit, UInt32 **buffer, int bitOperation) +{ + UInt32 segment; + BMS_Node *segNode = NULL; + + *buffer = NULL; + segment = startBit / kBitsPerSegment; + + // for a full seqment... + if (bit_test(gFullSegmentList, segment)) { + if (bitOperation == kClearingBits) { + bit_clear(gFullSegmentList, segment); + --gFullSegments; + if ((segNode = BMS_Insert(segment, kFullSegment)) != NULL) + *buffer = &segNode->bitmap[0]; + } else + *buffer = gFullBitmapSegment; + + // for a partially full segment.. + } else if ((segNode = BMS_Lookup(segment)) != NULL) { + *buffer = &segNode->bitmap[0]; + + // for an empty segment... 
+ } else { + if (bitOperation == kSettingBits) { + if ((segNode = BMS_Insert(segment, kEmptySegment)) != NULL) + *buffer = &segNode->bitmap[0]; + } else + *buffer = gEmptyBitmapSegment; + } + + if (*buffer == NULL) { +#if _VBC_DEBUG_ + plog("GetSegmentBitmap: couldn't get a node for block %d, segment %d\n", startBit, segment); +#endif + return (-1); /* oops */ + } + +#if 0 + if (segNode) { + int i; + plog(" segment %d: L=0x%08x, R=0x%08x \n< ", + (int)segNode->segment, (int)segNode->left, segNode->right); + for (i = 0; i < kWordsPerSegment; ++i) { + plog("0x%08x ", segNode->bitmap[i]); + if ((i & 0x3) == 0x3) + plog("\n "); + } + plog("\n"); + } + + if (bitOperation == kSettingBits && *buffer && bcmp(*buffer, gFullBitmapSegment, kBytesPerSegment) == 0) { + plog("*** segment %d (start blk %d) is already full!\n", segment, startBit); + exit(5); + } + if (bitOperation == kClearingBits && *buffer && bcmp(*buffer, gEmptyBitmapSegment, kBytesPerSegment) == 0) { + plog("*** segment %d (start blk %d) is already empty!\n", segment, startBit); + exit(5); + } +#endif + + return (0); +} + +/* Function: TestSegmentBitmap + * + * Description: Test if the current bitmap segment is a full + * segment or empty segment. + * If full segment, delete the segment, set corresponding full segment + * bit in gFullSegmentList, and update counters. + * If empty list, delete the segment from list. Note that we update + * the counter only for debugging purposes. + * + * Input: + * startBit - startBit of segment to test + * + * Output: + * nothing (void). + */ +void TestSegmentBitmap(UInt32 startBit) +{ + UInt32 segment; + BMS_Node *segNode = NULL; + + segment = startBit / kBitsPerSegment; + + if (bit_test(gFullSegmentList, segment)) + return; + + if ((segNode = BMS_Lookup(segment)) != NULL) { +#if 0 + int i; + plog("> "); + for (i = 0; i < kWordsPerSegment; ++i) { + plog("0x%08x ", segNode->bitmap[i]); + if ((i & 0x3) == 0x3) + plog("\n "); + } + plog("\n"); +#endif + if (segment != 0 && bcmp(&segNode->bitmap[0], gFullBitmapSegment, kBytesPerSegment) == 0) { + if (BMS_Delete(segment) != NULL) { + bit_set(gFullSegmentList, segment); + /* debugging stats */ + ++gFullSegments; + --gSegmentNodes; + } + } + + if (segment != 0 && bcmp(&segNode->bitmap[0], gEmptyBitmapSegment, kBytesPerSegment) == 0) { + if (BMS_Delete(segment) != NULL) { + /* debugging stats */ + --gSegmentNodes; + } + } + } +} + + +/* Function: CaptureBitmapBits + * + * Description: Set bits in the segmented bitmap from startBit upto + * bitCount bits. + * + * Note: This function is independent of the previous state of the bit + * to be set. Therefore single bit can be set multiple times. Setting a + * bit multiple times might result in incorrect total number of blocks used + * (which can be corrected using UpdateFreeBlockCount function). + * + * 1. Increment gBitsMarked with bitCount. + * 2. If first bit does not start on word boundary, special case it. + * 3. Set all whole words. + * 4. If not all bits in last word need to be set, special case it. + * 5. For 2, 3, and 4, call TestSegmentBitmap after writing one segment or + * setting all bits to optimize full and empty segment list. + * + * Input: + * startBit - bit number in segment bitmap to start set operation. + * bitCount - total number of bits to set. + * + * Output: + * zero on success, non-zero on failure. + * This function also returns E_OvlExt if any overlapping extent is found. 
+ */ +int CaptureBitmapBits(UInt32 startBit, UInt32 bitCount) +{ + Boolean overlap; + OSErr err; + UInt32 wordsLeft; + UInt32 bitMask; + UInt32 firstBit; + UInt32 numBits; + UInt32 *buffer; + UInt32 *currentWord; + + overlap = false; + if (bitCount == 0) + return (0); + + if ((startBit + bitCount) > gTotalBits) { + err = vcInvalidExtentErr; + goto Exit; + } + + /* count allocated bits */ + gBitsMarked += bitCount; + + /* + * Get the bitmap segment containing the first word to check + */ + err = GetSegmentBitmap(startBit, &buffer, kSettingBits); + if (err != noErr) goto Exit; + + /* Initialize buffer stuff */ + { + UInt32 wordIndexInSegment; + + wordIndexInSegment = (startBit & kBitsWithinSegmentMask) / kBitsPerWord; + currentWord = buffer + wordIndexInSegment; + wordsLeft = kWordsPerSegment - wordIndexInSegment; + } + + /* + * If the first bit to check doesn't start on a word + * boundary in the bitmap, then treat that first word + * specially. + */ + firstBit = startBit % kBitsPerWord; + if (firstBit != 0) { + bitMask = kAllBitsSetInWord >> firstBit; // turn off all bits before firstBit + numBits = kBitsPerWord - firstBit; // number of remaining bits in this word + if (numBits > bitCount) { + numBits = bitCount; // entire allocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last + } + + if (SWAP_BE32(*currentWord) & bitMask) { + overlap = true; + + //plog("(1) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord |= SWAP_BE32(bitMask); /* set the bits in the bitmap */ + + bitCount -= numBits; + ++currentWord; + --wordsLeft; + if (wordsLeft == 0 || bitCount == 0) + TestSegmentBitmap(startBit); + } + + /* + * Set whole words (32 bits) at a time. + */ + bitMask = kAllBitsSetInWord; + while (bitCount >= kBitsPerWord) { + /* See if it's time to move to the next bitmap segment */ + if (wordsLeft == 0) { + startBit += kBitsPerSegment; // generate a bit in the next bitmap segment + + err = GetSegmentBitmap(startBit, &buffer, kSettingBits); + if (err != noErr) goto Exit; + + // Readjust currentWord, wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerSegment; + } + + if (SWAP_BE32(*currentWord) & bitMask) { + overlap = true; + + //plog("(2) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord |= SWAP_BE32(bitMask); /* set the bits in the bitmap */ + + bitCount -= kBitsPerWord; + ++currentWord; + --wordsLeft; + if (wordsLeft == 0 || bitCount == 0) + TestSegmentBitmap(startBit); + } + + /* + * Check any remaining bits. + */ + if (bitCount != 0) { + bitMask = ~(kAllBitsSetInWord >> bitCount); // set first bitCount bits + if (wordsLeft == 0) { + startBit += kBitsPerSegment; + + err = GetSegmentBitmap(startBit, &buffer, kSettingBits); + if (err != noErr) goto Exit; + + currentWord = buffer; + wordsLeft = kWordsPerSegment; + } + + if (SWAP_BE32(*currentWord) & bitMask) { + overlap = true; + + //plog("(3) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord |= SWAP_BE32(bitMask); /* set the bits in the bitmap */ + + TestSegmentBitmap(startBit); + } +Exit: + return (overlap ? E_OvlExt : err); +} + + +/* Function: ReleaseBitMapBits + * + * Description: Clear bits in the segmented bitmap from startBit upto + * bitCount bits. + * + * Note: This function is independent of the previous state of the bit + * to clear. Therefore single bit can be cleared multiple times. 
Clearing a + * bit multiple times might result in incorrect total number of blocks used + * (which can be corrected using UpdateFreeBlockCount function). + * + * 1. Decrement gBitsMarked with bitCount. + * 2. If first bit does not start on word boundary, special case it. + * 3. Clear all whole words. + * 4. If partial bits in last word needs to be cleared, special case it. + * 5. For 2, 3, and 4, call TestSegmentBitmap after writing one segment or + * clearing all bits to optimize full and empty segment list. + * + * Input: + * startBit - bit number in segment bitmap to start clear operation. + * bitCount - total number of bits to clear. + * + * Output: + * zero on success, non-zero on failure. + * This function also returns E_OvlExt if any overlapping extent is found. + */ +int ReleaseBitmapBits(UInt32 startBit, UInt32 bitCount) +{ + Boolean overlap; + OSErr err; + UInt32 wordsLeft; + UInt32 bitMask; + UInt32 firstBit; + UInt32 numBits; + UInt32 *buffer; + UInt32 *currentWord; + + overlap = false; + if (bitCount == 0) + return (0); + + if ((startBit + bitCount) > gTotalBits) { + err = vcInvalidExtentErr; + goto Exit; + } + + /* decrment allocated bits */ + gBitsMarked -= bitCount; + + /* + * Get the bitmap segment containing the first word to check + */ + err = GetSegmentBitmap(startBit, &buffer, kClearingBits); + if (err != noErr) goto Exit; + + /* Initialize buffer stuff */ + { + UInt32 wordIndexInSegment; + + wordIndexInSegment = (startBit & kBitsWithinSegmentMask) / kBitsPerWord; + currentWord = buffer + wordIndexInSegment; + wordsLeft = kWordsPerSegment - wordIndexInSegment; + } + + /* + * If the first bit to check doesn't start on a word + * boundary in the bitmap, then treat that first word + * specially. + */ + firstBit = startBit % kBitsPerWord; + if (firstBit != 0) { + bitMask = kAllBitsSetInWord >> firstBit; // turn off all bits before firstBit + numBits = kBitsPerWord - firstBit; // number of remaining bits in this word + if (numBits > bitCount) { + numBits = bitCount; // entire deallocation is inside this one word + bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last + } + + if ((SWAP_BE32(*currentWord) & bitMask) != bitMask) { + overlap = true; + + //plog("(1) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord &= SWAP_BE32(~bitMask); /* clear the bits in the bitmap */ + + bitCount -= numBits; + ++currentWord; + --wordsLeft; + if (wordsLeft == 0 || bitCount == 0) + TestSegmentBitmap(startBit); + } + + /* + * Clear whole words (32 bits) at a time. + */ + bitMask = kAllBitsSetInWord; + while (bitCount >= kBitsPerWord) { + /* See if it's time to move to the next bitmap segment */ + if (wordsLeft == 0) { + startBit += kBitsPerSegment; // generate a bit in the next bitmap segment + + err = GetSegmentBitmap(startBit, &buffer, kClearingBits); + if (err != noErr) goto Exit; + + // Readjust currentWord, wordsLeft + currentWord = buffer; + wordsLeft = kWordsPerSegment; + } + + if ((SWAP_BE32(*currentWord) & bitMask) != bitMask) { + overlap = true; + + //plog("(2) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord &= SWAP_BE32(~bitMask); /* clear the bits in the bitmap */ + + bitCount -= kBitsPerWord; + ++currentWord; + --wordsLeft; + if (wordsLeft == 0 || bitCount == 0) + TestSegmentBitmap(startBit); + } + + /* + * Check any remaining bits. 
+ */ + if (bitCount != 0) { + bitMask = ~(kAllBitsSetInWord >> bitCount); // set first bitCount bits + if (wordsLeft == 0) { + startBit += kBitsPerSegment; + + err = GetSegmentBitmap(startBit, &buffer, kClearingBits); + if (err != noErr) goto Exit; + + currentWord = buffer; + wordsLeft = kWordsPerSegment; + } + + if ((SWAP_BE32(*currentWord) & bitMask) != bitMask) { + overlap = true; + + //plog("(3) overlapping file blocks! word: 0x%08x, mask: 0x%08x\n", *currentWord, bitMask); + } + + *currentWord &= SWAP_BE32(~bitMask); /* set the bits in the bitmap */ + + TestSegmentBitmap(startBit); + } +Exit: + return (overlap ? E_OvlExt : err); +} + +/* Function: CheckVolumeBitMap + * + * Description: Compares the in-memory volume bitmap with the on-disk + * volume bitmap. + * If repair is true, update the on-disk bitmap with the in-memory bitmap. + * If repair is false and the bitmaps don't match, an error message is + * printed and check stops. + * + * Input: + * 1. g - global scavenger structure + * 2. repair - indicate if a repair operation is requested or not. + * + * Output: + * zero on success, non-zero on failure. + */ +int CheckVolumeBitMap(SGlobPtr g, Boolean repair) +{ + UInt8 *vbmBlockP; + UInt32 *buffer; + UInt64 bit; /* 64-bit to avoid wrap around on volumes with 2^32 - 1 blocks */ + UInt32 bitsWithinFileBlkMask; + UInt32 fileBlk; + BlockDescriptor block; + ReleaseBlockOptions relOpt; + SFCB * fcb; + SVCB * vcb; + Boolean isHFSPlus; + Boolean foundOverAlloc = false; + int err = 0; + + vcb = g->calculatedVCB; + fcb = g->calculatedAllocationsFCB; + isHFSPlus = VolumeObjectIsHFSPlus( ); + + if ( vcb->vcbFreeBlocks != (vcb->vcbTotalBlocks - gBitsMarked) ) { + vcb->vcbFreeBlocks = vcb->vcbTotalBlocks - gBitsMarked; + MarkVCBDirty(vcb); + } + + vbmBlockP = (UInt8 *)NULL; + block.buffer = (void *)NULL; + relOpt = kReleaseBlock; + if ( isHFSPlus ) + bitsWithinFileBlkMask = (fcb->fcbBlockSize * 8) - 1; + else + bitsWithinFileBlkMask = (kHFSBlockSize * 8) - 1; + fileBlk = (isHFSPlus ? 0 : vcb->vcbVBMSt); + + /* + * Loop through all the bitmap segments and compare + * them against the on-disk bitmap. + */ + for (bit = 0; bit < gTotalBits; bit += kBitsPerSegment) { + (void) GetSegmentBitmap(bit, &buffer, kTestingBits); + + /* + * When we cross file block boundries read a new block from disk. 
+ */ + if ((bit & bitsWithinFileBlkMask) == 0) { + if (isHFSPlus) { + if (block.buffer) { + err = ReleaseFileBlock(fcb, &block, relOpt); + ReturnIfError(err); + } + err = GetFileBlock(fcb, fileBlk, kGetBlock, &block); + } else /* plain HFS */ { + if (block.buffer) { + err = ReleaseVolumeBlock(vcb, &block, relOpt | kSkipEndianSwap); + ReturnIfError(err); + } + err = GetVolumeBlock(vcb, fileBlk, kGetBlock | kSkipEndianSwap, &block); + } + ReturnIfError(err); + + vbmBlockP = (UInt8 *) block.buffer; + relOpt = kReleaseBlock; + g->TarBlock = fileBlk; + ++fileBlk; + } + if (memcmp(buffer, vbmBlockP + (bit & bitsWithinFileBlkMask)/8, kBytesPerSegment) == 0) + continue; + + if (repair) { + bcopy(buffer, vbmBlockP + (bit & bitsWithinFileBlkMask)/8, kBytesPerSegment); + relOpt = kForceWriteBlock; + } else { + int underalloc = 0; + int indx; +#if _VBC_DEBUG_ + int i, j; + UInt32 *disk_buffer; + UInt32 dummy, block_num; + + plog(" disk buffer + %d\n", (bit & bitsWithinFileBlkMask)/8); + plog("start block number for segment = %qu\n", bit); + plog("segment %qd\n", bit / kBitsPerSegment); + + plog("Memory:\n"); + for (i = 0; i < kWordsPerSegment; ++i) { + plog("0x%08x ", buffer[i]); + if ((i & 0x7) == 0x7) + plog("\n"); + } + + disk_buffer = (UInt32*) (vbmBlockP + (bit & bitsWithinFileBlkMask)/8); + plog("Disk:\n"); + for (i = 0; i < kWordsPerSegment; ++i) { + plog("0x%08x ", disk_buffer[i]); + if ((i & 0x7) == 0x7) + plog("\n"); + } + + plog ("\n"); + for (i = 0; i < kWordsPerSegment; ++i) { + /* Compare each word in the segment */ + if (buffer[i] != disk_buffer[i]) { + dummy = 0x80000000; + /* If two words are different, compare each bit in the word */ + for (j = 0; j < kBitsPerWord; ++j) { + /* If two bits are different, calculate allocation block number */ + if ((buffer[i] & dummy) != (disk_buffer[i] & dummy)) { + block_num = bit + (i * kBitsPerWord) + j; + if (buffer[i] & dummy) { + plog ("Allocation block %u should be marked used on disk.\n", block_num); + } else { + plog ("Allocation block %u should be marked free on disk.\n", block_num); + } + } + dummy = dummy >> 1; + } + } + } +#endif + /* + * We have at least one difference. If we have over-allocated (that is, the + * volume bitmap says a block is allocated, but our counts say it isn't), then + * this is a lessor error. If we've under-allocated (that is, the volume bitmap + * says a block is available, but our counts say it is in use), then this is a + * bigger problem -- it can lead to overlapping extents. + * + * Once we determine we have under-allocated, we can just stop and print out + * the message. + */ + for (indx = 0; indx < kBytesPerSegment; indx++) { + uint8_t *bufp, *diskp; + bufp = (uint8_t *)buffer; + diskp = vbmBlockP + (bit & bitsWithinFileBlkMask)/8; + if (bufp[indx] & ~diskp[indx]) { + underalloc++; + break; + } + } + g->VIStat = g->VIStat | S_VBM; + if (underalloc) { + fsckPrint(g->context, E_VBMDamaged); + break; /* stop checking after first miss */ + } else if (!foundOverAlloc) { + /* Only print out a message on the first find */ + fsckPrint(g->context, E_VBMDamagedOverAlloc); + foundOverAlloc = true; + } + } + ++g->itemsProcessed; + } + + if (block.buffer) { + if (isHFSPlus) + (void) ReleaseFileBlock(fcb, &block, relOpt); + else + (void) ReleaseVolumeBlock(vcb, &block, relOpt | kSkipEndianSwap); + } + + return (0); +} + +/* Function: UpdateFreeBlockCount + * + * Description: Re-calculate the total bits marked in in-memory bitmap + * by traversing the entire bitmap. 
Update the total number of bits set in + * the in-memory volume bitmap and the volume free block count. + * + * All the bits representing the blocks that are beyond total allocation + * blocks of the volume are intialized to zero in the last bitmap segment. + * This function checks for bits marked, therefore we do not special case + * the last bitmap segment. + * + * Input: + * g - global scavenger structure pointer. + * + * Output: + * nothing (void) + */ +void UpdateFreeBlockCount(SGlobPtr g) +{ + int i; + UInt32 newBitsMarked = 0; + UInt32 bit; + UInt32 *buffer; + UInt32 curWord; + SVCB * vcb = g->calculatedVCB; + + /* Loop through all the bitmap segments */ + for (bit = 0; bit < gTotalBits; bit += kBitsPerSegment) { + (void) GetSegmentBitmap(bit, &buffer, kTestingBits); + + /* All bits in segment are set */ + if (buffer == gFullBitmapSegment) { + newBitsMarked += kBitsPerSegment; + continue; + } + + /* All bits in segment are clear */ + if (buffer == gEmptyBitmapSegment) { + continue; + } + + /* Segment is partially full */ + for (i = 0; i < kWordsPerSegment; i++) { + if (buffer[i] == kAllBitsSetInWord) { + newBitsMarked += kBitsPerWord; + } else { + curWord = SWAP_BE32(buffer[i]); + while (curWord) { + newBitsMarked += curWord & 1; + curWord >>= 1; + } + } + } + } + + /* Update total bits marked count for in-memory bitmap */ + if (gBitsMarked != newBitsMarked) { + gBitsMarked = newBitsMarked; + } + + /* Update volume free block count */ + if (vcb->vcbFreeBlocks != (vcb->vcbTotalBlocks - gBitsMarked)) { + vcb->vcbFreeBlocks = vcb->vcbTotalBlocks - gBitsMarked; + MarkVCBDirty(vcb); + } +} + +/* Function: FindContigClearedBitmapBits + * + * Description: Find contigous free bitmap bits (allocation blocks) from + * the in-memory volume bitmap. If found, the bits are not marked as + * used. + * + * The function traverses the entire in-memory volume bitmap. It keeps + * a count of contigous cleared bits and the first cleared bit seen in + * the current sequence. + * If it sees a set bit, it re-intializes the count to the number of + * blocks to be found and first cleared bit as zero. + * If it sees a cleared bit, it decrements the count of number of blocks + * to be found cleared. If the first cleared bit was set to zero, + * it initializes it with the current bit. If the count of number + * of blocks becomes zero, the function returns. + * + * The function takes care if the last bitmap segment is paritally used + * to represented the total number of allocation blocks. + * + * Input: + * 1. vcb - pointer to volume information + * 2. numBlocks - number of free contigous blocks + * 3. actualStartBlock - pointer to return the start block, if contigous + * free blocks found. + * + * Output: + * 1. actualStartBlock - pointer to return the start block, if contigous + * free blocks found. + * On success, returns zero. 
+ * On failure, non-zero value + * ENOSPC - No contigous free blocks were found of given length + */ +static int FindContigClearedBitmapBits (SVCB *vcb, UInt32 numBlocks, UInt32 *actualStartBlock) +{ + int i, j; + int retval = ENOSPC; + UInt32 bit; + UInt32 *buffer; + UInt32 curWord; + UInt32 validBitsInSegment; /* valid bits remaining (considering totalBits) in segment */ + UInt32 validBitsInWord; /* valid bits remaining (considering totalBits) in word */ + UInt32 bitsRemain = numBlocks; /* total free bits more to search */ + UInt32 startBlock = 0; /* start bit for free bits sequence */ + + /* For all segments except the last segments, number of valid bits + * is always total number of bits represented by the segment + */ + validBitsInSegment = kBitsPerSegment; + + /* For all words except the last word, the number of valid bits + * is always total number of bits represented by the word + */ + validBitsInWord = kBitsPerWord; + + /* Loop through all the bitmap segments */ + for (bit = 0; bit < gTotalBits; bit += kBitsPerSegment) { + (void) GetSegmentBitmap(bit, &buffer, kTestingBits); + + /* If this is last segment, calculate valid bits remaining */ + if ((gTotalBits - bit) < kBitsPerSegment) { + validBitsInSegment = gTotalBits - bit; + } + + /* All bits in segment are set */ + if (buffer == gFullBitmapSegment) { + /* Reset our counters */ + startBlock = 0; + bitsRemain = numBlocks; + continue; + } + + /* All bits in segment are clear */ + if (buffer == gEmptyBitmapSegment) { + /* If startBlock is not initialized, initialize it */ + if (bitsRemain == numBlocks) { + startBlock = bit; + } + /* If the total number of required free blocks is greater than + * total number of blocks represented in one free segment, include + * entire segment in our count + * If the total number of required free blocks is less than the + * total number of blocks represented in one free segment, include + * only the remaining free blocks in the count and break out. + */ + if (bitsRemain > validBitsInSegment) { + bitsRemain -= validBitsInSegment; + continue; + } else { + bitsRemain = 0; + break; + } + } + + /* Segment is partially full */ + for (i = 0; i < kWordsPerSegment; i++) { + /* All bits in a word are set */ + if (buffer[i] == kAllBitsSetInWord) { + /* Reset our counters */ + startBlock = 0; + bitsRemain = numBlocks; + } else { + /* Not all bits in a word are set */ + + /* If this is the last segment, check if the current word + * is the last word containing valid bits. 
+ */ + if (validBitsInSegment != kBitsPerSegment) { + if ((validBitsInSegment - (i * kBitsPerWord)) < kBitsPerWord) { + /* Calculate the total valid bits in last word */ + validBitsInWord = validBitsInSegment - (i * kBitsPerWord); + } + } + + curWord = SWAP_BE32(buffer[i]); + /* Check every bit in the word */ + for (j = 0; j < validBitsInWord; j++) { + if (curWord & kMSBBitSetInWord) { + /* The bit is set, reset our counters */ + startBlock = 0; + bitsRemain = numBlocks; + } else { + /* The bit is clear */ + if (bitsRemain == numBlocks) { + startBlock = bit + (i * kBitsPerWord) + j; + } + bitsRemain--; + if (bitsRemain == 0) { + goto out; + } + } + curWord <<= 1; + } /* for - checking bits set in word */ + + /* If this is last valid word, stop the search */ + if (validBitsInWord != kBitsPerWord) { + goto out; + } + } /* else - not all bits set in a word */ + } /* for - segment is partially full */ + } /* for - loop over all segments */ + +out: + if (bitsRemain == 0) { + /* Return the new start block found */ + *actualStartBlock = startBlock; + retval = 0; + } else { + *actualStartBlock = 0; + } + + return retval; +} + +/* Function: AllocateContigBitmapBits + * + * Description: Find contigous free bitmap bits (allocation blocks) from + * the in-memory volume bitmap. If found, also mark the bits as used. + * + * Input: + * 1. vcb - pointer to volume information + * 2. numBlocks - number of free contigous blocks + * 3. actualStartBlock - pointer to return the start block, if contigous + * free blocks found. + * + * Output: + * 1. actualStartBlock - pointer to return the start block, if contigous + * free blocks found. + * On success, returns zero. + * On failure, non-zero value + * ENOENT - No contigous free blocks were found of given length + * E_OvlExt - Free blocks found are already allocated (overlapping + * extent found). 
+ */ +int AllocateContigBitmapBits (SVCB *vcb, UInt32 numBlocks, UInt32 *actualStartBlock) +{ + int error; + + error = FindContigClearedBitmapBits (vcb, numBlocks, actualStartBlock); + if (error == noErr) { + error = CaptureBitmapBits (*actualStartBlock, numBlocks); + } + + return error; +} + +enum { kMaxTrimExtents = 256 }; +dk_extent_t gTrimExtents[kMaxTrimExtents]; +dk_unmap_t gTrimData; + +static void TrimInit(void) +{ + bzero(&gTrimData, sizeof(gTrimData)); + gTrimData.extents = gTrimExtents; +} + +static void TrimFlush(void) +{ + int err; + + if (gTrimData.extentsCount == 0) + { + DPRINTF(d_info|d_trim, "TrimFlush: nothing to flush\n"); + return; + } + + err = ioctl(fsreadfd, DKIOCUNMAP, &gTrimData); + if (err == -1) + { + DPRINTF(d_error|d_trim, "TrimFlush: error %d\n", errno); + } + gTrimData.extentsCount = 0; +} + +static void TrimExtent(SGlobPtr g, UInt32 startBlock, UInt32 blockCount) +{ + UInt64 offset; + UInt64 length; + + DPRINTF(d_info|d_trim, "Trimming: startBlock=%10u, blockCount=%10u\n", startBlock, blockCount); + + offset = (UInt64) startBlock * g->calculatedVCB->vcbBlockSize; + if (VolumeObjectIsHFSPlus()) + offset += g->calculatedVCB->vcbEmbeddedOffset; + else + offset += g->calculatedVCB->vcbAlBlSt * 512ULL; + length = (UInt64) blockCount * g->calculatedVCB->vcbBlockSize; + + gTrimExtents[gTrimData.extentsCount].offset = offset; + gTrimExtents[gTrimData.extentsCount].length = length; + if (++gTrimData.extentsCount == kMaxTrimExtents) + TrimFlush(); +} + +/* Function: TrimFreeBlocks + * + * Description: Find contiguous ranges of free allocation blocks (cleared bits + * in the bitmap) and issue DKIOCUNMAP requests to tell the underlying device + * that those blocks are not in use. This allows the device to reclaim that + * space. + * + * Input: + * g - global scavenger structure pointer + */ +void TrimFreeBlocks(SGlobPtr g) +{ + UInt32 *buffer; + UInt32 bit; + UInt32 wordWithinSegment; + UInt32 bitWithinWordMask; + UInt32 currentWord; + UInt32 startBlock; + UInt32 blockCount; + UInt32 totalTrimmed = 0; + + TrimInit(); + + /* We haven't seen any free blocks yet. */ + startBlock = 0; + blockCount = 0; + + /* Loop through bitmap segments */ + for (bit = 0; bit < gTotalBits; /* bit incremented below */) { + assert((bit % kBitsPerSegment) == 0); + + (void) GetSegmentBitmap(bit, &buffer, kTestingBits); + + if (buffer == gFullBitmapSegment) { + /* + * There are no free blocks in this segment, so trim any previous + * extent (that ended at the end of the previous segment). + */ + if (blockCount != 0) { + TrimExtent(g, startBlock, blockCount); + totalTrimmed += blockCount; + blockCount = 0; + } + bit += kBitsPerSegment; + continue; + } + + if (buffer == gEmptyBitmapSegment) { + /* + * This entire segment is free. Add it to a previous extent, or + * start a new one. + */ + if (blockCount == 0) { + startBlock = bit; + } + if (gTotalBits - bit < kBitsPerSegment) { + blockCount += gTotalBits - bit; + } else { + blockCount += kBitsPerSegment; + } + bit += kBitsPerSegment; + continue; + } + + /* + * If we get here, the current segment has some free and some used + * blocks, so we have to iterate over them. + */ + for (wordWithinSegment = 0; + wordWithinSegment < kWordsPerSegment && bit < gTotalBits; + ++wordWithinSegment) + { + assert((bit % kBitsPerWord) == 0); + + currentWord = SWAP_BE32(buffer[wordWithinSegment]); + + /* Iterate over all the bits in the current word. 
*/ + for (bitWithinWordMask = kMSBBitSetInWord; + bitWithinWordMask != 0 && bit < gTotalBits; + ++bit, bitWithinWordMask >>= 1) + { + if (currentWord & bitWithinWordMask) { + /* Found a used block. */ + if (blockCount != 0) { + TrimExtent(g, startBlock, blockCount); + totalTrimmed += blockCount; + blockCount = 0; + } + } else { + /* + * Found an unused block. Add it to the current extent, + * or start a new one. + */ + if (blockCount == 0) { + startBlock = bit; + } + ++blockCount; + } + } + } + } + if (blockCount != 0) { + TrimExtent(g, startBlock, blockCount); + totalTrimmed += blockCount; + blockCount = 0; + } + + TrimFlush(); + DPRINTF(d_info|d_trim, "Trimmed %u allocation blocks.\n", totalTrimmed); +} + +/* Function: IsTrimSupported + * + * Description: Determine whether the device we're verifying/repairing suppports + * trimming (i.e., whether it supports DKIOCUNMAP). + * + * Result: + * non-zero Trim supported + * zero Trim not supported + */ +int IsTrimSupported(void) +{ + int err; + uint32_t features = 0; + + err = ioctl(fsreadfd, DKIOCGETFEATURES, &features); + if (err < 0) + { + /* Can't tell if UNMAP is supported. Assume no. */ + return 0; + } + + return features & DK_FEATURE_UNMAP; +} + +/* + * BITMAP SEGMENT TREE + * + * A binary search tree is used to store bitmap segments that are + * partially full. If a segment does not exist in the tree, it + * can be assumed to be in the following state: + * 1. Full if the coresponding segment map bit is set + * 2. Empty (implied) + */ + +static int +BMS_InitTree(void) +{ + gBMS_PoolCount = 0; + BMS_GrowNodePool(); + + gBMS_Root = gBMS_FreeNodes; + gBMS_FreeNodes = gBMS_FreeNodes->right; + gBMS_Root->right = NULL; + + return (0); +} + + +static int +BMS_DisposeTree(void) +{ + while(gBMS_PoolCount > 0) + free(gBMS_PoolList[--gBMS_PoolCount]); + + gBMS_Root = gBMS_FreeNodes = 0; + return (0); +} + + +static BMS_Node * +BMS_Lookup(UInt32 segment) +{ + BMS_Node *ptree = gBMS_Root; + + while (ptree && ptree->segment != segment) { + + if (segment > ptree->segment) + ptree = ptree->right; + else + ptree = ptree->left; + } + + return ((BMS_Node *)ptree); +} + + +static BMS_Node * +BMS_InsertTree(BMS_Node *NewEntry) +{ + BMS_Node *ptree; + register UInt32 segment; + + segment = NewEntry->segment; + ptree = gBMS_Root; + if (ptree == (BMS_Node *)NULL) { + gBMS_Root = NewEntry; + return (NewEntry); + } + + while (ptree) { + if (segment > ptree->segment) { /* walk the right sub-tree */ + if (ptree->right) + ptree = ptree->right; + else { + ptree->right = NewEntry; + return (ptree); + } + } + else { /* walk the left sub-tree */ + if (ptree->left) + ptree = ptree->left; + else { + ptree->left = NewEntry; + return (ptree); + } + } + } + + return ((BMS_Node *)NULL); +} + + +/* insert a new segment into the tree */ +static BMS_Node * +BMS_Insert(UInt32 segment, int segmentType) +{ + BMS_Node *new; + + if ((new = gBMS_FreeNodes) == NULL) { + BMS_GrowNodePool(); + if ((new = gBMS_FreeNodes) == NULL) + return ((BMS_Node *)NULL); + } + + gBMS_FreeNodes = gBMS_FreeNodes->right; + + ++gSegmentNodes; /* debugging stats */ + + new->right = NULL; + new->segment = segment; + if (segmentType == kFullSegment) + bcopy(gFullBitmapSegment, new->bitmap, kBytesPerSegment); + else + bzero(new->bitmap, sizeof(new->bitmap)); + + if (BMS_InsertTree(new) != NULL) + return (new); + else + return ((BMS_Node *)NULL); +} + + +static BMS_Node * +BMS_Delete(UInt32 segment) +{ + BMS_Node *seg_found, *pprevious, *pnext, *pnextl, *psub; + + pprevious = NULL; + seg_found = gBMS_Root; + 
+ /* don't allow the root to be deleted! */ + if (seg_found->segment == segment) + return ((BMS_Node *)NULL); + + while (seg_found && seg_found->segment != segment) { + pprevious = seg_found; + if (segment > seg_found->segment) + seg_found = seg_found->right; + else + seg_found = seg_found->left; + } + + if (seg_found) { + /* + * we found the entry, now reorg the sub-trees + * spanning from our node. + */ + if ((pnext = seg_found->right)) { + /* + * Tree pruning: take the left branch of the + * current node and place it at the lowest + * left branch of the current right branch + */ + psub = pnext; + + /* walk the Right/Left sub tree from current node */ + while ((pnextl = psub->left)) + psub = pnextl; + + /* plug the old left tree to the new ->Right leftmost node */ + psub->left = seg_found->left; + } else { /* only left sub-tree, simple case */ + pnext = seg_found->left; + } + /* + * Now, plug the current node sub tree to + * the good pointer of our parent node. + */ + if (pprevious->left == seg_found) + pprevious->left = pnext; + else + pprevious->right = pnext; + + /* add node back to the free-list */ + bzero(seg_found, sizeof(BMS_Node)); + seg_found->right = gBMS_FreeNodes; + gBMS_FreeNodes = seg_found; + } + + return (seg_found); +} + + +static void +BMS_GrowNodePool(void) +{ + BMS_Node *nodePool; + short i; + + if (gBMS_PoolCount > kBMS_PoolMax) + return; + + nodePool = (BMS_Node *)malloc(sizeof(BMS_Node) * kBMS_NodesPerPool); + if (nodePool != NULL) { + bzero(&nodePool[0], sizeof(BMS_Node) * kBMS_NodesPerPool); + for (i = 1 ; i < kBMS_NodesPerPool ; i++) { + (&nodePool[i-1])->right = &nodePool[i]; + } + + gBMS_FreeNodes = &nodePool[0]; + gBMS_PoolList[gBMS_PoolCount++] = nodePool; + } +} + + +#if _VBC_DEBUG_ +static void +BMS_MaxDepth(BMS_Node * root, int depth, int *maxdepth) +{ + if (root) { + depth++; + if (depth > *maxdepth) + *maxdepth = depth; + BMS_MaxDepth(root->left, depth, maxdepth); + BMS_MaxDepth(root->right, depth, maxdepth); + } +} + +static void +BMS_PrintTree(BMS_Node * root) +{ + if (root) { + BMS_PrintTree(root->left); + plog("seg %d\n", root->segment); + BMS_PrintTree(root->right); + } +} +#endif + diff --git a/fsck_hfs/dfalib/dirhardlink.c b/fsck_hfs/dfalib/dirhardlink.c new file mode 100644 index 0000000..ed1b0ed --- /dev/null +++ b/fsck_hfs/dfalib/dirhardlink.c @@ -0,0 +1,1537 @@ +/* + * Copyright (c) 2007-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "Scavenger.h" +#include "SRuntime.h" +#include <sys/stat.h> +#include <ctype.h> + +/* Looks up a catalog file/folder record for given file/folder ID. 
+ * The functionality of this routine is same as GetCatalogRecord() in + * dfalib/SRepair.c, but this implementation is better because it does not + * change the lastIterator stored in the catalog BTreeControlBlock. + * Therefore this function does not interfere with other catalog btree + * iterations. + */ +OSErr GetCatalogRecordByID(SGlobPtr GPtr, UInt32 file_id, Boolean isHFSPlus, CatalogKey *key, CatalogRecord *rec, uint16_t *recsize) +{ + int retval = 0; + SFCB *fcb; + BTreeControlBlock *btcb; + FSBufferDescriptor buf_desc; + BTreeIterator search_iterator; + BTreeIterator result_iterator; + uint32_t thread_key_parentID = 0; + + fcb = GPtr->calculatedCatalogFCB; + btcb = (BTreeControlBlock *)fcb->fcbBtree; + + /* Lookup the thread record with given file/folderID */ + bzero(&buf_desc, sizeof(buf_desc)); + bzero(&search_iterator, sizeof(search_iterator)); + buf_desc.bufferAddress = rec; + buf_desc.itemCount = 1; + buf_desc.itemSize = sizeof(CatalogRecord); + + BuildCatalogKey(file_id, NULL, isHFSPlus, (CatalogKey *)&(search_iterator.key)); + retval = BTSearchRecord(fcb, &search_iterator, kInvalidMRUCacheKey, + &buf_desc, recsize, &result_iterator); + if (retval) { + goto out; + } + + /* Check if really we found a thread record */ + if (isHFSPlus) { + if ((rec->recordType != kHFSPlusFolderThreadRecord) && + (rec->recordType != kHFSPlusFileThreadRecord)) { + retval = ENOENT; + goto out; + } + } else { + if ((rec->recordType != kHFSFolderThreadRecord) && + (rec->recordType != kHFSFileThreadRecord)) { + retval = ENOENT; + goto out; + } + } + + if (isHFSPlus) { + thread_key_parentID = ((CatalogKey *)&(result_iterator.key))->hfsPlus.parentID; + } + + /* Lookup the corresponding file/folder record */ + bzero(&buf_desc, sizeof(buf_desc)); + bzero(&search_iterator, sizeof(search_iterator)); + buf_desc.bufferAddress = rec; + buf_desc.itemCount = 1; + buf_desc.itemSize = sizeof(CatalogRecord); + + if (isHFSPlus) { + BuildCatalogKey(rec->hfsPlusThread.parentID, + (CatalogName *)&(rec->hfsPlusThread.nodeName), + isHFSPlus, (CatalogKey *)&(search_iterator.key)); + } else { + BuildCatalogKey(rec->hfsThread.parentID, + (CatalogName *)&(rec->hfsThread.nodeName), + isHFSPlus, (CatalogKey *)&(search_iterator.key)); + } + retval = BTSearchRecord(fcb, &search_iterator, kInvalidMRUCacheKey, + &buf_desc, recsize, &result_iterator); + if (retval) { + goto out; + } + + bcopy(&(result_iterator.key), key, CalcKeySize(btcb, &(result_iterator.key))); + + if (isHFSPlus) { + /* For catalog file or folder record, the parentID in the thread + * record's key should be equal to the fileID in the file/folder + * record --- which is equal to the ID of the file/folder record + * that is being looked up. If not, mark the volume for repair. 
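+	 * (Example: a lookup of file_id 1234 should find a thread record whose
+	 * key parentID is 1234 and, through it, a file/folder record whose own
+	 * ID is 1234; a mismatch is flagged below (E_IncorrectNumThdRcd) and the
+	 * volume is marked for repair.)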
+ */ + if (thread_key_parentID != rec->hfsPlusFile.fileID) { + RcdError(GPtr, E_IncorrectNumThdRcd); + if (fsckGetVerbosity(GPtr->context) >= kDebugLog) { + plog("\t%s: fileID=%u, thread.key.parentID=%u, record.fileID=%u\n", + __FUNCTION__, file_id, thread_key_parentID, rec->hfsPlusFile.fileID); + } + GPtr->CBTStat |= S_Orphan; + } + } + +out: + return retval; +} + +/* Record minor repair order for invalid permissions for directory hardlink priv dir */ +static int record_privdir_bad_perm(SGlobPtr gptr, uint32_t cnid) +{ + RepairOrderPtr p; + + RcdError (gptr, E_BadPermPrivDir); + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = E_BadPermPrivDir; + p->parid = cnid; + gptr->CatStat |= S_LinkErrRepair; + + return 0; +} + +/* Record minor repair order for invalid flags for file/directory hard links */ +int record_link_badflags(SGlobPtr gptr, uint32_t link_id, Boolean isdir, + uint32_t incorrect, uint32_t correct) +{ + RepairOrderPtr p; + char str1[12]; + char str2[12]; + + fsckPrint(gptr->context, isdir? E_DirLinkBadFlags : E_FileLinkBadFlags, link_id); + snprintf(str1, sizeof(str1), "0x%x", correct); + snprintf(str2, sizeof(str2), "0x%x", incorrect); + fsckPrint(gptr->context, E_BadValue, str1, str2); + + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = isdir ? E_DirLinkBadFlags : E_FileLinkBadFlags; + p->correct = correct; + p->incorrect = incorrect; + p->parid = link_id; + + gptr->CatStat |= S_LinkErrRepair; + + return 0; +} + +/* Record minor repair order for invalid flags for file/directory inode + * If a corruption is recorded during verification, do not check for + * duplicates as none should exist. If this corruption is recorded + * during repair, check for duplicates because before early termination + * of verification we might have seen this corruption. + */ +int record_inode_badflags(SGlobPtr gptr, uint32_t inode_id, Boolean isdir, + uint32_t incorrect, uint32_t correct, Boolean check_duplicates) +{ + RepairOrderPtr p; + char str1[12]; + char str2[12]; + + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = isdir ? E_DirInodeBadFlags : E_FileInodeBadFlags; + p->correct = correct; + p->incorrect = incorrect; + p->parid = inode_id; + + gptr->CatStat |= S_LinkErrRepair; + + if ((check_duplicates != 0) && + (IsDuplicateRepairOrder(gptr, p) == 1)) { + DeleteRepairOrder(gptr, p); + } else { + fsckPrint(gptr->context, isdir? E_DirInodeBadFlags : E_FileInodeBadFlags, inode_id); + snprintf(str1, sizeof(str1), "0x%x", correct); + snprintf(str2, sizeof(str2), "0x%x", incorrect); + fsckPrint(gptr->context, E_BadValue, str1, str2); + } + + return 0; +} + +/* Record minor repair order for invalid parent of directory/file inode */ +/* XXX -- not repaired yet (file or directory) */ +static int record_inode_badparent(SGlobPtr gptr, uint32_t inode_id, Boolean isdir, + uint32_t incorrect, uint32_t correct) +{ + char str1[12]; + char str2[12]; + + fsckPrint(gptr->context, isdir? 
E_DirInodeBadParent : E_FileInodeBadParent, inode_id); + snprintf(str1, sizeof(str1), "%u", correct); + snprintf(str2, sizeof(str2), "%u", incorrect); + fsckPrint(gptr->context, E_BadValue, str1, str2); + + gptr->CatStat |= S_LinkErrNoRepair; + + return 0; +} + +/* Record minor repair order for invalid name of directory inode */ +/* XXX - not repaired yet (file or directory) */ +static int record_inode_badname(SGlobPtr gptr, uint32_t inode_id, + char *incorrect, char *correct) +{ + fsckPrint(gptr->context, E_DirInodeBadName, inode_id); + fsckPrint(gptr->context, E_BadValue, correct, incorrect); + + gptr->CatStat |= S_LinkErrNoRepair; + + return 0; +} + +/* Record corruption for incorrect number of directory hard links and + * directory inode, and invalid list of directory hard links + */ +void record_link_badchain(SGlobPtr gptr, Boolean isdir) +{ + int fval = (isdir ? S_DirHardLinkChain : S_FileHardLinkChain); + int err = (isdir ? E_DirHardLinkChain : E_FileHardLinkChain); + if ((gptr->CatStat & fval) == 0) { + fsckPrint(gptr->context, err); + gptr->CatStat |= fval; + } +} + +/* Record minor repair for invalid ownerflags for directory hard links. + * If corruption is recorded during verification, do not check for + * duplicates as none should exist. If this corruption is recorded + * during repair, check for duplicates because before early termination + * of verification, we might have seen this corruption. + */ +int record_dirlink_badownerflags(SGlobPtr gptr, uint32_t file_id, + uint8_t incorrect, uint8_t correct, int check_duplicates) +{ + RepairOrderPtr p; + char str1[12]; + char str2[12]; + + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = E_DirHardLinkOwnerFlags; + p->correct = correct; + p->incorrect = incorrect; + p->parid = file_id; + + gptr->CatStat |= S_LinkErrRepair; + + if ((check_duplicates != 0) && + (IsDuplicateRepairOrder(gptr, p) == 1)) { + DeleteRepairOrder(gptr, p); + } else { + fsckPrint(gptr->context, E_DirHardLinkOwnerFlags, file_id); + snprintf(str1, sizeof(str1), "0x%x", correct); + snprintf(str2, sizeof(str2), "0x%x", incorrect); + fsckPrint(gptr->context, E_BadValue, str1, str2); + } + + return 0; +} + +/* Record minor repair for invalid finderInfo for directory hard links */ +int record_link_badfinderinfo(SGlobPtr gptr, uint32_t file_id, Boolean isdir) +{ + RepairOrderPtr p; + + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = isdir ? E_DirHardLinkFinderInfo : E_FileHardLinkFinderInfo; + p->parid = file_id; + + gptr->CatStat |= (isdir ? S_DirHardLinkChain : S_FileHardLinkChain); + + /* Recording this corruption is being called from both + * inode_check() and dirlink_check(). It is possible that + * the error we are adding is a duplicate error. Check for + * duplicates, and if any duplicates are found delete the new + * repair order. + */ + if (IsDuplicateRepairOrder(gptr, p) == 1) { + DeleteRepairOrder(gptr, p); + } else { + fsckPrint(gptr->context, p->type, file_id); + } + + return 0; +} + +/* Record minor repair for invalid flags in one of the parent directories + * of a directory hard link. 
+ */ +static int record_parent_badflags(SGlobPtr gptr, uint32_t dir_id, + uint32_t incorrect, uint32_t correct) +{ + RepairOrderPtr p; + char str1[12]; + char str2[12]; + + p = AllocMinorRepairOrder(gptr, 0); + if (p == NULL) { + return ENOMEM; + } + + p->type = E_DirLinkAncestorFlags; + p->correct = correct; + p->incorrect = incorrect; + p->parid = dir_id; + + gptr->CatStat |= S_LinkErrRepair; + + /* This corruption is logged when traversing ancestors of all + * directory hard links. Therefore common corrupt ancestors of + * directory hard link will result in duplicate repair orders. + * Check for duplicates, and if any duplicates are found delete + * the new repair order. + */ + if (IsDuplicateRepairOrder(gptr, p) == 1) { + DeleteRepairOrder(gptr, p); + } else { + fsckPrint(gptr->context, E_DirLinkAncestorFlags, dir_id); + snprintf(str1, sizeof(str1), "0x%x", correct); + snprintf(str2, sizeof(str2), "0x%x", incorrect); + fsckPrint(gptr->context, E_BadValue, str1, str2); + } + + return 0; +} + +/* Look up the ".HFS+ Private Directory Data\xd" directory */ +static int priv_dir_lookup(SGlobPtr gptr, CatalogKey *key, CatalogRecord *rec) +{ + int i; + int retval; + char *dirname = HFSPLUS_DIR_METADATA_FOLDER; + CatalogName cat_dirname; + uint16_t recsize; + uint32_t hint; + + /* Look up the catalog btree record for the private metadata directory */ + cat_dirname.ustr.length = strlen(dirname); + for (i = 0; i < cat_dirname.ustr.length; i++) { + cat_dirname.ustr.unicode[i] = (u_int16_t) dirname[i]; + } + BuildCatalogKey(kHFSRootFolderID, &cat_dirname, true, key); + retval = SearchBTreeRecord (gptr->calculatedCatalogFCB, key, kNoHint, + NULL, rec, &recsize, &hint); + return retval; +} + +/* This function initializes the directory hard link check by looking up + * private directory that stores directory inodes. + */ +int dirhardlink_init(SGlobPtr gptr) +{ + int retval = 0; + CatalogRecord rec; + CatalogKey key; + + /* Check if the volume is HFS+. */ + if (VolumeObjectIsHFSPlus() == false) { + goto out; + } + + /* Look up the private metadata directory */ + retval = priv_dir_lookup(gptr, &key, &rec); + if (retval == 0) { + gptr->dirlink_priv_dir_id = rec.hfsPlusFolder.folderID; + gptr->dirlink_priv_dir_valence = rec.hfsPlusFolder.valence; + } else { + gptr->dirlink_priv_dir_id = 0; + gptr->dirlink_priv_dir_valence = 0; + } + + retval = 0; + +out: + return retval; +} + +/* Check the private directory for directory hard links */ +static void dirlink_priv_dir_check(SGlobPtr gptr, HFSPlusCatalogFolder *rec, + HFSPlusCatalogKey *key) +{ + /* ownerFlags should have UF_IMMUTABLE and UF_HIDDEN set, and + fileMode should have S_ISVTX set */ + if (((rec->bsdInfo.ownerFlags & UF_IMMUTABLE) == 0) || + //(((rec->bsdInfo.adminFlags << 16) & UF_HIDDEN) == 0) || + ((rec->bsdInfo.fileMode & S_ISVTX) == 0)) { + record_privdir_bad_perm(gptr, rec->folderID); + } +} + +/* Get the first link ID information for a hard link inode. + * For directory inodes, we get it from the extended attribute + * of the directory inode; for files, we get it from hl_firstLinkID + * Returns - zero if the lookup succeeded with the first link ID + * in the pointer provided, and non-zero if the extended attribute + * does not exist, or any other error encountered during lookup. 
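+ *
+ * Illustrative usage (a sketch only; 'gptr' stands for an initialized
+ * SGlobPtr and 'dir_inode_id' for an existing directory inode ID):
+ *
+ *	uint32_t first_link = 0;
+ *	if (get_first_link_id(gptr, NULL, dir_inode_id, true, &first_link) == 0)
+ *		plog("first dir hard link CNID = %u\n", first_link);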
+ */ +int get_first_link_id(SGlobPtr gptr, CatalogRecord *inode_rec, uint32_t inode_id, + Boolean isdir, uint32_t *first_link_id) +{ + int retval = 0; + int i; + BTreeIterator iterator; + FSBufferDescriptor bt_data; + HFSPlusAttrData *rec; + HFSPlusAttrKey *key; + u_int8_t attrdata[FIRST_LINK_XATTR_REC_SIZE]; + size_t unicode_bytes = 0; + + bzero(&iterator, sizeof(iterator)); + + if (isdir) { + /* Create key for the required attribute */ + key = (HFSPlusAttrKey *)&iterator.key; + utf_decodestr((unsigned char *)FIRST_LINK_XATTR_NAME, + strlen(FIRST_LINK_XATTR_NAME), key->attrName, + &unicode_bytes, sizeof(key->attrName)); + key->attrNameLen = unicode_bytes / sizeof(UniChar); + key->keyLength = kHFSPlusAttrKeyMinimumLength + unicode_bytes; + key->pad = 0; + key->fileID = inode_id; + key->startBlock = 0; + + rec = (HFSPlusAttrData *)&attrdata[0]; + bt_data.bufferAddress = rec; + bt_data.itemSize = sizeof(attrdata); + bt_data.itemCount = 1; + + retval = BTSearchRecord(gptr->calculatedAttributesFCB, &iterator, kNoHint, + &bt_data, NULL, NULL); + if (retval == 0) { + /* Attribute should be an inline attribute */ + if (rec->recordType != kHFSPlusAttrInlineData) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tfirst link EA is not inline for dirinode=%u (found=0x%x)\n", inode_id, rec->recordType); + } + retval = ENOENT; + goto out; + } + + /* Attribute data should be null terminated, attrSize includes + * size of the attribute data including the null termination. + */ + if (rec->attrData[rec->attrSize-1] != '\0') { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tfirst link EA attrData is not NULL terminated for dirinode=%u\n", inode_id); + } + retval = ENOENT; + goto out; + } + + /* All characters are numbers in the attribute data */ + for (i = 0; i < rec->attrSize-1; i++) { + if (isdigit(rec->attrData[i]) == 0) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tfirst link EA attrData contains non-digit 0x%x for dirinode=%u\n", rec->attrData[i], inode_id); + } + retval = ENOENT; + goto out; + } + } + + *first_link_id = strtoul((char *)&rec->attrData[0], NULL, 10); + if (*first_link_id < kHFSFirstUserCatalogNodeID) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tfirst link ID=%u is < 16 for dirinode=%u\n", *first_link_id, inode_id); + } + *first_link_id = 0; + retval = ENOENT; + goto out; + } + } + } else { + *first_link_id = 0; + if ((inode_rec != NULL) && + (inode_rec->recordType == kHFSPlusFileRecord)) { + *first_link_id = inode_rec->hfsPlusFile.hl_firstLinkID; + if (*first_link_id < kHFSFirstUserCatalogNodeID) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog("\tfirst link ID=%u is < 16 for fileinode=%u\n", *first_link_id, inode_id); + } + *first_link_id = 0; + retval = ENOENT; + goto out; + } + } else { + CatalogRecord rec; + CatalogKey key; + uint16_t recsize; + + /* No record or bad record provided, look it up */ + retval = GetCatalogRecordByID(gptr, inode_id, true, &key, &rec, &recsize); + if (retval == 0) { + *first_link_id = rec.hfsPlusFile.hl_firstLinkID; + if (rec.recordType != kHFSPlusFileRecord || + *first_link_id < kHFSFirstUserCatalogNodeID) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog("\tfirst link ID=%u is < 16 for fileinode=%u\n", *first_link_id, inode_id); + } + *first_link_id = 0; + retval = ENOENT; + } + } else { + *first_link_id = 0; + retval = ENOENT; + } + } + } + +out: + return retval; +} + +/* Adds the directory inode, and directory hard link pair to the + * prime 
remainder bucket provided. This is based on Chinese Remainder + * Theorem, and the buckets are later compared to find if the directory + * hard link chains for all directory inodes are valid. + */ +void hardlink_add_bucket(PrimeBuckets *bucket, uint32_t inode_id, + uint32_t cur_link_id) +{ + uint64_t num; + + num = ((uint64_t)inode_id << 32) | cur_link_id; + + add_prime_bucket_uint64(bucket, num); +} + +/* Structure to store the directory hard link IDs found during doubly linked + * list traversal in inode_check() + */ +struct link_list { + uint32_t link_id; + struct link_list *next; +}; + +/* Verifies the inode record. Validates if the flags are set + * correctly, parent is the private metadata directory, first link ID + * is stored correctly, and the doubly linked * list of hard links is valid. + * + * Returns - + * zero - if no corruption is detected, or the corruption detected is + * such that a repair order can be created. + * non-zero - if the corruption detected requires complete knowledge of + * all the related directory hard links to suggest repair. + */ +int inode_check(SGlobPtr gptr, PrimeBuckets *bucket, + CatalogRecord *rec, CatalogKey *key, Boolean isdir) +{ + int retval = 0; + uint32_t inode_id; + uint32_t cur_link_id; + uint32_t prev_link_id; + uint32_t count; + uint32_t linkCount; + char calc_name[32]; + char found_name[NAME_MAX]; + size_t calc_len; + size_t found_len; + CatalogKey linkkey; + CatalogRecord linkrec; + uint16_t recsize; + int flags; + uint32_t parentid; + uint32_t link_ref_num = 0; + + struct link_list *head = NULL; + struct link_list *cur; + + (void) utf_encodestr(key->hfsPlus.nodeName.unicode, key->hfsPlus.nodeName.length * 2, + (unsigned char *)found_name, &found_len, NAME_MAX); + found_name[found_len] = '\0'; + + if (isdir) { + inode_id = rec->hfsPlusFolder.folderID; + flags = rec->hfsPlusFolder.flags; + linkCount = rec->hfsPlusFolder.bsdInfo.special.linkCount; + parentid = gptr->dirlink_priv_dir_id; + } else { + inode_id = rec->hfsPlusFile.fileID; + flags = rec->hfsPlusFile.flags; + linkCount = rec->hfsPlusFile.bsdInfo.special.linkCount; + parentid = gptr->filelink_priv_dir_id; + link_ref_num = strtoul(&found_name[strlen(HFS_INODE_PREFIX)], NULL, 10); + } + + /* inode should only reside in its corresponding private directory */ + if ((parentid != 0) && (key->hfsPlus.parentID != parentid)) { + (void) record_inode_badparent(gptr, inode_id, isdir, key->hfsPlus.parentID, parentid); + } + + /* Compare the names for directory inode only because the names + * of file inodes can have random number suffixed. + */ + if (isdir) { + (void) snprintf(calc_name, sizeof(calc_name), "%s%u", HFS_DIRINODE_PREFIX, inode_id); + calc_len = strlen(calc_name); + + if ((found_len != calc_len) || + (strncmp(calc_name, found_name, calc_len) != 0)) { + (void) record_inode_badname(gptr, inode_id, found_name, + calc_name); + } + } + + /* At least one hard link should always point at an inode. */ + if (linkCount == 0) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tlinkCount=0 for dirinode=%u\n", inode_id); + } + retval = 1; + goto out; + } + + /* A directory inode should always have kHFSHasLinkChainBit + * set. A file inode created on pre-Leopard OS does not have + * kHFSHasLinkChainBit set and firstLinkID is zero. Therefore + * ignore such file inodes from CRT check and instead add the + * the inode to hash used for checking link count. 
+ */ + if ((flags & kHFSHasLinkChainMask) == 0) { + if ((isdir) || (!isdir && (rec->hfsPlusFile.hl_firstLinkID != 0))) { + (void) record_inode_badflags(gptr, inode_id, isdir, + flags, flags | kHFSHasLinkChainMask, false); + } else { + filelink_hash_inode(link_ref_num, linkCount); + retval = 0; + goto out; + } + } + + /* Lookup the ID of first link from the extended attribute */ + retval = get_first_link_id(gptr, rec, inode_id, isdir, &cur_link_id); + if (retval) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tError getting first link ID for inode=%u\n", inode_id); + } + goto out; + } + + /* Check doubly linked list of hard links that point to this inode */ + prev_link_id = 0; + count = 0; + + while (cur_link_id != 0) { + /* Lookup the current directory link record */ + retval = GetCatalogRecordByID(gptr, cur_link_id, true, + &linkkey, &linkrec, &recsize); + if (retval) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tError getting link=%u for inode=%u\n", cur_link_id, inode_id); + } + goto out; + } + + /* Hard link is a file record */ + if (linkrec.recordType != kHFSPlusFileRecord) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect record type for link=%u for inode=%u (expected=2, found=%u)\n", cur_link_id, inode_id, linkrec.recordType); + } + retval = 1; + goto out; + } + + /* Hard link should have hard link bit set */ + if ((linkrec.hfsPlusFile.flags & kHFSHasLinkChainMask) == 0) { + (void) record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect flag for link=%u for inode=%u (found=0x%x)\n", cur_link_id, inode_id, linkrec.hfsPlusFile.flags); + } + retval = 1; + goto out; + } + + if (isdir) { + /* Check if the hard link has correct finder info */ + if ((linkrec.hfsPlusFile.userInfo.fdType != kHFSAliasType) || + (linkrec.hfsPlusFile.userInfo.fdCreator != kHFSAliasCreator) || + ((linkrec.hfsPlusFile.userInfo.fdFlags & kIsAlias) == 0)) { + record_link_badfinderinfo(gptr, linkrec.hfsPlusFile.fileID, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog("\tdirlink: fdType = 0x%08lx, fdCreator = 0x%08lx\n", + (unsigned long)linkrec.hfsPlusFile.userInfo.fdType, + (unsigned long)linkrec.hfsPlusFile.userInfo.fdCreator); + } + } + + /* Check if hard link points to the current inode */ + if (linkrec.hfsPlusFile.hl_linkReference != inode_id) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect dirinode ID for dirlink=%u (expected=%u, found=%u)\n", cur_link_id, inode_id, linkrec.hfsPlusFile.hl_linkReference); + } + retval = 1; + goto out; + } + + } else { + /* Check if the hard link has correct finder info */ + if ((linkrec.hfsPlusFile.userInfo.fdType != kHardLinkFileType) || + (linkrec.hfsPlusFile.userInfo.fdCreator != kHFSPlusCreator)) { + record_link_badfinderinfo(gptr, linkrec.hfsPlusFile.fileID, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog("\tfilelink: fdType = 0x%08lx, fdCreator = 0x%08lx\n", + (unsigned long)linkrec.hfsPlusFile.userInfo.fdType, + (unsigned long)linkrec.hfsPlusFile.userInfo.fdCreator); + } + } + + /* Check if hard link has correct link reference number */ + if (linkrec.hfsPlusFile.hl_linkReference != link_ref_num) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect link reference number for 
filelink=%u (expected=%u, found=%u)\n", cur_link_id, inode_id, linkrec.hfsPlusFile.hl_linkReference); + } + retval = 1; + goto out; + } + } + + /* For directory hard links, add the directory inode ID and + * the current link ID pair to the prime bucket. For file + * hard links, add the link reference number and current + * link ID pair to the prime bucket. + */ + if (isdir) { + hardlink_add_bucket(bucket, inode_id, cur_link_id); + } else { + hardlink_add_bucket(bucket, link_ref_num, cur_link_id); + } + + /* Check the previous directory hard link */ + if (prev_link_id != linkrec.hfsPlusFile.hl_prevLinkID) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect prevLinkID for link=%u for inode=%u (expected=%u, found=%u)\n", cur_link_id, inode_id, prev_link_id, linkrec.hfsPlusFile.hl_prevLinkID); + } + retval = 1; + goto out; + } + + /* Check if we saw this directory hard link previously */ + cur = head; + while (cur) { + if (cur->link_id == cur_link_id) { + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tDuplicate link=%u found in list for inode=%u\n", cur_link_id, inode_id); + } + record_link_badchain(gptr, isdir); + retval = 1; + goto out; + } + cur = cur->next; + } + + /* Add the new unique directory hard link to our list */ + cur = malloc(sizeof(struct link_list)); + if (!cur) { + retval = ENOMEM; + goto out; + } + cur->link_id = cur_link_id; + cur->next = head; + head = cur; + + count++; + prev_link_id = cur_link_id; + cur_link_id = linkrec.hfsPlusFile.hl_nextLinkID; + } + + /* If the entire chain looks good, match the link count */ + if (linkCount != count) { + record_link_badchain(gptr, isdir); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tIncorrect linkCount for inode=%u (expected=%u, found=%u)\n", inode_id, count, linkCount); + } + retval = 1; + goto out; + } + +out: + /* Free memory used for checking duplicates in the doubly linked list */ + while(head) { + cur = head; + head = head->next; + free(cur); + } + + return retval; +} + + +/* Check if the parent ancestors starting at the given directory has + * the kHFSHasChildLinkBit set. This bit indicates that a descendant of + * this directory is a directory hard link. Note that the root folder + * and the "private directory data" directory does not have this bit + * set, and the check stops as soon as we encounter one of these + * directories. + */ +static void check_dirlink_ancestors(SGlobPtr gptr, uint32_t dir_id) +{ + int retval = 0; + CatalogRecord rec; + CatalogKey key; + uint16_t recsize; + + while ((dir_id != kHFSRootFolderID) && (dir_id != gptr->dirlink_priv_dir_id)) { + retval = GetCatalogRecordByID(gptr, dir_id, true, &key, &rec, &recsize); + if (retval != 0) { + break; + } + + if (rec.recordType != kHFSPlusFolderRecord) { + break; + } + + if ((rec.hfsPlusFolder.flags & kHFSHasChildLinkMask) == 0) { + (void) record_parent_badflags(gptr, dir_id, + rec.hfsPlusFolder.flags, + rec.hfsPlusFolder.flags | kHFSHasChildLinkMask); + } + + dir_id = key.hfsPlus.parentID; + } + + /* If there was any problem in looking up parent directory, + * the catalog check should have also detected the problem. + * But there are cases which are not detected like names in + * thread record and file/folder record key do not match. + * Therefore force repair for incorrect number of thread + * records if lookup fails. 
+ */ + if ((dir_id != kHFSRootFolderID) && (dir_id != gptr->dirlink_priv_dir_id)) { + fsckPrint(gptr->context, E_BadParentHierarchy, dir_id); + gptr->CBTStat |= S_Orphan; + } + + return; +} + +/* Verifies the directory hard link record. Validates if the flags are set + * correctly, the finderInfo fields are correct, and if the parent hierarchy + * till the root folder (except the root folder) has the kHFSHasChildLinkBit + * set correctly. This function also add the directory inode, and the + * directory hard link pair to the prime buckets for comparison later. + * + * This function does not verify the first and the next directory hard link + * pointers in the doubly linked list because the check is already done + * in directory inode check (inode_check()) . Any orphan directory + * hard link will also be detected later by the prime bucket comparison. + */ +static void dirlink_check(SGlobPtr gptr, PrimeBuckets *bucket, + HFSPlusCatalogFile *rec, HFSPlusCatalogKey *key, Boolean isdir) +{ + /* Add this directory hard link and corresponding inode number pair + * to prime buckets + */ +#if DEBUG_HARDLINKCHECK + if (fsckGetVerbosity(gptr->context) >= kDebugLog) + plog("link_check: adding <%u, %u>\n", rec->hl_linkReference, rec->fileID); +#endif + + hardlink_add_bucket(bucket, rec->hl_linkReference, rec->fileID); + + /* Check if the directory hard link has UF_IMMUTABLE bit set */ + if ((rec->bsdInfo.ownerFlags & UF_IMMUTABLE) == 0) { + record_dirlink_badownerflags(gptr, rec->fileID, + rec->bsdInfo.ownerFlags, + rec->bsdInfo.ownerFlags | UF_IMMUTABLE, false); + } + + /* Check Finder Info */ + if ((rec->userInfo.fdType != kHFSAliasType) || + (rec->userInfo.fdCreator != kHFSAliasCreator) || + ((rec->userInfo.fdFlags & kIsAlias) == 0)) { + record_link_badfinderinfo(gptr, rec->fileID, isdir); + } + + /* XXX - Check resource fork/alias data */ + + /* Check if all the parent directories have the kHFSHasChildLinkBit set */ + check_dirlink_ancestors(gptr, key->parentID); +} + +/* Searches the next child directory record to return given the parent ID + * and the current child ID. If the current child ID is zero, this is the + * first time we are looking up this directory, therefore return the + * first child directory or directory hard link found. If child ID is + * non-zero, return the first child directory or directory hard + * link found after the current child record. + * + * For normal directories, the folder ID is returned as the new child inode_id + * and catalog_id. For directory hard links, the inode_id of the directory + * inode is returned in the inode_id, and the fileID of the directory hard link + * is returned in the catalog_id. If the inode_id returned corresponds to a + * directory inode, is_dirinode is set to true. If no child record is found, + * or an error occurred on btree traversal, these values are zero. + * + * Returns - + * zero - on successfully determining if the next child record exists + * or not. + * non-zero - error, like during btree lookup, etc. 
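+ *
+ * Illustrative iteration pattern (a sketch only; 'gptr' and 'parent_id' are
+ * assumed to be valid):
+ *
+ *	uint32_t inode_id, catalog_id, is_dirinode;
+ *	uint32_t last_child = 0;
+ *	while (find_next_child_dir(gptr, parent_id, last_child,
+ *			&inode_id, &catalog_id, &is_dirinode) == 0 &&
+ *			catalog_id != 0) {
+ *		// process (inode_id, catalog_id) here
+ *		last_child = catalog_id;
+ *	}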
+ */ +static int find_next_child_dir(SGlobPtr gptr, uint32_t parent_id, + uint32_t cur_child_catalog_id, uint32_t *child_inode_id, + uint32_t *child_catalog_id, uint32_t *is_dirinode) +{ + int retval; + SFCB *fcb; + int return_next_rec = true; + BTreeIterator iterator; + FSBufferDescriptor buf_desc; + uint16_t recsize; + CatalogRecord rec; + CatalogKey *key; + + *child_inode_id = 0; + *child_catalog_id = 0; + *is_dirinode = false; + + fcb = gptr->calculatedCatalogFCB; + key = (CatalogKey *)&iterator.key; + + /* If no child record for this parent has been looked up previously, + * return the first child record found. Otherwise lookup the + * catalog record for the last child ID provided and return the + * next valid child ID. If the lookup of the last child failed, + * fall back to iterating all child records for given parent + * directory and returning next child found after given child ID. + */ + if (cur_child_catalog_id == 0) { +iterate_parent: + /* Lookup catalog record with key containing given parent ID and NULL + * name. This will place iterator just before the first child record + * for this directory. + */ + bzero(&iterator, sizeof(iterator)); + bzero(&buf_desc, sizeof(buf_desc)); + buf_desc.bufferAddress = &rec; + buf_desc.itemCount = 1; + buf_desc.itemSize = sizeof(rec); + BuildCatalogKey(parent_id, NULL, true, key); + retval = BTSearchRecord(fcb, &iterator, kNoHint, &buf_desc, &recsize, + &iterator); + if ((retval != 0) && (retval != btNotFound)) { + goto out; + } + } else { + /* Lookup the thread record for the last child seen */ + bzero(&iterator, sizeof(iterator)); + bzero(&buf_desc, sizeof(buf_desc)); + buf_desc.bufferAddress = &rec; + buf_desc.itemCount = 1; + buf_desc.itemSize = sizeof(rec); + BuildCatalogKey(cur_child_catalog_id, NULL, true, key); + retval = BTSearchRecord(fcb, &iterator, kNoHint, &buf_desc, + &recsize, &iterator); + if (retval) { + return_next_rec = false; + goto iterate_parent; + } + + /* Check if really we found a thread record */ + if ((rec.recordType != kHFSPlusFolderThreadRecord) && + (rec.recordType != kHFSPlusFileThreadRecord)) { + return_next_rec = false; + goto iterate_parent; + } + + /* Lookup the corresponding file/folder record */ + bzero(&iterator, sizeof(iterator)); + bzero(&buf_desc, sizeof(buf_desc)); + buf_desc.bufferAddress = &rec; + buf_desc.itemCount = 1; + buf_desc.itemSize = sizeof(rec); + BuildCatalogKey(rec.hfsPlusThread.parentID, + (CatalogName *)&(rec.hfsPlusThread.nodeName), + true, (CatalogKey *)&(iterator.key)); + retval = BTSearchRecord(fcb, &iterator, kInvalidMRUCacheKey, + &buf_desc, &recsize, &iterator); + if (retval) { + return_next_rec = false; + goto iterate_parent; + } + } + + /* Lookup the next record */ + retval = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, &buf_desc, + &recsize); + while (retval == 0) { + /* Not the same parent anymore, stop the search */ + if (key->hfsPlus.parentID != parent_id) { + break; + } + + if (rec.recordType == kHFSPlusFolderRecord) { + /* Found a catalog folder record, and if we are + * supposed to return the next record found, return + * this catalog folder. + */ + if (return_next_rec) { + if (rec.hfsPlusFolder.flags & kHFSHasLinkChainMask) { + *is_dirinode = true; + } + *child_inode_id = rec.hfsPlusFolder.folderID; + *child_catalog_id = rec.hfsPlusFolder.folderID; + break; + } + /* If the current record is the current child, we + * have to return the next child record. 
+ */ + if (rec.hfsPlusFolder.folderID == cur_child_catalog_id) { + return_next_rec = true; + } + } else if (rec.recordType == kHFSPlusFileRecord) { + /* Check if the hard link bit is set with correct + * alias type/creator. If the parent is private + * metadata directory for file hard links, this + * is a hard link inode for an alias, and not + * directory hard link. Skip this file from our + * check. + */ + if ((rec.hfsPlusFile.flags & kHFSHasLinkChainMask) && + (rec.hfsPlusFile.userInfo.fdType == kHFSAliasType) && + (rec.hfsPlusFile.userInfo.fdCreator == kHFSAliasCreator) && + (key->hfsPlus.parentID != gptr->filelink_priv_dir_id)) { + /* Found a directory hard link, and if we are + * supposed to return the next record found, + * then return this directory hard link. + */ + if (return_next_rec) { + *child_inode_id = rec.hfsPlusFile.hl_linkReference; + *child_catalog_id = rec.hfsPlusFile.fileID; + *is_dirinode = true; + break; + } + /* If the current record is the current child, + * we have to return the next child record. + */ + if (rec.hfsPlusFile.fileID == cur_child_catalog_id) { + return_next_rec = true; + } + } + } + + /* Lookup the next record */ + retval = BTIterateRecord(fcb, kBTreeNextRecord, &iterator, + &buf_desc, &recsize); + } + + if (retval == btNotFound) { + retval = 0; + } + +out: + return retval; +} + +/* In-memory state for depth first traversal for finding loops in + * directory hierarchy. inode_id is the user visible ID of the given + * directory or directory hard link, and catalog_id is the inode ID for + * normal directories, and the directory hard link ID (file ID of the + * directory hard link record). + * + * The inode_id is used for checking loops in the hierarchy, whereas + * the catalog_id is used to maintain state for depth first traversal. + */ +struct dfs_id { + uint32_t inode_id; + uint32_t catalog_id; +}; + +struct dfs_stack { + uint32_t depth; + struct dfs_id *idptr; +}; + +/* Assuming that the name of a directory is single byte, the maximum depth + * of a directory hierarchy that can accommodate in PATH_MAX will be + * PATH_MAX/2. Note that catalog hierarchy check puts limitation of 100 + * on the maximum depth of a directory hierarchy. + */ +#define DIRLINK_DEFAULT_DFS_MAX_DEPTH PATH_MAX/2 + +/* Check if the current directory exists in the current traversal path. + * If yes, loops in directory exists and return non-zero value. If not, + * return zero. + */ +static int check_loops(struct dfs_stack *dfs, struct dfs_id id) +{ + int retval = 0; + int i; + + for (i = 0; i < dfs->depth; i++) { + if (dfs->idptr[i].inode_id == id.inode_id) { + retval = 1; + break; + } + } + + return retval; +} + +static void print_dfs(struct dfs_stack *dfs) +{ + int i; + + plog ("\t"); + for (i = 0; i < dfs->depth; i++) { + plog ("(%u,%u) ", dfs->idptr[i].inode_id, dfs->idptr[i].catalog_id); + } + plog ("\n"); +} + +/* Store information about visited directory inodes such that we do not + * reenter the directory multiple times while following directory hard links. + */ +struct visited_dirinode { + uint32_t *list; /* Pointer to array of IDs */ + uint32_t size; /* Maximum number of entries in the array */ + uint32_t offset; /* Offset where next ID will be added */ + uint32_t wrapped; /* Boolean, true if list wraps around */ +}; + +/* Add the given dirinode_id to the list of visited nodes. If all the slots + * in visited list are used, wrap around and add the new ID. 
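+ * Wrapping can overwrite older entries, so is_dirinode_visited() may later
+ * report an already-visited inode as unvisited; that only costs a repeat
+ * descent of that inode (loops are still caught via the traversal stack in
+ * check_loops()), it does not affect correctness.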
+ */ +static void mark_dirinode_visited(uint32_t dirinode_id, struct visited_dirinode *visited) +{ + if (visited->list == NULL) { + return; + } + + if (visited->offset >= visited->size) { + visited->offset = 0; + visited->wrapped = true; + } + visited->list[visited->offset] = dirinode_id; + visited->offset++; +} + +/* Check if given directory inode exists in the visited list or not */ +static int is_dirinode_visited(uint32_t dirinode_id, struct visited_dirinode *visited) +{ + int is_visited = false; + uint32_t end_offset; + uint32_t off; + + if (visited->list == NULL) { + return is_visited; + } + + /* If the list had wrapped, search the entire list */ + if (visited->wrapped == true) { + end_offset = visited->size; + } else { + end_offset = visited->offset; + } + + for (off = 0; off < end_offset; off++) { + if (visited->list[off] == dirinode_id) { + is_visited = true; + break; + } + } + + return is_visited; +} + +/* Check if there are any loops in the directory hierarchy. + * + * This function performs a depth first traversal of directories as they + * will be visible to the user. If the lookup of private metadata directory + * succeeded in dirlink_init(), the traversal starts from the private + * metadata directory. Otherwise it starts at the root folder. It stores + * the current depth first traversal state, and looks up catalog records as + * required. The current traversal state consists of two IDs, the user + * visible ID or inode_id, and the on-disk ID or catalog_id. For normal + * directories, the user visible ID is same as the on-disk ID, but for + * directory hard links, the user visible ID is the inode ID, and the + * on-disk ID is the file ID of the directory hard link. This function + * stores the list of visited directory inode ID and checks the list before + * traversing down the directory inode hierarchy. After traversing down a + * directory inode and checking that is valid, it adds the directory inode + * ID to the visited list. + * + * The inode_id is used for checking loops in the hierarchy, whereas + * the catalog_id is used to maintain state for depth first traversal. + * + * Returns - + * zero - if the check was performed successfully, and no loops exist + * in the directory hierarchy. + * non-zero - on error, or if loops were detected in directory hierarchy. 
+ */ +static int check_hierarchy_loops(SGlobPtr gptr) +{ + int retval = 0; + struct dfs_stack dfs; + struct dfs_id unknown_child; + struct dfs_id child; + struct dfs_id parent; + struct visited_dirinode visited; + size_t max_alloc_depth = DIRLINK_DEFAULT_DFS_MAX_DEPTH; + uint32_t is_dirinode; + +#define DFS_PUSH(dfsid) \ + { \ + dfs.idptr[dfs.depth].inode_id = dfsid.inode_id; \ + dfs.idptr[dfs.depth].catalog_id = dfsid.catalog_id; \ + dfs.depth++; \ + if (dfs.depth == max_alloc_depth) { \ + void *tptr = realloc(dfs.idptr, (max_alloc_depth + DIRLINK_DEFAULT_DFS_MAX_DEPTH) * sizeof(struct dfs_id)); \ + if (tptr == NULL) { \ + break; \ + } else { \ + dfs.idptr = tptr; \ + max_alloc_depth += DIRLINK_DEFAULT_DFS_MAX_DEPTH; \ + } \ + } \ + } + +#define DFS_POP(dfsid) \ + { \ + dfs.depth--; \ + dfsid.inode_id = dfs.idptr[dfs.depth].inode_id; \ + dfsid.catalog_id = dfs.idptr[dfs.depth].catalog_id; \ + } + +#define DFS_PEEK(dfsid) \ + { \ + dfsid.inode_id = dfs.idptr[dfs.depth-1].inode_id; \ + dfsid.catalog_id = dfs.idptr[dfs.depth-1].catalog_id; \ + } + + /* Initialize the traversal stack */ + dfs.idptr = malloc(max_alloc_depth * sizeof(struct dfs_id)); + if (!dfs.idptr) { + return ENOMEM; + } + dfs.depth = 0; + + /* Initialize unknown child IDs which are used when a directory is + * seen for the first time. + */ + unknown_child.inode_id = unknown_child.catalog_id = 0; + + /* Allocate visited list for total number of directory inodes seen */ + if (gptr->calculated_dirinodes) { + visited.size = gptr->calculated_dirinodes; + } else { + visited.size = 1024; + } + + /* If visited list allocation failed, perform search without cache */ + visited.list = malloc(visited.size * sizeof(uint32_t)); + if (visited.list == NULL) { + visited.size = 0; + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tcheck_loops: Allocation failed for visited list\n"); + } + } + visited.offset = 0; + visited.wrapped = false; + + /* Set the starting directory for traversal */ + if (gptr->dirlink_priv_dir_id) { + parent.inode_id = parent.catalog_id = gptr->dirlink_priv_dir_id; + } else { + parent.inode_id = parent.catalog_id = kHFSRootFolderID; + } + + /* Initialize the first parent and its first unknown child */ + do { + DFS_PUSH(parent); + DFS_PUSH(unknown_child); + } while (0); + + while (dfs.depth > 1) { + DFS_POP(child); + DFS_PEEK(parent); + retval = find_next_child_dir(gptr, parent.inode_id, + child.catalog_id, &(child.inode_id), + &(child.catalog_id), &is_dirinode); + if (retval) { + break; + } + + if (child.inode_id) { + retval = check_loops(&dfs, child); + if (retval) { + fsckPrint(gptr->context, E_DirLoop); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tDetected when adding (%u,%u) to following traversal stack -\n", child.inode_id, child.catalog_id); + print_dfs(&dfs); + } + gptr->CatStat |= S_LinkErrNoRepair; + retval = E_DirLoop; + break; + } + + /* Push the current child on traversal stack */ + DFS_PUSH(child); + + /* Traverse down directory inode only if it was not + * visited previously and mark it visited. 
+ */ + if (is_dirinode == true) { + if (is_dirinode_visited(child.inode_id, &visited)) { + continue; + } else { + mark_dirinode_visited(child.inode_id, &visited); + } + } + + /* Push unknown child to traverse down the child directory */ + DFS_PUSH(unknown_child); + } + } + + if (dfs.depth >= max_alloc_depth) { + fsckPrint(gptr->context, E_DirHardLinkNesting); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + print_dfs(&dfs); + } + gptr->CatStat |= S_LinkErrNoRepair; + retval = E_DirHardLinkNesting; + } + + if (dfs.idptr) { + free(dfs.idptr); + } + if (visited.list) { + free(visited.list); + } + return retval; +} + +/* This function traverses the entire catalog btree, and checks all + * directory inodes and directory hard links found. + * + * Returns zero if the check is successful, and non-zero if an error was + * encountered during verification. + */ +int dirhardlink_check(SGlobPtr gptr) +{ + int retval = 0; + uint16_t selcode; + uint32_t hint; + + CatalogRecord catrec; + CatalogKey catkey; + uint16_t recsize; + + PrimeBuckets *inode_view = NULL; + PrimeBuckets *dirlink_view = NULL; + + /* Check if the volume is HFS+ */ + if (VolumeObjectIsHFSPlus() == false) { + goto out; + } + + /* Shortcut out if no directory hard links exists on the disk */ + if ((gptr->dirlink_priv_dir_valence == 0) && + (gptr->calculated_dirlinks == 0) && + (gptr->calculated_dirinodes == 0)) { + goto out; + } + + fsckPrint(gptr->context, hfsMultiLinkDirCheck); + + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tprivdir_valence=%u, calc_dirlinks=%u, calc_dirinode=%u\n", gptr->dirlink_priv_dir_valence, gptr->calculated_dirlinks, gptr->calculated_dirinodes); + } + + /* If lookup of private directory failed and the volume has + * some directory hard links and directory inodes, we will need + * to create the private directory for directory hard links. + */ + if (gptr->dirlink_priv_dir_id == 0) { + fsckPrint(gptr->context, E_MissingPrivDir); + gptr->CatStat |= S_LinkErrNoRepair; + } + + /* Initialize the two prime number buckets, both buckets keep track + * of inode ID and corresponding directory hard link ID. The first + * bucket is filled when traversing the directory hard link doubly + * linked list from the directory inode, and the second bucket is + * filled when btree traversal encounters directory hard links. + * This method quickly allows us to check if the mapping of all + * inodes and directory hard links is same, and no orphans exists. 
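+	 * (Each <inode ID, link ID> pair is folded into one 64-bit value, see
+	 * hardlink_add_bucket(), and accumulated into remainder buckets keyed by
+	 * a set of primes, so compare_prime_buckets() can test whether the two
+	 * sets of pairs match without storing every pair individually.)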
+ */ + inode_view = (PrimeBuckets *)calloc(1, sizeof(PrimeBuckets)); + if (!inode_view) { + retval = ENOMEM; + goto out; + } + + dirlink_view = (PrimeBuckets *)calloc(1, sizeof(PrimeBuckets)); + if (!dirlink_view) { + retval = ENOMEM; + goto out; + } + + /* Traverse the catalog btree from the first record */ + selcode = 0x8001; + retval = GetBTreeRecord(gptr->calculatedCatalogFCB, selcode, &catkey, + &catrec, &recsize, &hint); + if (retval != 0) { + goto out; + } + + /* Set code to get the next record */ + selcode = 1; + do { + if (catrec.hfsPlusFolder.recordType == kHFSPlusFolderRecord) { + /* Check directory hard link private metadata directory */ + if (catrec.hfsPlusFolder.folderID == gptr->dirlink_priv_dir_id) { + dirlink_priv_dir_check(gptr, + &(catrec.hfsPlusFolder), &(catkey.hfsPlus)); + } + + /* Check directory inode */ + if ((catrec.hfsPlusFolder.flags & kHFSHasLinkChainMask) || + (catkey.hfsPlus.parentID == gptr->dirlink_priv_dir_id)) { + retval = inode_check(gptr, inode_view, + &catrec, + &catkey, + true); + if (retval) { + /* If the corruption detected requires + * knowledge of all associated directory + * hard links for repair, stop the + * catalog btree traversal + */ + retval = 0; + break; + } + } + } else + if (catrec.recordType == kHFSPlusFileRecord) { + /* Check if the hard link bit is set with correct + * alias type/creator. If the parent is private + * metadata directory for file hard links, this + * is a hard link inode for an alias, and not + * directory hard link. Skip this file from our + * check. + */ + if ((catrec.hfsPlusFile.flags & kHFSHasLinkChainMask) && + (catrec.hfsPlusFile.userInfo.fdType == kHFSAliasType) && + (catrec.hfsPlusFile.userInfo.fdCreator == kHFSAliasCreator) && + (catkey.hfsPlus.parentID != gptr->filelink_priv_dir_id)) { + dirlink_check(gptr, dirlink_view, + &(catrec.hfsPlusFile), &(catkey.hfsPlus), true); + } + } + + retval = GetBTreeRecord(gptr->calculatedCatalogFCB, 1, + &catkey, &catrec, &recsize, &hint); + } while (retval == noErr); + + if (retval == btNotFound) { + retval = 0; + } else if (retval != 0) { + goto out; + } + + /* Compare the two prime number buckets only the if catalog traversal did + * not detect incorrect number of directory hard links corruption. + */ + if ((gptr->CatStat & S_DirHardLinkChain) == 0) { + retval = compare_prime_buckets(inode_view, dirlink_view); + if (retval) { + record_link_badchain(gptr, true); + if (fsckGetVerbosity(gptr->context) >= kDebugLog) { + plog ("\tdirlink prime buckets do not match\n"); + } + retval = 0; + } + } + + /* Check if there are any loops in the directory hierarchy */ + retval = check_hierarchy_loops(gptr); + if (retval) { + retval = 0; + goto out; + } + +out: + if (inode_view) { + free (inode_view); + } + if (dirlink_view) { + free (dirlink_view); + } + + return retval; +} + diff --git a/fsck_hfs/dfalib/fsck_journal.c b/fsck_hfs/dfalib/fsck_journal.c new file mode 100644 index 0000000..fa93f99 --- /dev/null +++ b/fsck_hfs/dfalib/fsck_journal.c @@ -0,0 +1,582 @@ +/* + * Copyright (c) 2010-2012 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include <stdio.h> +#include <stddef.h> +#include <stdlib.h> +#include <string.h> +#include <limits.h> +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <unistd.h> +#include <stdarg.h> +#include <sys/types.h> +#include <sys/param.h> +#include <sys/stat.h> +#include <sys/ioctl.h> +#include <sys/disk.h> +#include <sys/param.h> + +#include "../fsck_hfs.h" +#include "fsck_journal.h" + +extern char debug; + +#include <hfs/hfs_format.h> +#include <libkern/OSByteOrder.h> + +typedef struct SwapType { + const char *name; + uint16_t (^swap16)(uint16_t); + uint32_t (^swap32)(uint32_t); + uint64_t (^swap64)(uint64_t); +} swapper_t; + +static swapper_t nativeEndian = { + "native endian", + ^(uint16_t x) { return x; }, + ^(uint32_t x) { return x; }, + ^(uint64_t x) { return x; } +}; + +static swapper_t swappedEndian = { + "swapped endian", + ^(uint16_t x) { return OSSwapInt16(x); }, + ^(uint32_t x) { return OSSwapInt32(x); }, + ^(uint64_t x) { return OSSwapInt64(x); } +}; + +typedef int (^journal_write_block_t)(off_t, void *, size_t); + +// +// this isn't a great checksum routine but it will do for now. +// we use it to checksum the journal header and the block list +// headers that are at the start of each transaction. +// +static uint32_t +calc_checksum(char *ptr, int len) +{ + int i; + uint32_t cksum = 0; + + // this is a lame checksum but for now it'll do + for(i = 0; i < len; i++, ptr++) { + cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr); + } + + return (~cksum); +} + +typedef struct JournalIOInfo { + int jfd; // File descriptor for journal buffer + int wrapCount; // Incremented when it wraps around. + size_t bSize; // Block size. I/O needs to be done in that amount. + uint64_t base; // Base offset of journal buffer, past the header + uint64_t size; // Size of the journal, minus the header size + uint64_t end; // End of the journal (initially the "end" field from the journal header) + uint64_t current; // Current offset; starts at "start" +} JournalIOInfo_t; + +/* + * Attempt to read <length> bytes from the journal buffer. + * Since this is a wrapped buffer, it may have to start at the + * beginning. info->{base, size, end} are read-only; info->current + * is updated with the current offset. It returns the number of bytes + * it read, or -1 on error. 
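+ * When a read would run past base + size, it is split: the first pread stops
+ * at base + size, current wraps back to base (bumping wrapCount), and the
+ * remainder is read from there.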
+ */ +static ssize_t +journalRead(JournalIOInfo_t *info, uint8_t *buffer, size_t length) +{ + size_t nread = 0; + uint8_t *ptr = buffer; + +// fprintf(stderr, "%s(%p, %p, %zu)\n", __FUNCTION__, info, buffer, length); + if (info->wrapCount > 1) { + fplog(stderr, "%s(%p, %p, %zu): journal buffer wrap count = %d\n", __FUNCTION__, info, buffer, length, info->wrapCount); + return -1; + } + while (nread < length) { + off_t end; + size_t amt; + ssize_t n; + + if (info->end < info->current) { + // It wraps, so we max out at bse+size + end = info->base + info->size; + } else { + end = info->end; + } + amt = MIN((length - nread), (end - info->current)); + if (amt == 0) { + if (debug) { + fplog(stderr, "Journal read amount is 0, is that right?\n"); + } + goto done; + } + + n = pread(info->jfd, ptr, amt, info->current); + if (n == -1) { + warn("pread(%d, %p, %zu, %llu)", info->jfd, ptr, amt, info->current); + goto done; + } + if (n != amt) { + if (debug) { + fplog(stderr, "%s(%d): Wanted to read %zu, but only read %zd\n", __FUNCTION__, __LINE__, amt, n); + } + } + nread += n; + ptr += n; + info->current += n; + if (info->current == (info->base + info->size)) { + info->current = info->base; + info->wrapCount++; + } + } +done: + return nread; +} + +/* + * Read a transaction from the journal buffer. + * A transaction is a list of block_list_headers, and their + * associated data. It needs to read all of the block_lists in + * a transaction, or it fails. It returns NULL if there are + * no transactions, and on error. (Maybe that should change?) + */ +static block_list_header * +getJournalTransaction(JournalIOInfo_t *jinfo, swapper_t *swap) +{ + block_list_header *retval = NULL; + uint8_t block[jinfo->bSize]; + block_list_header *hdr = (void*)█ + ssize_t nread; + ssize_t amt; + + memset(block, 0, sizeof(block)); + nread = journalRead(jinfo, block, sizeof(block)); + if (nread == -1 || + (size_t)nread != sizeof(block)) { + if (debug) + plog("%s: wanted %zd, got %zd\n", __FUNCTION__, sizeof(block), nread); + return NULL; + } + if (swap->swap32(hdr->num_blocks) == 0) { + /* + * Either there really are no blocks, or this is not a valid + * transaction. Either way, there's nothing for us to do here. + */ + if (debug) + fplog(stderr, "%s(%d): hdr->num_blocks == 0\n", __FUNCTION__, __LINE__); + return NULL; + } + /* + * Now we check the checksum to see if this is a valid header. + * Note that we verify the checksum before reading any more -- if + * it's not a valid header, we don't want to read more than a block + * size. 
+ */ + uint32_t tmpChecksum = swap->swap32(hdr->checksum); + uint32_t compChecksum; + hdr->checksum = 0; + compChecksum = calc_checksum((void*)hdr, sizeof(*hdr)); + hdr->checksum = swap->swap32(tmpChecksum); + + if (compChecksum != tmpChecksum) { + if (debug) + fplog(stderr, "%s(%d): hdr has bad checksum, returning NULL\n", __FUNCTION__, __LINE__); + return NULL; + } + + if (swap->swap32(hdr->bytes_used) < sizeof(block)) { + if (debug) { + fplog(stderr, "%s(%d): hdr has bytes_used (%u) less than sizeof block (%zd)\n", + __FUNCTION__, __LINE__, swap->swap32(hdr->bytes_used), sizeof(block)); + } + return NULL; + } + + retval = malloc(swap->swap32(hdr->bytes_used)); + if (retval == NULL) + return NULL; + + memset(retval, 0, swap->swap32(hdr->bytes_used)); + memcpy(retval, block, sizeof(block)); + amt = swap->swap32(hdr->bytes_used) - sizeof(block); + nread = journalRead(jinfo, ((uint8_t*)retval) + sizeof(block), amt); + if (nread != amt) { + free(retval); + return NULL; + } + + return retval; +} + +/* + * Replay a transaction. + * Transactions have a blockListSize amount of block_list_header, and + * are then followed by data. We read it in, verify the checksum, and + * if it's good, we call the block that was passed in to do something + * with it. Maybe write it out. Maybe laugh about it. + * + * It returns -1 if there was an error before it wrote anything out, + * and -2 if there was an error after it wrote something out. + * + * The arguments are: + * txn -- a block_list_header pointer, which has the description and data + * to be replayed. + * blSize -- the size of the block_list for this journal. (The data + * are after the block_list, but part of the same buffer.) + * blkSize -- The block size used to convert block numbers to offsets. This + * is defined to be the size of the journal header. + * swap -- A pointer to a swapper_t used to swap journal data structure elements. + * writer -- A block-of-code that does writing. + * + * "writer" should return -1 to stop the replay (this propagates an error up). + */ +static int +replayTransaction(block_list_header *txn, size_t blSize, size_t blkSize, swapper_t *swap, journal_write_block_t writer) +{ + uint32_t i; + uint8_t *endPtr = ((uint8_t*)txn) + swap->swap32(txn->bytes_used); + uint8_t *dataPtr = ((uint8_t*)txn) + blSize; + int retval = -1; + for (i = 1; i < swap->swap32(txn->num_blocks); i++) { + if (debug) + plog("\tBlock %d: blkNum %llu, size %u, data offset = %zd\n", i, swap->swap64(txn->binfo[i].bnum), swap->swap32(txn->binfo[i].bsize), dataPtr - (uint8_t*)txn); + /* + * XXX + * Check with security types on these checks. Need to ensure + * that the fields don't take us off into the dark scary woods. + * It's mostly the second one that I am unsure about. + */ + if (dataPtr > endPtr) { + if (debug) + plog("\tData out of range for block_list_header\n"); + return retval; + } + if ((endPtr - dataPtr) < swap->swap32(txn->binfo[i].bsize)) { + if (debug) + plog("\tData size for block %d out of range for block_list_header\n", i); + return retval; + } + if ((dataPtr + swap->swap32(txn->binfo[i].bsize)) > endPtr) { + if (debug) + plog("\tData end out of range for block_list_header\n"); + return retval; + } + // Just for debugging + if (debug) { + if (swap->swap64(txn->binfo[i].bnum) == 2) { + HFSPlusVolumeHeader *vp = (void*)dataPtr; + plog("vp->signature = %#x, version = %#x\n", vp->signature, vp->version); + } + } + // It's in the spec, and I saw it come up once on a live volume. 
+ if (swap->swap64(txn->binfo[i].bnum) == ~(uint64_t)0) { + if (debug) + plog("\tSkipping this block due to magic skip number\n"); + } else { + // Should we set retval to -2 here? + if (writer) { + if ((writer)(swap->swap64(txn->binfo[i].bnum) * blkSize, dataPtr, swap->swap32(txn->binfo[i].bsize)) == -1) + return retval; + } + } + dataPtr += swap->swap32(txn->binfo[i].bsize); + retval = -2; + } + return 0; +} + +/* + * Read a journal header in from the journal device. + */ +static int +loadJournalHeader(int jfd, off_t offset, size_t blockSize, journal_header *jhp) +{ + uint8_t buffer[blockSize]; + ssize_t nread; + + nread = pread(jfd, buffer, sizeof(buffer), offset); + if (nread == -1 || + (size_t)nread != sizeof(buffer)) { + warn("tried to read %zu for journal header buffer, got %zd", sizeof(buffer), nread); + return -1; + } + *jhp = *(journal_header*)buffer; + return 0; +} + +/* + * Replay a journal (called "journal_open" because you have to + * replay it as part of opening it). At this point, all it + * is useful for is replaying the journal. + * + * It is passed in: + * jfd -- file descriptor for the journal device + * offset -- offset (in bytes) of the journal on the journal device + * journal_size -- size of the journal (in bytes) + * min_fs_blksize -- Blocksize of the data filesystem + * flags -- unused for now + * jdev_name -- string name for the journal device. used for logging. + * do_write_b -- a block which does the actual writing. + * + * Currently, for fsck_hfs, the do_write_b block writes to the cache. It could also + * just print out the block numbers, or just check their integrity, as much as is + * possible. + * + * The function works by loading the journal header. From there, it then starts + * loading transactions, via block_list_header groups. When it gets to the end + * of the journal, it tries continuing, in case there were transactions that + * didn't get updated in the header (this apparently happens). + * + * It returns 0 on success, and -1 on error. Note that there's not a lot + * fsck_hfs can probably do in the event of error. + * + */ +int +journal_open(int jfd, + off_t offset, // Offset of journal + off_t journal_size, // Size, in bytes, of the entire journal + size_t min_fs_blksize, // Blocksize of the data filesystem, journal blocksize must be at least this size + uint32_t flags __unused, // Not used in this implementation + const char *jdev_name, // The name of the journal device, for logging + int (^do_write_b)(off_t, void*, size_t)) +{ + journal_header jhdr = { 0 }; + swapper_t *jnlSwap; // Used to swap fields of the journal + uint32_t tempCksum; // Temporary checksum value + uint32_t jBlkSize = 0; + + if (ioctl(jfd, DKIOCGETBLOCKSIZE, &jBlkSize) == -1) { + jBlkSize = min_fs_blksize; + } else { + if (jBlkSize < min_fs_blksize) { + fplog(stderr, "%s: journal block size %u < min block size %zu for %s\n", __FUNCTION__, jBlkSize, min_fs_blksize, jdev_name); + return -1; + } + if ((jBlkSize % min_fs_blksize) != 0) { + fplog(stderr, "%s: journal block size %u is not a multiple of fs block size %zu for %s\n", __FUNCTION__, jBlkSize, min_fs_blksize, jdev_name); + return -1; + } + } + if (loadJournalHeader(jfd, offset, jBlkSize, &jhdr) != 0) { + fplog(stderr, "%s: unable to load journal header from %s\n", __FUNCTION__, jdev_name); + return -1; + } + + /* + * Unlike the rest of the filesystem, the journal can be in native or + * non-native byte order. Barring moving a filesystem from one host + * to another, it'll almost always be in native byte order.
+ */ + if (jhdr.endian == ENDIAN_MAGIC) { + jnlSwap = &nativeEndian; + } else if (OSSwapInt32(jhdr.endian) == ENDIAN_MAGIC) { + jnlSwap = &swappedEndian; + } else { + fplog(stderr, "%s: Unknown journal endian magic number %#x from %s\n", __FUNCTION__, jhdr.endian, jdev_name); + return -1; + } + /* + * Two different magic numbers are valid. + * Do they mean different things, though? + */ + if (jnlSwap->swap32(jhdr.magic) != JOURNAL_HEADER_MAGIC && + jnlSwap->swap32(jhdr.magic) != OLD_JOURNAL_HEADER_MAGIC) { + fplog(stderr, "%s: Unknown journal header magic number %#x from %s\n", __FUNCTION__, jhdr.magic, jdev_name); + return -1; + } + + /* + * Checksums have to be done with the checksum field set to 0. + * So we have to stash it aside for a bit, and set the field to + * 0, before we can compare. Afterwards, if it compares correctly, + * we put the original (swapped, if necessary) value back, just + * in case. + */ + tempCksum = jnlSwap->swap32(jhdr.checksum); + jhdr.checksum = 0; + if (jnlSwap->swap32(jhdr.magic) == JOURNAL_HEADER_MAGIC && + (calc_checksum((void*)&jhdr, JOURNAL_HEADER_CKSUM_SIZE) != tempCksum)) { + fplog(stderr, "%s: Invalid journal checksum from %s\n", __FUNCTION__, jdev_name); + return -1; + } + jhdr.checksum = jnlSwap->swap32(tempCksum); + + /* + * Set up information about the journal which we use to do the I/O. + * The journal is a circular buffer. However, the start of the journal + * buffer is past the journal header. See the JournalIOInfo structure above. + */ + off_t startOffset = jnlSwap->swap64(jhdr.start); + off_t endOffset = jnlSwap->swap64(jhdr.end); + off_t journalStart = offset + jnlSwap->swap32(jhdr.jhdr_size); + + /* + * The journal code was updated to be able to read past the "end" of the journal, + * to see if there were any valid transactions there. If we are peeking past the + * end, we don't care if we have checksum errors -- that just means they're not + * valid transactions. + * + */ + int into_the_weeds = 0; + uint32_t last_sequence_number = 0; + + JournalIOInfo_t jinfo = { 0 }; + + if (debug) + plog("Journal start sequence number = %u\n", jnlSwap->swap32(jhdr.sequence_num)); + + /* + * Now set up the JournalIOInfo object with the file descriptor, + * the block size, start and end of the journal buffer, and where + * the journal pointer currently is. + */ + jinfo.jfd = jfd; + jinfo.bSize = jnlSwap->swap32(jhdr.jhdr_size); + jinfo.base = journalStart; + jinfo.size = journal_size - jinfo.bSize; + jinfo.end = offset + endOffset; + jinfo.current = offset + startOffset; + + const char *state = ""; + int bad_journal = 0; + block_list_header *txn = NULL; + + /* + * Loop while getting transactions. We exit when we hit a checksum + * error, or when the sequence number for a transaction doesn't match + * what we expect it to. (That's the trickiest part -- the into_the_weeds + * portion of the code. It doesn't match the TN1150 documentation, so + * I've had to go by both my experience with real-world journals and by + * looking at the kernel code.) + */ + while (1) { + int rv; + + if (jinfo.current == jinfo.end && into_the_weeds == 0) { + /* + * This is a bit weird, but it works: if current == end, but into_the_weeds is 1, + * then this code will not execute. If it does execute, it'll go to get a transaction. + * That will put the pointer past end. + */ + if (jhdr.sequence_num == 0) { + /* + * XXX + * I am not sure about this; this behaviour is not in TN1150 at all, + * but I _think_ this is what the kernel is doing.
+ */ + plog("Journal sequence number is 0, is going into the end okay?\n"); + } + into_the_weeds = 1; + if (debug) + plog("Attempting to read past stated end of journal\n"); + state = "tentative "; + jinfo.end = (jinfo.base + startOffset - jinfo.bSize); + continue; + } + if (debug) + plog("Before getting %stransaction: jinfo.current = %llu\n", state, jinfo.current); + /* + * Note that getJournalTransaction verifies the checksum on the block_list_header, so + * if it's bad, it'll return NULL. + */ + txn = getJournalTransaction(&jinfo, jnlSwap); + if (txn == NULL) { + if (debug) + plog("txn is NULL, jinfo.current = %llu\n", jinfo.current); + if (into_the_weeds) { + if (debug) + plog("\tBut we do not care, since it is past the end of the journal\n"); + } else { + bad_journal = 1; + } + break; + } + if (debug) { + plog("After getting %stransaction: jinfo.current = %llu\n", state, jinfo.current); + plog("%stxn = { %u max_blocks, %u num_blocks, %u bytes_used, binfo[0].next = %u }\n", state, jnlSwap->swap32(txn->max_blocks), jnlSwap->swap32(txn->num_blocks), jnlSwap->swap32(txn->bytes_used), jnlSwap->swap32(txn->binfo[0].next)); + } + if (into_the_weeds) { + /* + * This seems to be what the kernel was checking: if the + * last_sequence_number was set, and the txn sequence number + * is set, and the txn sequence number doesn't match either + * last_sequence_number _or_ an incremented version of it, then + * the transaction isn't worth looking at, and we've reached + * the end of the journal. + */ + if (last_sequence_number != 0 && + txn->binfo[0].next != 0 && + jnlSwap->swap32(txn->binfo[0].next) != last_sequence_number && + jnlSwap->swap32(txn->binfo[0].next) != (last_sequence_number + 1)) { + // Probably not a valid transaction + if (debug) + plog("\tTentative txn sequence %u is not expected %u, stopping journal replay\n", jnlSwap->swap32(txn->binfo[0].next), last_sequence_number + 1); + break; + } + } + /* + * If we've got a valid transaction, then we replay it. + * If there was an error, we're done with the journal replay. + * (If the error occurred after the "end," then we don't care, + * and it's not a bad journal.) + */ + rv = replayTransaction(txn, + jnlSwap->swap32(jhdr.blhdr_size), + jnlSwap->swap32(jhdr.jhdr_size), + jnlSwap, + do_write_b); + + if (rv < 0) { + if (debug) + plog("\tTransaction replay failed, returned %d\n", rv); + if (into_the_weeds) { + if (debug) + plog("\t\tAnd we don't care\n"); + } else { + bad_journal = 1; + } + break; + } + last_sequence_number = jnlSwap->swap32(txn->binfo[0].next); + free(txn); + txn = NULL; + } + if (txn) + free(txn); + if (bad_journal) { + if (debug) + plog("Journal was bad, stopped replaying\n"); + return -1; + } + + return 0; +} diff --git a/fsck_hfs/dfalib/fsck_journal.h b/fsck_hfs/dfalib/fsck_journal.h new file mode 100644 index 0000000..8b3ecbf --- /dev/null +++ b/fsck_hfs/dfalib/fsck_journal.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _FSCK_JOURNAL_H +#define _FSCK_JOURNAL_H + +#include <sys/cdefs.h> + +#include <sys/types.h> + +/* + * The guts of the journal: a descriptor for which + * block number on the data disk is to be written. + */ +typedef struct block_info { + uint64_t bnum; + uint32_t bsize; + uint32_t next; +} __attribute__((__packed__)) block_info; + +/* + * A "transaction," for want of a better word. + * This contains a series of block_info, in the + * binfo array, which are used to modify the + * filesystem. + */ +typedef struct block_list_header { + uint16_t max_blocks; + uint16_t num_blocks; + uint32_t bytes_used; + uint32_t checksum; + uint32_t pad; + block_info binfo[1]; +} __attribute__((__packed__)) block_list_header; + +/* + * This is written to block zero of the journal and it + * maintains overall state about the journal. + */ +typedef struct journal_header { + int32_t magic; + int32_t endian; + off_t start; // zero-based byte offset of the start of the first transaction + off_t end; // zero-based byte offset of where free space begins + off_t size; // size in bytes of the entire journal + int32_t blhdr_size; // size in bytes of each block_list_header in the journal + int32_t checksum; + int32_t jhdr_size; // block size (in bytes) of the journal header + uint32_t sequence_num; // NEW FIELD: a monotonically increasing value assigned to all txn's +} __attribute__((__packed__)) journal_header; + +#define JOURNAL_HEADER_MAGIC 0x4a4e4c78 // 'JNLx' +#define OLD_JOURNAL_HEADER_MAGIC 0x4a484452 // 'JHDR' +#define ENDIAN_MAGIC 0x12345678 + +// +// we only checksum the original size of the journal_header to remain +// backwards compatible. the size of the original journal_header is +// everything up to the the sequence_num field, hence we use the +// offsetof macro to calculate the size. +// +#define JOURNAL_HEADER_CKSUM_SIZE (offsetof(struct journal_header, sequence_num)) + +#define OLD_JOURNAL_HEADER_MAGIC 0x4a484452 // 'JHDR' + +/* + * The function used by fsck_hfs to replay the journal. + * It's modeled on the kernel function. + * + * For the do_write_b block, the offset argument is in bytes -- + * the journal replay code will convert from journal block to + * bytes. 
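+ *
+ * A minimal usage sketch (jfd, vol_fd, jnl_offset and jnl_size are
+ * hypothetical placeholders; fsck_hfs itself passes a block that writes
+ * through its cache rather than calling pwrite() directly):
+ *
+ *	int rv = journal_open(jfd, jnl_offset, jnl_size, 512, 0, "disk3s2",
+ *		^(off_t where, void *buf, size_t amt) {
+ *			if (pwrite(vol_fd, buf, amt, where) != (ssize_t)amt)
+ *				return -1;	// returning -1 stops the replay
+ *			return 0;
+ *		});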
+ */ + +int journal_open(int jdev, + off_t offset, + off_t journal_size, + size_t min_fs_block_size, + uint32_t flags, + const char *jdev_name, + int (^do_write_b)(off_t, void *, size_t)); + +#endif /* !_FSCK_JOURNAL_H */ diff --git a/fsck_hfs/dfalib/hfs_endian.c b/fsck_hfs/dfalib/hfs_endian.c new file mode 100755 index 0000000..74c66e6 --- /dev/null +++ b/fsck_hfs/dfalib/hfs_endian.c @@ -0,0 +1,1119 @@ +/* + * Copyright (c) 2002, 2004, 2005, 2007-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * hfs_endian.c + * + * This file implements endian swapping routines for the HFS/HFS Plus + * volume format. + */ + +#include <stddef.h> +#include <sys/types.h> +#include <sys/stat.h> + +#include <libkern/OSByteOrder.h> +#include <hfs/hfs_format.h> + +#include "Scavenger.h" +#include "BTreePrivate.h" +#include "hfs_endian.h" +#include "../fsck_hfs.h" + +#undef ENDIAN_DEBUG + +/* + * Internal swapping routines + * + * These routines handle swapping the records of leaf and index nodes. The + * layout of the keys and records varies depending on the kind of B-tree + * (determined by fileID). + * + * The direction parameter must be kSwapBTNodeBigToHost or kSwapBTNodeHostToBig. + * The kSwapBTNodeHeaderRecordOnly "direction" is not valid for these routines. 
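+ *
+ * These internal routines are reached only through hfs_swap_BTNode(),
+ * declared in hfs_endian.h. A minimal call sketch (the BlockDescriptor
+ * `bd`, the SFCB `fcb` and the error handling are illustrative, not a
+ * required pattern): after reading a node from disk,
+ *
+ *	if (hfs_swap_BTNode(&bd, fcb, kSwapBTNodeBigToHost) != 0)
+ *		return E_BadNode;	// node failed swap-time sanity checks
+ *
+ * and the node is swapped back with kSwapBTNodeHostToBig just before it
+ * is written out.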
+ */ +static int hfs_swap_HFSPlusBTInternalNode (BlockDescriptor *src, SFCB *fcb, enum HFSBTSwapDirection direction); +static int hfs_swap_HFSBTInternalNode (BlockDescriptor *src, SFCB *fcb, enum HFSBTSwapDirection direction); + +/* + * hfs_swap_HFSPlusForkData + */ +static void +hfs_swap_HFSPlusForkData ( + HFSPlusForkData *src +) +{ + int i; + + src->logicalSize = SWAP_BE64 (src->logicalSize); + + src->clumpSize = SWAP_BE32 (src->clumpSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + src->extents[i].startBlock = SWAP_BE32 (src->extents[i].startBlock); + src->extents[i].blockCount = SWAP_BE32 (src->extents[i].blockCount); + } +} + +/* + * hfs_swap_HFSMasterDirectoryBlock + * + * Specially modified to swap parts of the finder info + */ +void +hfs_swap_HFSMasterDirectoryBlock ( + void *buf +) +{ + HFSMasterDirectoryBlock *src = (HFSMasterDirectoryBlock *)buf; + + src->drSigWord = SWAP_BE16 (src->drSigWord); + src->drCrDate = SWAP_BE32 (src->drCrDate); + src->drLsMod = SWAP_BE32 (src->drLsMod); + src->drAtrb = SWAP_BE16 (src->drAtrb); + src->drNmFls = SWAP_BE16 (src->drNmFls); + src->drVBMSt = SWAP_BE16 (src->drVBMSt); + src->drAllocPtr = SWAP_BE16 (src->drAllocPtr); + src->drNmAlBlks = SWAP_BE16 (src->drNmAlBlks); + src->drAlBlkSiz = SWAP_BE32 (src->drAlBlkSiz); + src->drClpSiz = SWAP_BE32 (src->drClpSiz); + src->drAlBlSt = SWAP_BE16 (src->drAlBlSt); + src->drNxtCNID = SWAP_BE32 (src->drNxtCNID); + src->drFreeBks = SWAP_BE16 (src->drFreeBks); + + /* Don't swap drVN */ + + src->drVolBkUp = SWAP_BE32 (src->drVolBkUp); + src->drVSeqNum = SWAP_BE16 (src->drVSeqNum); + src->drWrCnt = SWAP_BE32 (src->drWrCnt); + src->drXTClpSiz = SWAP_BE32 (src->drXTClpSiz); + src->drCTClpSiz = SWAP_BE32 (src->drCTClpSiz); + src->drNmRtDirs = SWAP_BE16 (src->drNmRtDirs); + src->drFilCnt = SWAP_BE32 (src->drFilCnt); + src->drDirCnt = SWAP_BE32 (src->drDirCnt); + + /* Swap just the 'blessed folder' in drFndrInfo */ + src->drFndrInfo[0] = SWAP_BE32 (src->drFndrInfo[0]); + + src->drEmbedSigWord = SWAP_BE16 (src->drEmbedSigWord); + src->drEmbedExtent.startBlock = SWAP_BE16 (src->drEmbedExtent.startBlock); + src->drEmbedExtent.blockCount = SWAP_BE16 (src->drEmbedExtent.blockCount); + + src->drXTFlSize = SWAP_BE32 (src->drXTFlSize); + src->drXTExtRec[0].startBlock = SWAP_BE16 (src->drXTExtRec[0].startBlock); + src->drXTExtRec[0].blockCount = SWAP_BE16 (src->drXTExtRec[0].blockCount); + src->drXTExtRec[1].startBlock = SWAP_BE16 (src->drXTExtRec[1].startBlock); + src->drXTExtRec[1].blockCount = SWAP_BE16 (src->drXTExtRec[1].blockCount); + src->drXTExtRec[2].startBlock = SWAP_BE16 (src->drXTExtRec[2].startBlock); + src->drXTExtRec[2].blockCount = SWAP_BE16 (src->drXTExtRec[2].blockCount); + + src->drCTFlSize = SWAP_BE32 (src->drCTFlSize); + src->drCTExtRec[0].startBlock = SWAP_BE16 (src->drCTExtRec[0].startBlock); + src->drCTExtRec[0].blockCount = SWAP_BE16 (src->drCTExtRec[0].blockCount); + src->drCTExtRec[1].startBlock = SWAP_BE16 (src->drCTExtRec[1].startBlock); + src->drCTExtRec[1].blockCount = SWAP_BE16 (src->drCTExtRec[1].blockCount); + src->drCTExtRec[2].startBlock = SWAP_BE16 (src->drCTExtRec[2].startBlock); + src->drCTExtRec[2].blockCount = SWAP_BE16 (src->drCTExtRec[2].blockCount); +} + +/* + * hfs_swap_HFSPlusVolumeHeader + */ +void +hfs_swap_HFSPlusVolumeHeader ( + void *buf +) +{ + HFSPlusVolumeHeader *src = (HFSPlusVolumeHeader *)buf; + + src->signature = SWAP_BE16 (src->signature); + src->version = SWAP_BE16 (src->version); + src->attributes = 
SWAP_BE32 (src->attributes); + src->lastMountedVersion = SWAP_BE32 (src->lastMountedVersion); + + /* Don't swap reserved */ + + src->createDate = SWAP_BE32 (src->createDate); + src->modifyDate = SWAP_BE32 (src->modifyDate); + src->backupDate = SWAP_BE32 (src->backupDate); + src->checkedDate = SWAP_BE32 (src->checkedDate); + src->fileCount = SWAP_BE32 (src->fileCount); + src->folderCount = SWAP_BE32 (src->folderCount); + src->blockSize = SWAP_BE32 (src->blockSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + src->freeBlocks = SWAP_BE32 (src->freeBlocks); + src->nextAllocation = SWAP_BE32 (src->nextAllocation); + src->rsrcClumpSize = SWAP_BE32 (src->rsrcClumpSize); + src->dataClumpSize = SWAP_BE32 (src->dataClumpSize); + src->nextCatalogID = SWAP_BE32 (src->nextCatalogID); + src->writeCount = SWAP_BE32 (src->writeCount); + src->encodingsBitmap = SWAP_BE64 (src->encodingsBitmap); + + /* Don't swap finderInfo */ + + hfs_swap_HFSPlusForkData (&src->allocationFile); + hfs_swap_HFSPlusForkData (&src->extentsFile); + hfs_swap_HFSPlusForkData (&src->catalogFile); + hfs_swap_HFSPlusForkData (&src->attributesFile); + hfs_swap_HFSPlusForkData (&src->startupFile); +} + +/* + * hfs_swap_BTNode + * + * NOTE: This operation is not naturally symmetric. + * We have to determine which way we're swapping things. + */ +int +hfs_swap_BTNode ( + BlockDescriptor *src, + SFCB *fcb, + enum HFSBTSwapDirection direction +) +{ + BTNodeDescriptor *srcDesc = src->buffer; + BTreeControlBlockPtr btcb = fcb->fcbBtree; + UInt16 *srcOffs = NULL; + UInt32 i; + int error = 0; + +// WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + +#ifdef ENDIAN_DEBUG + if (direction == kSwapBTNodeBigToHost) { + plog ("BE -> Native Swap\n"); + } else if (direction == kSwapBTNodeHostToBig) { + plog ("Native -> BE Swap\n"); + } else if (direction == kSwapBTNodeHeaderRecordOnly) { + plog ("Not swapping descriptors\n"); + } else { + plog ("hfs_swap_BTNode: This is impossible"); + exit(99); + } +#endif + + /* + * If we are doing a swap from on-disk to in-memory, then swap the node + * descriptor and record offsets before we need to use them. + */ + if (direction == kSwapBTNodeBigToHost) { + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + if (srcDesc->fLink >= btcb->totalNodes) { + if (debug) plog("hfs_swap_BTNode: invalid forward link (0x%08X)\n", srcDesc->fLink); + } + if (srcDesc->bLink >= btcb->totalNodes) { + if (debug) plog("hfs_swap_BTNode: invalid backward link (0x%08X)\n", srcDesc->bLink); + } + + /* + * Don't swap srcDesc->kind or srcDesc->height because they are only one byte. + * We don't check them here because the upper layers will check (and possibly + * repair) them more effectively. + */ + if (srcDesc->kind < kBTLeafNode || srcDesc->kind > kBTMapNode) { + if (debug) plog("hfs_swap_BTNode: invalid node kind (%d)\n", srcDesc->kind); + } + if (srcDesc->height > btcb->treeDepth) { + if (debug) plog("hfs_swap_BTNode: invalid node height (%d)\n", srcDesc->height); + } + + /* Don't swap srcDesc->reserved */ + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + + /* + * Swap the node offsets (including the free space one!). + */ + srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (UInt16)))); + + /* + * Sanity check that the record offsets are within the node itself. 
+ */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize) || + (char *)srcOffs < ((char *)src->buffer + sizeof(BTNodeDescriptor))) { + if (debug) plog("hfs_swap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords); + WriteError(fcb->fcbVolume->vcbGPtr, E_NRecs, fcb->fcbFileID, src->blockNum); + error = E_NRecs; + goto fail; + } + + /* + * Swap and sanity check each of the record offsets. + */ + for (i = 0; i <= srcDesc->numRecords; i++) { + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + + /* + * Sanity check: must be even, and within the node itself. + * + * We may be called to swap an unused node, which contains all zeroes. + * This is why we allow the record offset to be zero. + */ + if ((srcOffs[i] & 1) || (srcOffs[i] < sizeof(BTNodeDescriptor) && srcOffs[i] != 0) || (srcOffs[i] >= src->blockSize)) { + if (debug) plog("hfs_swap_BTNode: record #%d invalid offset (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + error = E_BadNode; + goto fail; + } + + /* + * Make sure the offsets are strictly increasing. Note that we're looping over + * them backwards, hence the order in the comparison. + */ + if ((i != 0) && (srcOffs[i] >= srcOffs[i-1])) { + if (debug) plog("hfs_swap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n", + srcDesc->numRecords-i-1, srcDesc->numRecords-i, srcOffs[i], srcOffs[i-1]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + error = E_BadNode; + goto fail; + } + } + } + + /* + * Swap the records (ordered by frequency of access) + */ + if ((srcDesc->kind == kBTIndexNode) || + (srcDesc-> kind == kBTLeafNode)) { + + if (fcb->fcbVolume->vcbSignature == kHFSPlusSigWord) { + error = hfs_swap_HFSPlusBTInternalNode (src, fcb, direction); + } else { + error = hfs_swap_HFSBTInternalNode (src, fcb, direction); + } + + if (error) goto fail; + + } else if (srcDesc-> kind == kBTMapNode) { + /* Don't swap the bitmaps, they'll be done in the bitmap routines */ + + } else if (srcDesc-> kind == kBTHeaderNode) { + /* The header's offset is hard-wired because we cannot trust the offset pointers. */ + BTHeaderRec *srcHead = (BTHeaderRec *)((char *)src->buffer + sizeof(BTNodeDescriptor)); + + srcHead->treeDepth = SWAP_BE16 (srcHead->treeDepth); + + srcHead->rootNode = SWAP_BE32 (srcHead->rootNode); + srcHead->leafRecords = SWAP_BE32 (srcHead->leafRecords); + srcHead->firstLeafNode = SWAP_BE32 (srcHead->firstLeafNode); + srcHead->lastLeafNode = SWAP_BE32 (srcHead->lastLeafNode); + + srcHead->nodeSize = SWAP_BE16 (srcHead->nodeSize); + srcHead->maxKeyLength = SWAP_BE16 (srcHead->maxKeyLength); + + srcHead->totalNodes = SWAP_BE32 (srcHead->totalNodes); + srcHead->freeNodes = SWAP_BE32 (srcHead->freeNodes); + + srcHead->clumpSize = SWAP_BE32 (srcHead->clumpSize); + srcHead->attributes = SWAP_BE32 (srcHead->attributes); + + /* Don't swap srcHead->reserved1 */ + /* Don't swap srcHead->btreeType; it's only one byte */ + /* Don't swap srcHead->reserved2 */ + /* Don't swap srcHead->reserved3 */ + /* Don't swap bitmap */ + } + /* Else: other node kinds will be caught by upper layers */ + + /* + * If we are doing a swap from in-memory to on-disk, then swap the node + * descriptor and record offsets after we're done using them. + */ + if (direction == kSwapBTNodeHostToBig) { + /* + * Swap the forward and backward links. 
+ */ + if (srcDesc->fLink >= btcb->totalNodes) { + if (debug) plog("hfs_UNswap_BTNode: invalid forward link (0x%08X)\n", srcDesc->fLink); + } + if (srcDesc->bLink >= btcb->totalNodes) { + if (debug) plog("hfs_UNswap_BTNode: invalid backward link (0x%08X)\n", srcDesc->bLink); + } + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + + /* + * Don't swap srcDesc->kind or srcDesc->height because they are only one byte. + * We don't check them here because the upper layers will check (and possibly + * repair) them more effectively. + */ + if (srcDesc->kind < kBTLeafNode || srcDesc->kind > kBTMapNode) { + if (debug) plog("hfs_UNswap_BTNode: invalid node kind (%d)\n", srcDesc->kind); + } + if (srcDesc->height > btcb->treeDepth) { + if (debug) plog("hfs_UNswap_BTNode: invalid node height (%d)\n", srcDesc->height); + } + + /* Don't swap srcDesc->reserved */ + + /* + * Swap the node offsets (including the free space one!). + */ + srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (UInt16)))); + + /* + * Sanity check that the record offsets are within the node itself. + */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize) || + (char *)srcOffs < ((char *)src->buffer + sizeof(BTNodeDescriptor))) { + if (debug) plog("hfs_UNswap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords); + WriteError(fcb->fcbVolume->vcbGPtr, E_NRecs, fcb->fcbFileID, src->blockNum); + error = E_NRecs; + goto fail; + } + + /* + * Swap and sanity check each of the record offsets. + */ + for (i = 0; i <= srcDesc->numRecords; i++) { + /* + * Sanity check: must be even, and within the node itself. + * + * We may be called to swap an unused node, which contains all zeroes. + * This is why we allow the record offset to be zero. + */ + if ((srcOffs[i] & 1) || (srcOffs[i] < sizeof(BTNodeDescriptor) && srcOffs[i] != 0) || (srcOffs[i] >= src->blockSize)) { + if (debug) plog("hfs_UNswap_BTNode: record #%d invalid offset (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + error = E_BadNode; + goto fail; + } + + /* + * Make sure the offsets are strictly increasing. Note that we're looping over + * them backwards, hence the order in the comparison. 
+ */ + if ((i < srcDesc->numRecords) && (srcOffs[i+1] >= srcOffs[i])) { + if (debug) plog("hfs_UNswap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n", + srcDesc->numRecords-i-2, srcDesc->numRecords-i-1, srcOffs[i+1], srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + error = E_BadNode; + goto fail; + } + + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + } + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + } + +fail: + if (error && (cur_debug_level & d_dump_node)) + { + plog("Node %u:\n", src->blockNum); + HexDump(src->buffer, src->blockSize, TRUE); + } + return (error); +} + +static int +hfs_swap_HFSPlusBTInternalNode ( + BlockDescriptor *src, + SFCB *fcb, + enum HFSBTSwapDirection direction +) +{ + HFSCatalogNodeID fileID =fcb->fcbFileID; + BTNodeDescriptor *srcDesc = src->buffer; + UInt16 *srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - (srcDesc->numRecords * sizeof (UInt16)))); + char *nextRecord; /* Points to start of record following current one */ + int32_t i; + UInt32 j; + + if (fileID == kHFSExtentsFileID) { + HFSPlusExtentKey *srcKey; + HFSPlusExtentDescriptor *srcRec; + size_t recordSize; /* Size of the data part of the record, or node number for index nodes */ + + if (srcDesc->kind == kBTIndexNode) + recordSize = sizeof(UInt32); + else + recordSize = sizeof(HFSPlusExtentDescriptor); + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusExtentKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* + * Make sure the key and data are within the buffer. Since both key + * and data are fixed size, this is relatively easy. Note that this + * relies on the keyLength being a constant; we verify the keyLength + * below. + */ + if ((char *)srcKey + sizeof(HFSPlusExtentKey) + recordSize > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength); + WriteError(fcb->fcbVolume->vcbGPtr, E_KeyLen, fcb->fcbFileID, src->blockNum); + return E_KeyLen; + } + srcRec = (HFSPlusExtentDescriptor *)((char *)srcKey + srcKey->keyLength + sizeof(srcKey->keyLength)); + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + + /* Don't swap srcKey->forkType; it's only one byte */ + /* Don't swap srcKey->pad */ + + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + srcKey->startBlock = SWAP_BE32 (srcKey->startBlock); + + if (srcDesc->kind == kBTIndexNode) { + /* For index nodes, the record data is just a child node number. 
*/ + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + } else { + /* Swap the extent data */ + for (j = 0; j < kHFSPlusExtentDensity; j++) { + srcRec[j].startBlock = SWAP_BE32 (srcRec[j].startBlock); + srcRec[j].blockCount = SWAP_BE32 (srcRec[j].blockCount); + } + } + } + + } else if (fileID == kHFSCatalogFileID || fileID == kHFSRepairCatalogFileID) { + HFSPlusCatalogKey *srcKey; + SInt16 *srcPtr; + u_int16_t keyLength; + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusCatalogKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* + * Make sure we can safely dereference the keyLength and parentID fields. */ + if ((char *)srcKey + offsetof(HFSPlusCatalogKey, nodeName.unicode[0]) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* + * Swap and sanity check the key length + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + keyLength = srcKey->keyLength; /* Put it in a local (native order) because we use it several times */ + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16 (keyLength); + + /* Sanity check the key length */ + if (keyLength < kHFSPlusCatalogKeyMinimumLength || keyLength > kHFSPlusCatalogKeyMaximumLength) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, keyLength); + WriteError(fcb->fcbVolume->vcbGPtr, E_KeyLen, fcb->fcbFileID, src->blockNum); + return E_KeyLen; + } + + /* + * Make sure that we can safely dereference the record's type field or + * an index node's child node number. 
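+ *
+ * (Worked example: for a catalog key whose nodeName holds 3 Unicode
+ * characters, the expected keyLength is sizeof(parentID) +
+ * sizeof(nodeName.length) + 3 * sizeof(UniChar) = 4 + 2 + 6 = 12;
+ * the consistency check a little further below rejects keys whose
+ * stored keyLength is smaller than this.)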
+ */ + srcPtr = (SInt16 *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength)); + if ((char *)srcPtr + sizeof(UInt32) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_KeyLen, fcb->fcbFileID, src->blockNum); + return E_KeyLen; + } + + srcKey->parentID = SWAP_BE32 (srcKey->parentID); + + /* + * Swap and sanity check the key's node name + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + /* Make sure name length is consistent with key length */ + if (keyLength < sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) + + srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0])) { + if (debug){ + uintptr_t keyOffset = (uintptr_t)srcKey - (uintptr_t)src->buffer; + uintptr_t recordSize = (uintptr_t)nextRecord - (uintptr_t)srcKey; + unsigned recordIndex = srcDesc->numRecords - i; + + plog("hfs_swap_HFSPlusBTInternalNode: catalog record #%d (0-based, offset 0x%lX) keyLength=%d expected=%lu\n", + recordIndex, keyOffset, keyLength, sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) + + srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0])); + if (cur_debug_level & d_dump_record) { + plog("Record %u (offset 0x%04X):\n", recordIndex, keyOffset); + HexDump(srcKey, recordSize, FALSE); + } + } + WriteError(fcb->fcbVolume->vcbGPtr, E_KeyLen, fcb->fcbFileID, src->blockNum); + return E_KeyLen; + } + for (j = 0; j < srcKey->nodeName.length; j++) { + srcKey->nodeName.unicode[j] = SWAP_BE16 (srcKey->nodeName.unicode[j]); + } + if (direction == kSwapBTNodeHostToBig) + srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + + /* + * For index nodes, the record data is just the child's node number. + * Skip over swapping the various types of catalog record. + */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcPtr) = SWAP_BE32 (*((UInt32 *)srcPtr)); + continue; + } + + /* Make sure the recordType is in native order before using it. */ + if (direction == kSwapBTNodeBigToHost) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + + if (srcPtr[0] == kHFSPlusFolderRecord) { + HFSPlusCatalogFolder *srcRec = (HFSPlusCatalogFolder *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + srcRec->flags = SWAP_BE16 (srcRec->flags); + srcRec->valence = SWAP_BE32 (srcRec->valence); + srcRec->folderID = SWAP_BE32 (srcRec->folderID); + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 (srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags; it's only one byte */ + /* Don't swap srcRec->bsdInfo.ownerFlags; it's only one byte */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + /* The only field we use in srcRec->userInfo is frFlags (used in VLockedChk). 
*/ + srcRec->userInfo.frFlags = SWAP_BE16 (srcRec->userInfo.frFlags); + + /* Don't swap srcRec->finderInfo */ + srcRec->folderCount = SWAP_BE32 (srcRec->folderCount); + + } else if (srcPtr[0] == kHFSPlusFileRecord) { + HFSPlusCatalogFile *srcRec = (HFSPlusCatalogFile *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1); + return fsBTInvalidNodeErr; + } + + srcRec->flags = SWAP_BE16 (srcRec->flags); + + srcRec->fileID = SWAP_BE32 (srcRec->fileID); + + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 (srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags; it's only one byte */ + /* Don't swap srcRec->bsdInfo.ownerFlags; it's only one byte */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + srcRec->hl_firstLinkID = SWAP_BE32 (srcRec->hl_firstLinkID); + + srcRec->userInfo.fdType = SWAP_BE32 (srcRec->userInfo.fdType); + srcRec->userInfo.fdCreator = SWAP_BE32 (srcRec->userInfo.fdCreator); + srcRec->userInfo.fdFlags = SWAP_BE16 (srcRec->userInfo.fdFlags); + srcRec->userInfo.fdLocation.v = SWAP_BE16 (srcRec->userInfo.fdLocation.v); + srcRec->userInfo.fdLocation.h = SWAP_BE16 (srcRec->userInfo.fdLocation.h); + srcRec->userInfo.opaque = SWAP_BE16 (srcRec->userInfo.opaque); + + /* Don't swap srcRec->finderInfo */ + /* Don't swap srcRec->reserved2 */ + + hfs_swap_HFSPlusForkData (&srcRec->dataFork); + hfs_swap_HFSPlusForkData (&srcRec->resourceFork); + + } else if ((srcPtr[0] == kHFSPlusFolderThreadRecord) || + (srcPtr[0] == kHFSPlusFileThreadRecord)) { + + /* + * Make sure there is room for parentID and name length. + */ + HFSPlusCatalogThread *srcRec = (HFSPlusCatalogThread *)srcPtr; + if ((char *) &srcRec->nodeName.unicode[0] > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Don't swap srcRec->reserved */ + + srcRec->parentID = SWAP_BE32 (srcRec->parentID); + + if (direction == kSwapBTNodeBigToHost) + srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + + /* + * Make sure there is room for the name in the buffer. + * Then swap the characters of the name itself. 
+ */ + if ((char *) &srcRec->nodeName.unicode[srcRec->nodeName.length] > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + for (j = 0; j < srcRec->nodeName.length; j++) { + srcRec->nodeName.unicode[j] = SWAP_BE16 (srcRec->nodeName.unicode[j]); + } + + if (direction == kSwapBTNodeHostToBig) + srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + + } else { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1); + } + + /* We can swap the record type now that we're done using it. */ + if (direction == kSwapBTNodeHostToBig) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + } + + } else if (fileID == kHFSAttributesFileID) { + HFSPlusAttrKey *srcKey; + HFSPlusAttrRecord *srcRec; + u_int16_t keyLength; + u_int32_t attrSize = 0; + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSPlusAttrKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* Make sure there is room in the buffer for a minimal key */ + if ((char *) &srcKey->attrName[1] > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Swap the key length field */ + if (direction == kSwapBTNodeBigToHost) + srcKey->keyLength = SWAP_BE16(srcKey->keyLength); + keyLength = srcKey->keyLength; /* Keep a copy in native order */ + if (direction == kSwapBTNodeHostToBig) + srcKey->keyLength = SWAP_BE16(srcKey->keyLength); + + /* + * Make sure that we can safely dereference the record's type field or + * an index node's child node number. + */ + srcRec = (HFSPlusAttrRecord *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength)); + if ((char *)srcRec + sizeof(u_int32_t) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr key #%d too big (%d)\n", srcDesc->numRecords-i-1, keyLength); + WriteError(fcb->fcbVolume->vcbGPtr, E_KeyLen, fcb->fcbFileID, src->blockNum); + return E_KeyLen; + } + + srcKey->fileID = SWAP_BE32(srcKey->fileID); + srcKey->startBlock = SWAP_BE32(srcKey->startBlock); + + /* + * Swap and check the attribute name + */ + if (direction == kSwapBTNodeBigToHost) + srcKey->attrNameLen = SWAP_BE16(srcKey->attrNameLen); + /* Sanity check the attribute name length */ + if (srcKey->attrNameLen > kHFSMaxAttrNameLen || keyLength < (kHFSPlusAttrKeyMinimumLength + sizeof(u_int16_t)*srcKey->attrNameLen)) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr key #%d keyLength=%d attrNameLen=%d\n", srcDesc->numRecords-i-1, keyLength, srcKey->attrNameLen); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + for (j = 0; j < srcKey->attrNameLen; j++) + srcKey->attrName[j] = SWAP_BE16(srcKey->attrName[j]); + if (direction == kSwapBTNodeHostToBig) + srcKey->attrNameLen = SWAP_BE16(srcKey->attrNameLen); + + /* + * For index nodes, the record data is just the child's node number. 
+ * Skip over swapping the various types of attribute record. + */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + continue; + } + + /* Swap the record data */ + if (direction == kSwapBTNodeBigToHost) + srcRec->recordType = SWAP_BE32(srcRec->recordType); + switch (srcRec->recordType) { + case kHFSPlusAttrInlineData: + /* Is there room for the inline data header? */ + if ((char *) &srcRec->attrData.attrData[0] > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* We're not swapping the reserved fields */ + + /* Swap the attribute size */ + if (direction == kSwapBTNodeHostToBig) + attrSize = srcRec->attrData.attrSize; + srcRec->attrData.attrSize = SWAP_BE32(srcRec->attrData.attrSize); + if (direction == kSwapBTNodeBigToHost) + attrSize = srcRec->attrData.attrSize; + + /* Is there room for the inline attribute data? */ + if ((char *) &srcRec->attrData.attrData[attrSize] > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big (attrSize=%u)\n", srcDesc->numRecords-i-1, attrSize); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Not swapping the attribute data itself */ + break; + + case kHFSPlusAttrForkData: + /* Is there room for the fork data record? */ + if ((char *)srcRec + sizeof(HFSPlusAttrForkData) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr fork data #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* We're not swapping the reserved field */ + + hfs_swap_HFSPlusForkData(&srcRec->forkData.theFork); + break; + + case kHFSPlusAttrExtents: + /* Is there room for an extent record? 
*/ + if ((char *)srcRec + sizeof(HFSPlusAttrExtents) > nextRecord) { + if (debug) plog("hfs_swap_HFSPlusBTInternalNode: attr extents #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* We're not swapping the reserved field */ + + for (j = 0; j < kHFSPlusExtentDensity; j++) { + srcRec->overflowExtents.extents[j].startBlock = + SWAP_BE32(srcRec->overflowExtents.extents[j].startBlock); + srcRec->overflowExtents.extents[j].blockCount = + SWAP_BE32(srcRec->overflowExtents.extents[j].blockCount); + } + break; + default: + if (debug) plog ("hfs_swap_BTNode: unrecognized attribute record type (%d)\n", srcRec->recordType); + } + if (direction == kSwapBTNodeHostToBig) + srcRec->recordType = SWAP_BE32(srcRec->recordType); + } + } else { + plog("hfs_swap_HFSPlusBTInternalNode: fileID %u is not a system B-tree\n", fileID); + exit(99); + } + + return (0); +} + +static int +hfs_swap_HFSBTInternalNode ( + BlockDescriptor *src, + SFCB *fcb, + enum HFSBTSwapDirection direction +) +{ + HFSCatalogNodeID fileID =fcb->fcbFileID; + BTNodeDescriptor *srcDesc = src->buffer; + UInt16 *srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - (srcDesc->numRecords * sizeof (UInt16)))); + char *nextRecord; /* Points to start of record following current one */ + + int32_t i; + UInt32 j; + + if (fileID == kHFSExtentsFileID) { + HFSExtentKey *srcKey; + HFSExtentDescriptor *srcRec; + size_t recordSize; /* Size of the data part of the record, or node number for index nodes */ + + if (srcDesc->kind == kBTIndexNode) + recordSize = sizeof(UInt32); + else + recordSize = sizeof(HFSExtentDescriptor); + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSExtentKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* + * Make sure the key and data are within the buffer. Since both key + * and data are fixed size, this is relatively easy. Note that this + * relies on the keyLength being a constant; we verify the keyLength + * below. + */ + if ((char *)srcKey + sizeof(HFSExtentKey) + recordSize > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Don't swap srcKey->keyLength (it's only one byte), but do sanity check it */ + if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) { + if (debug) plog("hfs_swap_HFSBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength); + } + + /* Don't swap srcKey->forkType; it's only one byte */ + + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + srcKey->startBlock = SWAP_BE16 (srcKey->startBlock); + + /* Point to record data (round up to even byte boundary) */ + srcRec = (HFSExtentDescriptor *)((char *)srcKey + ((srcKey->keyLength + 2) & ~1)); + + if (srcDesc->kind == kBTIndexNode) { + /* For index nodes, the record data is just a child node number. 
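+ *
+ * (Worked example: a classic HFSExtentKey carries keyLength = 7, i.e.
+ * forkType 1 + fileID 4 + startBlock 2 bytes, so the record data
+ * computed above begins at (7 + 2) & ~1 = 8 bytes past the start of
+ * the key -- here, the 4-byte child node number being swapped.)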
*/ + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + } else { + /* Swap the extent data */ + for (j = 0; j < kHFSExtentDensity; j++) { + srcRec[j].startBlock = SWAP_BE16 (srcRec[j].startBlock); + srcRec[j].blockCount = SWAP_BE16 (srcRec[j].blockCount); + } + } + } + + } else if (fileID == kHFSCatalogFileID || fileID == kHFSRepairCatalogFileID) { + HFSCatalogKey *srcKey; + SInt16 *srcPtr; + size_t expectedKeyLength; + + for (i = 0; i < srcDesc->numRecords; i++) { + /* Point to the start of the record we're currently checking. */ + srcKey = (HFSCatalogKey *)((char *)src->buffer + srcOffs[i]); + + /* + * Point to start of next (larger offset) record. We'll use this + * to be sure the current record doesn't overflow into the next + * record. + */ + nextRecord = (char *)src->buffer + srcOffs[i-1]; + + /* + * Make sure we can safely dereference the keyLength and parentID fields. + * The value 8 below is 1 bytes for keyLength + 1 byte reserved + 4 bytes + * for parentID + 1 byte for nodeName's length + 1 byte to round up the + * record start to an even offset, which forms a minimal key. + */ + if ((char *)srcKey + 8 > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Don't swap srcKey->keyLength (it's only one byte), but do sanity check it */ + if (srcKey->keyLength < kHFSCatalogKeyMinimumLength || srcKey->keyLength > kHFSCatalogKeyMaximumLength) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength); + } + + /* Don't swap srcKey->reserved */ + + srcKey->parentID = SWAP_BE32 (srcKey->parentID); + + /* Don't swap srcKey->nodeName */ + + /* Make sure the keyLength is big enough for the key's content */ + if (srcDesc->kind == kBTIndexNode) + expectedKeyLength = sizeof(*srcKey) - sizeof(srcKey->keyLength); + else + expectedKeyLength = srcKey->nodeName[0] + kHFSCatalogKeyMinimumLength; + if (srcKey->keyLength < expectedKeyLength) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog record #%d keyLength=%u expected=%u\n", + srcDesc->numRecords-i, srcKey->keyLength, expectedKeyLength); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Point to record data (round up to even byte boundary) */ + srcPtr = (SInt16 *)((char *)srcKey + ((srcKey->keyLength + 2) & ~1)); + + /* + * Make sure that we can safely dereference the record's type field or + * and index node's child node number. + */ + if ((char *)srcPtr + sizeof(UInt32) > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* + * For index nodes, the record data is just the child's node number. + * Skip over swapping the various types of catalog record. + */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcPtr) = SWAP_BE32 (*((UInt32 *)srcPtr)); + continue; + } + + /* Make sure the recordType is in native order before using it. 
*/ + if (direction == kSwapBTNodeBigToHost) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + + if (srcPtr[0] == kHFSFolderRecord) { + HFSCatalogFolder *srcRec = (HFSCatalogFolder *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + srcRec->flags = SWAP_BE16 (srcRec->flags); + srcRec->valence = SWAP_BE16 (srcRec->valence); + + srcRec->folderID = SWAP_BE32 (srcRec->folderID); + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->modifyDate = SWAP_BE32 (srcRec->modifyDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + /* The only field we use in srcRec->userInfo is frFlags (used in VLockedChk). */ + srcRec->userInfo.frFlags = SWAP_BE16 (srcRec->userInfo.frFlags); + + /* Don't swap srcRec->finderInfo */ + /* Don't swap resserved array */ + + } else if (srcPtr[0] == kHFSFileRecord) { + HFSCatalogFile *srcRec = (HFSCatalogFile *)srcPtr; + if ((char *)srcRec + sizeof(*srcRec) > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + srcRec->flags = srcRec->flags; + srcRec->fileType = srcRec->fileType; + + /* Don't swap srcRec->userInfo */ + + srcRec->fileID = SWAP_BE32 (srcRec->fileID); + + srcRec->dataStartBlock = SWAP_BE16 (srcRec->dataStartBlock); + srcRec->dataLogicalSize = SWAP_BE32 (srcRec->dataLogicalSize); + srcRec->dataPhysicalSize = SWAP_BE32 (srcRec->dataPhysicalSize); + + srcRec->rsrcStartBlock = SWAP_BE16 (srcRec->rsrcStartBlock); + srcRec->rsrcLogicalSize = SWAP_BE32 (srcRec->rsrcLogicalSize); + srcRec->rsrcPhysicalSize = SWAP_BE32 (srcRec->rsrcPhysicalSize); + + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->modifyDate = SWAP_BE32 (srcRec->modifyDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + /* Don't swap srcRec->finderInfo */ + + srcRec->clumpSize = SWAP_BE16 (srcRec->clumpSize); + + /* Swap the two sets of extents as an array of six (three each) UInt16 */ + for (j = 0; j < kHFSExtentDensity * 2; j++) { + srcRec->dataExtents[j].startBlock = SWAP_BE16 (srcRec->dataExtents[j].startBlock); + srcRec->dataExtents[j].blockCount = SWAP_BE16 (srcRec->dataExtents[j].blockCount); + } + + /* Don't swap srcRec->reserved */ + + } else if ((srcPtr[0] == kHFSFolderThreadRecord) || + (srcPtr[0] == kHFSFileThreadRecord)) { + HFSCatalogThread *srcRec = (HFSCatalogThread *)srcPtr; + + /* Make sure there is room for parentID and name length */ + if ((char *) &srcRec->nodeName[1] > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + + /* Don't swap srcRec->reserved array */ + + srcRec->parentID = SWAP_BE32 (srcRec->parentID); + + /* Don't swap srcRec->nodeName */ + + /* Make sure there is room for the name in the buffer */ + if ((char *) &srcRec->nodeName[srcRec->nodeName[0]] > nextRecord) { + if (debug) plog("hfs_swap_HFSBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1); + WriteError(fcb->fcbVolume->vcbGPtr, E_BadNode, fcb->fcbFileID, src->blockNum); + return E_BadNode; + } + } else { + if (debug) plog("hfs_swap_HFSBTInternalNode: 
unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1); + } + + /* We can swap the record type now that we're done using it */ + if (direction == kSwapBTNodeHostToBig) + srcPtr[0] = SWAP_BE16 (srcPtr[0]); + } + + } else { + plog("hfs_swap_HFSBTInternalNode: fileID %u is not a system B-tree\n", fileID); + exit(99); + } + + return (0); +} diff --git a/fsck_hfs/dfalib/hfs_endian.h b/fsck_hfs/dfalib/hfs_endian.h new file mode 100755 index 0000000..923d7fd --- /dev/null +++ b/fsck_hfs/dfalib/hfs_endian.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2000, 2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HFS_ENDIAN_H__ +#define __HFS_ENDIAN_H__ + +/* + * hfs_endian.h + * + * This file prototypes endian swapping routines for the HFS/HFS Plus + * volume format. + */ +#include <hfs/hfs_format.h> +#include <libkern/OSByteOrder.h> +#include "SRuntime.h" + +/*********************/ +/* BIG ENDIAN Macros */ +/*********************/ +#define SWAP_BE16(__a) OSSwapBigToHostInt16 (__a) +#define SWAP_BE32(__a) OSSwapBigToHostInt32 (__a) +#define SWAP_BE64(__a) OSSwapBigToHostInt64 (__a) + +#if BYTE_ORDER == BIG_ENDIAN + + /* HFS is always big endian, no swapping needed */ + #define SWAP_HFSMDB(__a) + #define SWAP_HFSPLUSVH(__a) + +/************************/ +/* LITTLE ENDIAN Macros */ +/************************/ +#elif BYTE_ORDER == LITTLE_ENDIAN + + #define SWAP_HFSMDB(__a) hfs_swap_HFSMasterDirectoryBlock ((__a)) + #define SWAP_HFSPLUSVH(__a) hfs_swap_HFSPlusVolumeHeader ((__a)) + +#else +#warning Unknown byte order +#error +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Constants for the "unswap" argument to hfs_swap_BTNode: + */ +enum HFSBTSwapDirection { + kSwapBTNodeBigToHost = 0, + kSwapBTNodeHostToBig = 1, + + /* + * kSwapBTNodeHeaderRecordOnly is used to swap just the header record + * of a header node from big endian (on disk) to host endian (in memory). + * It does not swap the node descriptor (forward/backward links, record + * count, etc.). It assumes the header record is at offset 0x000E. + * + * Since HFS Plus doesn't have fixed B-tree node sizes, we have to read + * the header record to determine the actual node size for that tree + * before we can set up the B-tree control block. We read it initially + * as 512 bytes, then re-read it once we know the correct node size. Since + * we may not have read the entire header node the first time, we can't + * swap the record offsets, other records, or do most sanity checks. 
+ */ + kSwapBTNodeHeaderRecordOnly = 3 +}; + +void hfs_swap_HFSMasterDirectoryBlock (void *buf); +void hfs_swap_HFSPlusVolumeHeader (void *buf); +int hfs_swap_BTNode (BlockDescriptor *src, SFCB *fcb, enum HFSBTSwapDirection direction); + +#ifdef __cplusplus +} +#endif + +#endif /* __HFS_ENDIAN_H__ */ diff --git a/fsck_hfs/dfalib/uuid.c b/fsck_hfs/dfalib/uuid.c new file mode 100644 index 0000000..a0b2457 --- /dev/null +++ b/fsck_hfs/dfalib/uuid.c @@ -0,0 +1,65 @@ +#include <fcntl.h> +#include <util.h> +#include <unistd.h> +#include <string.h> +#include <sys/mount.h> +#include <uuid/uuid.h> +#include <IOKit/IOBSD.h> +#include <IOKit/IOKitLib.h> +#include <IOKit/storage/IOMedia.h> + +extern char debug; +extern void plog(const char *, ...); + +/* + * Given a uuid string, look up the BSD device and open it. + * This code comes from DanM. + * + * Essentially, it is given a UUID string (from the journal header), + * and then looks it up via IOKit. From there, it then gets the + * BSD name (e.g., /dev/disk3), and opens it read-only. + * + * It returns the file descriptor, or -1 on error. + */ +int +OpenDeviceByUUID(void *uuidp, char **namep) +{ + char devname[ MAXPATHLEN ]; + CFStringRef devname_string; + int fd = -1; + CFMutableDictionaryRef matching; + io_service_t media; + uuid_string_t uuid_cstring; + CFStringRef uuid_string; + + memcpy(&uuid_cstring, uuidp, sizeof(uuid_cstring)); + + uuid_string = CFStringCreateWithCString( kCFAllocatorDefault, uuid_cstring, kCFStringEncodingUTF8 ); + if ( uuid_string ) { + matching = IOServiceMatching( kIOMediaClass ); + if ( matching ) { + CFDictionarySetValue( matching, CFSTR( kIOMediaUUIDKey ), uuid_string ); + media = IOServiceGetMatchingService( kIOMasterPortDefault, matching ); + if ( media ) { + devname_string = IORegistryEntryCreateCFProperty( media, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 ); + if ( devname_string ) { + if ( CFStringGetCString( devname_string, devname, sizeof( devname ), kCFStringEncodingUTF8 ) ) { + if (debug) + plog("external journal device name = `%s'\n", devname); + + fd = opendev( devname, O_RDONLY, 0, NULL ); + if (fd != -1 && namep != NULL) { + *namep = strdup(devname); + } + } + CFRelease( devname_string ); + } + IOObjectRelease( media ); + } + /* do not CFRelease( matching ); */ + } + CFRelease( uuid_string ); + } + + return fd; +} diff --git a/fsck_hfs/docs/fsck_gui_interface_design.rtf b/fsck_hfs/docs/fsck_gui_interface_design.rtf new file mode 100644 index 0000000..37fb883 --- /dev/null +++ b/fsck_hfs/docs/fsck_gui_interface_design.rtf @@ -0,0 +1,881 @@ +{\rtf1\mac\ansicpg10000\cocoartf824\cocoasubrtf330 +{\fonttbl\f0\fnil\fcharset77 LucidaGrande-Bold;\f1\fnil\fcharset77 LucidaGrande;} +{\colortbl;\red255\green255\blue255;\red0\green0\blue255;\red255\green0\blue0;} +{\info +{\author Puja Gupta} +{\*\company Apple Computer}}\margl1440\margr1440\vieww17160\viewh15500\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs36 \cf0 \ul \ulc0 FSCK - GUI CLIENT INTERFACE DESIGN +\fs24 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 \ulnone Last Modified: 04/06/2006.\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ +This document uses fsck collectively for any file system check program like fsck_hfs, fsck_msdos, fsck_ufs, etc.
Any GUI client, like Disk Utility, that displays the output of fsck in a GUI or parses the GUI output of fsck is termed the GUI or the client.\ +\ +\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b\fs32 \cf0 \ul AIMS +\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ulnone This document describes the design of the interaction interface between fsck and any GUI client (like Disk Utility or another program). The aims for this change are:\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +1. Provide flexibility in fsck to add/change message strings in the fsck output after the project's UI freeze.\ + a. Make it possible to hide extra details of disk verify/repair operation from normal users.\ + b. Provide all details of disk verify/repair\'caoperation for expert users, developers, AppleCare when required.\ +\ +2. Provide a sustainable and extensible interface for communication between fsck and client.\ + a. Standardize the message format sent from fsck to client.\ + b. Provide more information like type of operation, names of corrupt files in the message being sent from fsck to client. c. Provide flexibility to add a new message type or information from fsck to client easily.\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural\pardirnatural +\cf0 3. Provide an output that other tools can parse.\ + a. Standardized output for general messages (verifying, repairing, success/fail result, etc.)\ + b. Tools need not know the file system type in order to parse the output\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural\pardirnatural +\cf0 4. Be able to support file systems whose fsck behaves differently than fsck_hfs.\ + a. Make their messages localizable in Disk Utility\ + b. Make their output look more like fsck_hfs (user interface consistency)\ + c. Allow progress reporting\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 Description +\f1\b0 \ +If we look closely, requirement (1a) contradicts (1b).\'ca Therefore we propose a multi-level fsck GUI output which will tag every message from fsck with a verbosity level. This is described in more detail later with examples.\cf2 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 The design aims towards an independent interface between fsck and client which does not require string comparison and provides more information about the type of message being sent from fsck. Due to its ease of extensibility, we use an XML format. The output generated will be well-formed and valid XML. If a client wants to display the output of fsck in realtime, it will have to parse the XML output in real time.
Note that all fsck will generate this output on a -g option (GUI option).\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 fsck will generate an XML output with a standard set of XML tags for achieving a sustainable and extensible communication interface. All fscks will generate similar XML output which the client will be able to parse. If a new message is added to fsck which belongs to existing message type, no change in the client code will be required. If a new message is added to fsck which does not belong to existing message types, a new XML tag will be created and the client will have to change to recognize it. \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 Aims (3) and (4) are bonus as (3) is similar to aim (2) and will be achieved if we fix (2). Aim (4) deals more with standardizing multiple fscks (fsck_hfs, fsck_msdos, fsck_ufs, etc). In the process of converting fscks to the new format described in this design document, we will be achieving (4) (except for 4c).\ +\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b\fs32 \cf0 \ul MUTLI-LEVEL FSCK GUI OUTPUT +\fs30 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0\fs24 \cf0 \ulnone All messages sent by fsck to client will have a verbosity level associated with it. The verbosity level allows a client to filter its output; for example, it might choose to only show level 0 messages to a naive user, but log messages of all levels.\cf2 \ +\cf0 \ +Currently there will be two verbosity levels of client output:\ +a. level 0 or default level and \ +b. level 1 or expert level.\ +\ +Verbosity level 0 includes generic status messages such as verify, repair, success and fail. The text of the message displayed in level 0 will be consistent across all volume types. Each message has a unique positive number so that engineering or technical support can identify the message even if it has been localized. The message numbers at level 0 will be consistent across all volume types. Verbosity level 1 provides details about the verify and repair operation including error messages. Every volume format will define the message text and message number displayed in level 1 independent from other volume formats. \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ul \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\fs28 \cf0 VERBOSITY LEVEL 0: +\f1\b0\fs24 \ulnone \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 What strings must be in level 0? +\f1\b0 \ +1. Strings that indicate generic status of verify/repair operation and its final result independent of file system type.\ +2. Strings that display information about how repair operation has changed the user-level view of the file system and its data. \ +3. Strings that make sense to grandma-like users.\ +\ + +\f0\b Where do level 0 strings exist? +\f1\b0 \ +All level 0 strings are pre-defined, and all fscks must use these pre-defined strings for all level 0 output. No other strings must be identified as level 0. Any given fsck may output only a subset of the level 0 strings. 
Similar to level 0 strings, level 0 message numbers are also pre-defined and all fscks must use these pre-defined values.\ +\ + +\f0\b What are the valid message numbers for level 0 strings? +\f1\b0 \ +All level 0 strings will have pre-defined values for message numbers. The message numbers in range 1-99 are reserved for level 0 strings. \ +\ +The pre-defined level 0 strings and message numbers will be located in common location and will be localized.\ +\ + +\f0\b Which are the common strings? +\f1\b0 \ + "Checking volume.",\ +\'ca \'ca "Rechecking volume.",\ +\'ca "Repairing volume.",\ +\'ca \'ca "The volume appears to be OK.",\ +\'ca \'ca "The volume was repaired successfully.", \ +\'ca \'ca "The volume could not be verified completely.",\ + "The volume could not be verified completely and can not be repaired.",\ + "The volume was found corrupt and can not be repaired.",\ + "The volume was found corrupt and needs to be repaired.",\ + "The volume could not be repaired.", \ + +\f0\b +\f1\b0 "The volume cannot be repaired when it is in use.",\ + "The volume cannot be verified when it is in use.",\cf3 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 "%s may be damaged.",\ + "%s could not be repaired.",\ + "Look for missing items in %s directory.",\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf3 +\f1\b0 \cf0 "Look for links to corrupt files in %s directory."\ +\ +(Note: We can come up with user-friendly versions of the above strings).\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 Note: +\f1\b0 \ + 1. The above messages are classified in different types (described later).\ + 2. All\'cafsck must print "Repairing volume" in level 0 when it starts the first repair on the disk. \ + 3. The parameter in the following messages must be a file system object and of type "path" (described later):\ + "%s may be damaged." \ + "%s could not be repaired." \ + 4. The set of pre-defined strings do not have a parameter for volume name in its strings because all file systems do not require a volume to have a volume name. This means that we will have to support two incarnations of every strings - one with volume name and one without volume name. Therefore we choose not to display volume name in level 0 strings. \ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ul Example:\ulnone The new output in verbosity level 0 in client will be:\ +Checking volume.\ +File /Users/foo/myfile may be damaged.\ +File /Users/bar/otherfile may be damaged.\ +Repairing volume.\ +Look for links to corrupt files in DamagedFiles directory.\ +Rechecking volume.\ +The volume was repaired successfully.\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf3 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\fs28 \cf0 \ul VERBOSITY LEVEL 1: +\f1\b0\fs24 \ulnone \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 What strings must be in level 1? +\f1\b0 \ +1. All file system specific messages for detailed information about verify/repair operation as well as error messages.\ +\ +Level 1 includes messages like verify, repair, error, debug, etc. 
The text of message will be chosen by fsck implementation and will be independent across different file system types. Every unique message string displayed in this level will have a unique number associated with it. Every implementation of fsck can choose any unique number independently. Therefore a message number 234 in fsck_hfs might not mean the same message for message number 234 in fsck_msdos. But once a message number is defined, its meaning must not be changed in future versions. The location of level 1 strings and message numbers of every file system is left to individual fsck implementation.\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf3 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 The strings displayed in level 0 do not support volume name. All fsck must send details about fsck and corresponding file system it is verifying/repairing including volume name to the client. A message number 100 is reserved for this message. This is described in more detail in message type INFORMATION.\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 What are the valid message numbers for level 1 strings? +\f1\b0 \ +Numbers 1-99 are reserved for level 0 strings. Message number 100 is reserved for fsck information message (described later). fsck can choose a positive number greater than 100 for level 1 strings.\ +\ +Example: Listed below is an example fsck output. The message string is prefixed by the message number. Note that the numbers displayed are currently representative numbers and may not be same after implementation. Level 1 messages are distinguished from level 0 message with an extra tab on the output line. Note that this is not the literal fsck output and indentation is only to distinguish between different levels. It does not represent the way in which fsck or client should display their output.\cf2 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +045 Checking volume. 
+\f0\b \ + +\f1\b0 101 Checking HFS Plus volume.\ + 102 Checking Extents Overflow file.\ + 103 Checking Catalog file.\ + 500 Incorrect block count for file 1\ + 559 (It should be 1266 instead of 1272) \ + 511 Overlapped extent allocation (file 1203 /Users/foo/myfile)\ + 055 File /Users/foo/myfile may be damaged.\ + 511 Overlapped extent allocation (file 1203 /Users/bar/otherfile)\ + 055 File /Users/foo/myfile may be damaged.\ + 104 Checking Catalog hierarchy.\ + 106 Checking volume bitmap.\ + 556 Volume Bit Map needs minor repair\ + 108 Checking volume information.\ + 554 Invalid volume free block count\ + 559 (It should be 235411 instead of 235405)\ + 600 Verify Status: VIStat = 0xa800, ABTStat = 0x0000 EBTStat = 0x0000 CBTStat = 0x0000 CatStat = 0x0020\ +013 Repairing volume.\ +019 Rechecking volume.\ + 101 Checking HFS Plus volume.\ + 102 Checking Extents Overflow file.\ + 103 Checking Catalog file.\ + 104 Checking Catalog hierarchy.\ + 106 Checking volume bitmap.\ + 108 Checking volume information.\ +016 The volume was repaired successfully.\ +\ +\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b\fs32 \cf0 \ul XML COMMUNICATION INTERFACE +\f1\b0\fs24 \ulnone \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 fsck will generate an XML output only when called with "-g" option for GUI output.\cf2 \cf0 A message is a set of information like message type, message string, message number, etc which describes state/action of fsck. The XML chosen will be the standard plist ("Property List") uncompressed format. Each message will begin with "<plist version=\\"1.0\\">", and each message will end with "</plist>\\n". Newlines may appear between the two tags.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs28 \cf0 \ul XML KEYS +\fs24 \ulnone \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 The producer and consumer for XML format must agree on the XML tags (or keys) that they recognize. These keys will be provided as a part of common header file in /usr/local/include/fsck_common.h.\ +\ +\pard\pardeftab720\ql\qnatural +\cf0 The output consists of a single <plist> element which shall contain a single <dict> (dictionary) element.\'ca Inside that element are one or more <key> elements. Messages always have one key with the name "fsck_type." The messages describing percent progress have a value of "fsck_progress" for the "fsck_type" key. All other types of messages have several keys, including "verbosity", "fsck_msg_number", "fsck_msg_string" and an optional "parameter".\'ca The parameter element has a value component that is an array; each element of the array may be a <string> or <integer>, or a <dict> with a single key-value pair.\ +\ +Details about different keys are as follows:\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 fsck_type +\f1\b0 \ + This key describes the type of the message being sent. 
Different types of messages are described in detail later.\ + This key-value pair is required in a message.\ +\ + +\f0\b fsck_progress\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 This key describes the percentage progress of the verify/repair operation.\cf3 \cf0 The value for this key indicates a non-negative integer with a value between 0 and 100.\ + This key is the only key that must be present in the progress indicator messages (except "type") and must exist only as child of "message". This key must not exist in any other type of messages. \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 fsck_msg_string \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 This key describes the string that needs to be displayed in the client. Some message strings will require parameter substitutions which is described in detail later. The associated value is a <string> object.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 fsck_msg_number \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 This key describes the message number associated with the "msg_string" in the message. This message number is an unique unsigned 32-bit integer for every unique string. Every implementation of fsck can choose any unique number independently. All message numbers sent by fsck will be positive integer. Therefore a message number 234 in fsck_hfs might not mean the same message for message number 234 in fsck_msdos. The associated value is an <integer> object.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 fsck_verbosity +\f1\b0 \ + This key describes the verbosity level in which the current message must be displayed in the client. This key-value pair must be sent by fsck with an exception of messages for percentage progress indicator. The verbosity level will be described as an <integer> object.\ +\ + +\f0\b parameters +\f1\b0 \ + fsck does not localize strings itself but sends the strings in the same format as they exist in localized files (i.e. without parameters) to the client. This key describes the parameters to substitute instead of "%@" or "%n$@" where 'n' is the parameter number generated in localized strings in the message strings in "msg_string". A message string can have multiple parameters therefore the values are sent in the order they must be replaced. The value associated with this key is an <array>; the elements of the array may be <string> or <integer>, or they may be <dict> objects with a single <key> and a corresponding value of either a <string> or an <integer>.\ + This key is optional; if present, it may contain no elements.\ +\ + +\f0\b fstype +\f1\b0 \ + This key describes the type of file system being checked by fsck as a UTF-8 string. The value can also specify a sub-type of file system to provide details. For example, fsck_hfs can send the values for this key as "HFS+", "Journaled HFS+", "Case-sensitive Journaled HFS+", etc. 
Every fsck implementation can choose the text of the string to be displayed.\ + This key-value pair is optional.\ + \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ul XML FORMAT\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ulnone The output generated by fsck will be well-formed and valid plist XML format. If a client wants to display the output of fsck in realtime, it will have to parse the XML output in real time. The start tag for each individual message will be "<plist version=\\"1.0\\">"; the end tag will be "</plist>\\n"; there may be zero more newlines between the tags. There will be only one newline between two different messages and the client must not assume any extra newlines (i.e. between </plist> and the next <plist version=\\"1.0\\">).\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ul SUMMARY OF REQUIRED AND OPTIONAL KEY-VALUE PAIRS +\f1\b0 \ulnone \ +Listed below are the required key-value pairs in the messages. Any other key-value pair are optional. +\f0\b \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ul \ +For all messages:\ulnone \ +fsck_type\ +\ul \ +For percent progress indicator messages:\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ulnone fsck_type\ +parameters\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ul For all other messages:\ulnone \ +fsck_msg_string\ +fsck_msg_number\ +fsck_verbosity\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ul SUMMARY OF RELATIONSHIP BETWEEN DIFFERENT KEYS +\f1\b0 \ulnone \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ul For percent progress indicator messages:\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ulnone plist\ + |- dict\ + |- fsck_type\ + |- parameters\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ul For all other messages:\ulnone \ +plist\ + |- dict\ + |- fsck_type\ + |- fsck_verbosity\ + |- fsck_msg_number\ + |- fsck_msg_string\ + |- parameters\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul MESSAGE TYPES +\fs24 \ulnone \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 The messages sent from fsck can be classified into eight types. A message can only belong to one message type. The sequence of messages sent by fsck to client is independent of the type of message. Client must not expect any particular order of messages, like a repair message always preceded by a verify message. \ +\ +fsck must classify the messages based on the following guildelines and must send correct message type to the client. 
For example, fsck must never send any repair message to client if no repairs were ever performed on the disk.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 1. VERIFY\ +\pard\pardeftab720\ql\qnatural + +\f1\b0 \cf0 \'ca\'ca \'caThis type indicates that fsck is performing a read-only operation to either check or to prepare to check the file system. \'ca\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 Some fsck support option "-n" and "-y" to force answers like "no" and "yes" for any intermediate questions asked during verify operation. This message type is independent of these options and must only be sent for message string describing read-only check. \ + This is an optional message type as fsck implementation can choose to repair a volume directly. This message type can exist in all verbosity levels.\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer> 101 </integer>\ + <key>fsck_msg_string</key> <string>Checking volume.</string>\ + </dict>\ +</plist>\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 2. REPAIR\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 This type indicates that fsck is writing to file system to repair a corruption. \ + This type does not describe messages when fsck is writing to file system for any other reason than repairing the file system. These type of messages are classified as information messages (described later). For example, if the disk has a "last verified date" that is updated even if no problems are found.\'ca Or a journal might be replayed, and no errors are subsequently found.\ + Some fsck support option "-n" and "-y" to force answers like "no" and "yes" for any intermediate questions asked during repair operation. This message type is independent of these options and must only be sent for message string describing write operation on the file system. All\'cafsck must print "Repairing volume" in verbosity level 0 when it starts the first repair on the disk.\ + This is an optional message type as repairing a file system is not required always. This message type can exist in all verbosity levels.\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>REPAIR</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>113</integer>\ + <key>fsck_msg_string</key><string>Repairing volume.</string>\ + </dict>\ +</plist>\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 3. SUCCESS\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 This type indicates one of the following:\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 1. verify operation succeeded and found that the volume is clean.\ + 2. 
repair operation has repaired the volume successfully.\ + The last message output by fsck must be of type SUCCESS or FAIL, to indicate whether the verify or repair operation completed successfully.\'ca No other message may be of type SUCCESS or FAIL. This message type must have verbosity level 0.\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key><string>SUCCESS</string>\ + <key>fsck_verbosity</key><integer>0</integer>\ + <key>fsck_msg_number</key> <integer>116</integer>\ + <key>fsck_msg_string</key><string>Volume was repaired successfully.</string>\ + </dict>\ +</plist>\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 4. FAIL +\f1\b0 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 This type indicates one of the following:\ + 1. fsck was told to verify but not repair and has found that the volume is corrupt\ + 2. verify operation was not completed due to an error and fsck cannot determine if the volume is clean or corrupt.\ + 3. repair operation has failed.\ + The last message output by fsck must be of type SUCCESS or FAIL, to indicate whether the verify or repair operation completed successfully.\'ca No other message may be of type SUCCESS or FAIL. This message type must have verbosity level 0.\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>FAIL</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>113</integer>\ + <key>fsck_msg_string</key><string>Volume needs to be repaired.</string>\ + </dict>\ +</plist>\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 5. ERROR\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 This type indicates a message about one of the following:\ + 1. type of corruption causing fsck to conclude that the file system is corrupt\ + 2. type of condition that causes the verify/repair operation to fail.\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 +\f1\b0 This is an optional message. This message type must never have verbosity level 0.\ + +\f0\b \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key><string>ERROR</string>\ + <key>fsck_verbosity</key><integer>1</integer>\ + <key>fsck_msg_number</key><integer>556</integer>\ + <key>fsck_msg_string</key> <string>Volume Bit Map needs minor repair.</string>\ + </dict>\ +</plist>\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 6. DAMAGEINFO\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 This type gives more information about one of the following:\ + 1. name/path of the file system objects that the verify operation found corrupt\ + 2. name/path of the file system objects that the repair operation repaired/could not repair.\ + 3. more information about how the repair operation has changed the user's view of the file system and/or its data.\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 +\f1\b0 This is an optional message. This message type must always have verbosity level 0.\ +\ +Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>DAMAGEINFO</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>657</integer>\ + <key>fsck_msg_string</key> <string>File %1$@ may be damaged.</string>\ + <key>parameters</key>\ + <array>\ + <string>/Users/foo/myfile</string>\ + </array>\ + </dict>\ +</plist>\ + +\f0\b \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 7. INFORMATION\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 This type indicates one of the following:\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 1. information about an error message.\ + 2. information/result of any fsck operation or any of its intermediate state.\ + This type includes messages when fsck is writing to the file system for any reason other than repairing the file system. For example, if the disk has a "last verified date" that is updated even if no problems are found.\'ca Or a journal might be replayed, and no errors are subsequently found.\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 +\f1\b0 This is an optional message. This message type must never have verbosity level 0.\ + +\f0\b \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key><string>INFORMATION</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>559</integer>\ + <key>fsck_msg_string</key> <string>(It should be %1$@ instead of %2$@)</string>\ + <key>parameters</key>\ + <array>\ + <integer>1266</integer>\ + <integer>1272</integer>\ + </array>\ + </dict>\ +</plist>\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ + +\f1\b0 All fsck must send an information message with verbosity level 1 and message number 100 to summarize details about the fsck verify/repair operation to facilitate debugging. The information sent will be the file used for verify/repair, the version number for that file, the name of the volume, if available, and the type of file system. There is no specific requirement about the order of occurrence of this message but it must be sent as early as possible in the verify/repair operation, i.e. as soon as fsck gets all the information that needs to be sent. All fsck must send the following string: \ +\ + "Using %1$@ (version %2$@) for checking volume %3$@ of type %4$@." \ +where,\ + the first parameter is the name of the binary file used for verify/repair operation, \ + the second parameter is the source number used to generate the binary (like diskdev_cmds-332.5, msdosfs-90), \ + the third parameter is the name of the volume, and \ + the last parameter is the type of file system (like Journaled HFS+, MSDOS (FAT16), etc). \ +\ + All fsck must use the same message number 100 for this string.
If fsck can not obtain one or more information, it should send NULL value (i.e. just start and end tags) for that parameter.\ +\ +Example:\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key><string>INFORMATION</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>100</integer>\ + <key>fsck_msg_string</key> <string> Using %1$@ (version %2$@) for checking volume %3$@ of type %4$@.</string>\ + <key>parameters</key>\ + <array>\ + <string>fsck_hfs</string>\ + <string>diskdev_cmds-334.3</string>\ + <string>MyVol</string>\ + <string>journaled HFS+</string>\ + </array>\ + </dict>\ +</plist>\ +\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key><string>INFORMATION</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>100</integer>\ + <key>fsck_msg_string</key> <string> Using %1$@ (version %2$@) for checking volume %3$@ of type %4$@.</string>\ + <key>parameters</key>\ + <array>\ + <string>fsck_msdos</string>\ + <string>msdosfs-90</string>\ + <string></string>\ + <string>MSDOS (FAT16)</string>\ + </array>\ + </dict>\ +</plist>\ + +\f0\b \ +8. fsck_progress +\f1\b0 \ + This type gives more information about the percentage progress of verify/repair operation.\ +\ +Example:\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>fsck_progress</string>\ + <key>parameter</key>\ + <array>\ + <integer>70</integer>\ + </array>\ + </dict>\ +</plist>\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 \ul MESSAGE TYPES OF PRE-DEFINED (LEVEL 0) MESSAGES +\f1\b0 \ulnone \ +VERIFY +\f0\b \ul \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 \ulnone "Checking volume.",\ + "Rechecking volume.",\ +\ +REPAIR\ +\'ca "Repairing volume.",\ +\ +SUCCESS\ +\'ca \'ca "The volume appears to be OK.",\ +\'ca \'ca "The volume was repaired successfully.", \ +\ +FAIL\ +\'ca \'ca "The volume could not be verified completely.",\ + "The volume could not be verified completely and can not be repaired.",\ + "The volume was found corrupt and can not be repaired."\ + "The volume was found corrupt and needs to be repaired."\ + "The volume could not be repaired.", \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf0 +\f1\b0 "The volume cannot be repaired when it is in use."\ + "The volume cannot be verified when it is in use."\ +\ +DAMAGEINFO\cf3 \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 "%s may be damaged." \ + "%s could not be repaired." \ + "Look for missing items in %s directory.", \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f0\b \cf3 +\f1\b0 \cf0 "Look for links to corrupt files in %s directory." 
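To make the plist format above concrete, the following is a minimal, self-contained C sketch of how an fsck-style tool could emit messages with these keys. It is illustrative only: the helper name emit_fsck_plist and its signature are assumptions made for this example (they are not part of any shipping fsck), the message numbers are the representative ones used elsewhere in this document, and a real implementation would also need to XML-escape the message text.

    #include <stdio.h>

    /*
     * Hypothetical helper (not taken from fsck_hfs): prints one fsck message
     * as a plist using the key layout described in this document.  Each entry
     * in "params" is assumed to be an already formatted <string>, <integer>,
     * or single-entry <dict> element.
     */
    static void
    emit_fsck_plist(const char *fsck_type, int verbosity, int msg_number,
                    const char *msg_string, int nparams, const char *params[])
    {
        int i;

        printf("<plist version=\"1.0\">\n");
        printf("    <dict>\n");
        printf("        <key>fsck_type</key> <string>%s</string>\n", fsck_type);
        printf("        <key>fsck_verbosity</key> <integer>%d</integer>\n", verbosity);
        printf("        <key>fsck_msg_number</key> <integer>%d</integer>\n", msg_number);
        printf("        <key>fsck_msg_string</key> <string>%s</string>\n", msg_string);
        if (nparams > 0) {
            printf("        <key>parameters</key>\n");
            printf("        <array>\n");
            for (i = 0; i < nparams; i++)
                printf("            %s\n", params[i]);
            printf("        </array>\n");
        }
        printf("    </dict>\n");
        printf("</plist>\n");
    }

    int
    main(void)
    {
        const char *damage_params[] = {
            "<dict> <key>path</key> <string>/Users/foo/myfile</string> </dict>"
        };

        /* Level 0 status message with no parameters (see the VERIFY example) */
        emit_fsck_plist("VERIFY", 0, 101, "Checking volume.", 0, NULL);

        /* Level 0 damage information with one "path" parameter */
        emit_fsck_plist("DAMAGEINFO", 0, 657, "File %1$@ may be damaged.", 1,
                        damage_params);

        return 0;
    }

Compiled and run on its own, the sketch prints two messages in the same shape as the VERIFY and DAMAGEINFO examples shown earlier; a client filtering at verbosity level 0 would display both.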
+\f0\b \cf2 \ +\ +\ +\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\fs32 \cf0 \ul SUMMARY +\fs24 \cf2 \ulnone \ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural + +\f1\b0 \cf0 TBD\ +\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul HANDLING UNKNOWN KEY-VALUES +\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ulnone If the client receives an unknown key, it should ignore it in its current implementation but enhance its XML parser to recognize that key.\cf3 \cf0 \ +\ +If the client receives an unknown message type and the message has "fsck_msg_string", it should display the "fsck_msg_string" (with parameter substitution, if any) in the verbosity level specified with the message.\ +\ +If the client receives an unknown key which is a child of "parameters", the client should treat its value as a "string". \ +\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul HANDLING UNSUPPORTED FSCK +\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ulnone Some fsck might choose not to send messages to the client in the new XML format described in this document. This can be for any reason, like phasing out an implementation, development of a new fsck, etc. An implementation of fsck under such conditions must send messages to the client as UTF-8 strings, which the client must display in its UI. The client must handle such unsupported fscks gracefully.\ +\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul FUTURE EXTENSIBILITY +\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ulnone If a client requires additional data from fsck in the future which is not accommodated by the existing keys and/or message types, fsck and the client can establish a known key and/or message type. The client will have to change to recognize the new key and/or message type. Only relevant fsck implementations will have to change to generate the new key and/or message type.\ +\ +Since fsck is the producer of the XML data, it may add new keys and/or message types. It is the responsibility of fsck to update the common header file, update this design document and inform clients about the changes.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul OPEN ISSUES +\f1\b0\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ulnone 1. The strings displayed in verbosity level 1 are detailed informational strings about the verify/repair operation. These strings are very file-system specific and can include a lot of debugging information. We suggest that these strings should not be localized.
This will also enable fsck to add strings independent of localization deadlines.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs32 \cf0 \ul SAMPLE OUTPUT +\f1\b0\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ulnone The output of fsck is a series of plists. A sample output is shown below:\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\cf0 \ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>101</integer>\ + <key>fsck_msg_string</key><string>Checking volume.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>INFORMATION</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>100</integer>\ + <key>fsck_msg_string</key><string>Using %1$@ (version %2$@) for checking volume %3$@ of type %4$@.</string>\ + <key>parameters</key>\ + <array>\ + <dict> <key>file</key> <string>fsck_hfs</string></dict>\ + <string>diskdev_cmds-334.3</string>\ + <dict> <key>volumename</key> <string>MyVol</string> </dict>\ + <dict> <key>fstype</key> <string>Journaled HFS+</string> </dict>\ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string> \ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>102</integer>\ + <key>fsck_msg_string</key><string>Checking Extents Overflow file.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>103</integer>\ + <key>fsck_msg_string</key><string>Checking Catalog file.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>ERROR</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>500</integer>\ + <key>fsck_msg_string</key><string>Incorrect block count for %1$@</string>\ + <key>parameters</key>\ + <array>\ + <dict> <key>file</key> <string>file1</string></dict>\ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>INFORMATION</string> \ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>559</integer>\ + <key>fsck_msg_string</key><string>(It should be %1$@ instead of %2$@)</string>\ + <key>parameters</key>\ + <array>\ + <integer>1266</integer>\ + <integer>1272</integer>\ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>ERROR</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>511</integer>\ + <key>fsck_msg_string</key><string>Overlapped extent allocation (file %1$@ %2$@)</string>\ + <key>parameters</key>\ + <array>\ + <integer>1204</integer>\ + <dict> <key>path</key> <string>/Users/foo/myfile</string> </dict>\ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>DAMAGEINFO</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>657</integer>\ + <key>fsck_msg_string</key><string>File %1$@ may be damaged.</string>\ + <key>parameters</key>\ + 
<array>\ + <dict> <key>path</key> <string>/Users/foo/myfile</string> </dict>\ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>104</integer>\ + <key>fsck_msg_string</key><string>Checking Catalog hierarchy.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>106</integer>\ + <key>fsck_msg_string</key><string>Checking volume bitmap.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>ERROR</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>556</integer>\ + <key>fsck_msg_string</key><string>Volume Bit Map needs minor repair.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>108</integer>\ + <key>fsck_msg_string</key><string>Checking volume information.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>REPAIR</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>113</integer>\ + <key>fsck_msg_string</key><string>Repairing volume.</string> \ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>DAMAGEINFO</string> \ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>657</integer>\ + <key>fsck_msg_string</key><string> "Look for links to corrupt files in %1$@ directory."</string>\ + <key>parameters</key>\ + <array>\ + <dict> <key>path</key> <string>/DamagedFiles</string> </dict> \ + </array>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>118</integer>\ + <key>fsck_msg_string</key><string>Rechecking volume.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>102</integer>\ + <key>fsck_msg_string</key><string>Checking Extents Overflow file.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>103</integer>\ + <key>fsck_msg_string</key><string>Checking Catalog file.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>104</integer>\ + <key>fsck_msg_string</key><string>Checking Catalog hierarchy.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> <integer>106</integer>\ + <key>fsck_msg_string</key><string>Checking volume bitmap.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>VERIFY</string>\ + <key>fsck_verbosity</key> <integer>1</integer>\ + <key>fsck_msg_number</key> 
<integer>108</integer>\ + <key>fsck_msg_string</key><string>Checking volume information.</string>\ + </dict>\ +</plist>\ +<plist version=\\"1.0\\">\ + <dict>\ + <key>fsck_type</key> <string>SUCCESS</string>\ + <key>fsck_verbosity</key> <integer>0</integer>\ + <key>fsck_msg_number</key> <integer>116</integer>\ + <key>fsck_msg_string</key><string>Volume was repaired successfully.</string>\ + </dict>\ +</plist>\ +\ +} \ No newline at end of file diff --git a/fsck_hfs/fsck_debug.c b/fsck_hfs/fsck_debug.c new file mode 100644 index 0000000..79e2726 --- /dev/null +++ b/fsck_hfs/fsck_debug.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <stdio.h> +#include <stdarg.h> +#include "fsck_debug.h" +#include "fsck_hfs.h" + +/* Current debug level of fsck_hfs for printing messages via DPRINTF */ +unsigned long cur_debug_level; + +/* Function: DPRINTF + * + * Description: Debug function similar to printf except the first parameter + * which indicates the type of message to be printed by DPRINTF. Based on + * current debug level and the type of message, the function decides + * whether to print the message or not. + * + * Each unique message type has a bit assigned to it. The message type + * passed to DPRINTF can be one or combination (OR-ed value) of pre-defined + * debug message types. Only the messages whose type have one or more similar + * bits set in comparison with current global debug level are printed. + * + * For example, if cur_debug_level = 0x11 (d_info|d_xattr) + * ---------------------------------------- + * message type - printed/not printed + * ---------------------------------------- + * d_info - printed + * d_error|d_xattr - printed + * d_error - not printed + * d_overlap - not printed + * + * Input: + * message_type - type of message, to determine when to print the message + * variable arguments - similar to printfs + * + * Output: + * Nothing + */ +void DPRINTF (unsigned long type, char *fmt, ...) 
+{ + if (cur_debug_level & type) { + va_list ap; + + plog ("\t"); + va_start(ap, fmt); + vplog(fmt, ap); + va_end(ap); + } +} + +void HexDump(const void *p_arg, unsigned length, int showOffsets) +{ + const u_int8_t *p = p_arg; + unsigned i; + char ascii[17]; + u_int8_t byte; + + ascii[16] = '\0'; + + for (i=0; i<length; ++i) + { + if (showOffsets && (i & 0xF) == 0) + plog("%08X: ", i); + + byte = p[i]; + plog("%02X ", byte); + if (byte < 32 || byte > 126) + ascii[i & 0xF] = '.'; + else + ascii[i & 0xF] = byte; + + if ((i & 0xF) == 0xF) + { + plog(" %s\n", ascii); + } + } + + if (i & 0xF) + { + unsigned j; + for (j = i & 0xF; j < 16; ++j) + plog(" "); + ascii[i & 0xF] = 0; + plog(" %s\n", ascii); + } +} diff --git a/fsck_hfs/fsck_debug.h b/fsck_hfs/fsck_debug.h new file mode 100644 index 0000000..fbfd03f --- /dev/null +++ b/fsck_hfs/fsck_debug.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2005-2006, 2008, 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __FSCK_DEBUG__ +#define __FSCK_DEBUG__ + +#include <sys/types.h> + +enum debug_message_type { + /* Type of information */ + d_info = 0x0001, /* Normal information messages during execution */ + d_error = 0x0002, /* Error messages */ + + /* Category of verify/repair operation */ + d_xattr = 0x0010, /* Extended attributes related messages */ + d_overlap = 0x0020, /* Overlap extents related messages */ + d_trim = 0x0040, /* TRIM (discard/unmap) related messages */ + + d_dump_record = 0x0400, /* Dump corrupt keys and records */ + d_dump_node = 0x0800, /* In hfs_swap_BTNode or BTCheck, dump out damaged nodes */ + d_check_slink = 0x1000, /* Read the contents of a symlink and check length */ +}; + +/* Current debug level of fsck_hfs for printing messages via DPRINTF */ +extern unsigned long cur_debug_level; + +/* Function: DPRINTF + * + * Description: Debug function similar to printf except the first parameter + * which indicates the type of message to be printed by DPRINTF. Based on + * current debug level and the type of message, the function decides + * whether to print the message or not. + * + * Each unique message type has a bit assigned to it. The message type + * passed to DPRINTF can be one or combination (OR-ed value) of pre-defined + * debug message types. Only the messages whose type have one or more similar + * bits set in comparison with current global debug level are printed. 
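+ *
+ * An illustrative call site (the fileID variable is hypothetical, not a
+ * reference to code in this file):
+ *	DPRINTF (d_error|d_overlap, "Overlapped extent for fileID %u\n", fileID);
+ * This call prints only when cur_debug_level has the d_error or the
+ * d_overlap bit set, e.g. when fsck_hfs is run with -D 0x22.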
+ * + * For example, if cur_debug_level = 0x11 (d_info|d_xattr) + * ---------------------------------------- + * message type - printed/not printed + * ---------------------------------------- + * d_info - printed + * d_error|d_xattr - printed + * d_error - not printed + * d_overlap - not printed + * + * Input: + * message_type - type of message, to determine when to print the message + * variable arguments - similar to printfs + * + * Output: + * Nothing + */ +extern void DPRINTF (unsigned long message_type, char *format, ...); + +void HexDump(const void *p_arg, unsigned length, int showOffsets); + +#endif /* __FSCK_DEBUG__ */ diff --git a/fsck_hfs/fsck_hfs.8 b/fsck_hfs/fsck_hfs.8 new file mode 100644 index 0000000..159a532 --- /dev/null +++ b/fsck_hfs/fsck_hfs.8 @@ -0,0 +1,250 @@ +.\" Copyright (c) 2002,2008 Apple Inc. All rights reserved. +.\" +.\" The contents of this file constitute Original Code as defined in and +.\" are subject to the Apple Public Source License Version 1.1 (the +.\" "License"). You may not use this file except in compliance with the +.\" License. Please obtain a copy of the License at +.\" http://www.apple.com/publicsource and read it before using this file. +.\" +.\" This Original Code and all software distributed under the License are +.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +.\" License for the specific language governing rights and limitations +.\" under the License. +.\" +.\" @(#)fsck_hfs.8 +.Dd August 5, 2008 +.Dt FSCK_HFS 8 +.Os "Mac OS X" +.Sh NAME +.Nm fsck_hfs +.Nd HFS file system consistency check +.Sh SYNOPSIS +.Nm fsck_hfs +.Fl q +.Op Fl df +.Ar special ... +.Nm fsck_hfs +.Fl p +.Op Fl df +.Ar special ... +.Nm fsck_hfs +.Op Fl n | y | r +.Op Fl dfgxlES +.Op Fl D Ar flags +.Op Fl b Ar size +.Op Fl B Ar path +.Op Fl m Ar mode +.Op Fl c Ar size +.Op Fl R Ar flags +.Ar special ... +.Sh DESCRIPTION +.Pp +The +.Nm +utility verifies and repairs standard HFS and HFS+ file systems. +.Pp +The first form of +.Nm +quickly checks the specified file systems to determine whether +they were cleanly unmounted. +.Pp +The second form of +.Nm +preens the specified file systems. +It is normally started by +.Xr fsck 8 +run from +.Pa /etc/rc.boot +during automatic reboot, when a HFS file system is detected. +When preening file systems, +.Nm +will fix common inconsistencies for file systems that were not +unmounted cleanly. +If more serious problems are found, +.Nm +does not try to fix them, indicates that it was not +successful, and exits. +.Pp +The third form of +.Nm +checks the specified file systems and tries to repair all +detected inconsistencies. +.Pp +If no options are specified +.Nm +will always check and attempt to fix the specified file systems. +.Pp +The options are as follows: +.Bl -hang -offset indent +.It Fl c Ar size +Specify the +.Ar size +of the cache used by +.Nm +internally. Bigger +.Ar size +can result in better performance but can result in deadlock when +used with +.Fl l +option. Size can be specified as a decimal, octal, or +hexadecimal number. If the number ends with a ``k'', ``m'', +or ``g'', the number is multiplied by 1024 (1K), 1048576 (1M), +or 1073741824 (1G), respectively. +.It Fl d +Display debugging information. 
+This option may provide useful information when +.Nm +cannot repair a damaged file system. +.It Fl D Ar flags +Print extra debugging information. The +.Ar flags +are a bitmap that control which kind of debug information is printed. +The following values are currently implemented: +.Bl -hang -offset indent -compact +.It 0x0001 +Informational messages +.It 0x0002 +Error messages +.It 0x0010 +Extended attributes related messages +.It 0x0020 +Overlapped extents related messages +.El +.It Fl b Ar size +Specify the size, in bytes, of the physical blocks used by the +.Fl B +option. +.It Fl B Ar path +Print the files containing the physical blocks listed in the file +.Ar path . +The file should contain one or more decimal, octal (with leading 0) or +hexadecimal (with leading 0x) numbers separated by white space. The physical +block numbers are relative to the start of the partition, so if you +have block numbers relative to the start of the device, you will have to +subtract the block number of the start of the partition. The size of a +physical block is given with the +.Fl b +option; the default is 512 bytes per block. +.It Fl f +When used with the +.Fl p +option, force +.Nm +to check `clean' file systems, otherwise it means force +.Nm +to check and repair journaled HFS+ file systems. +.It Fl g +Causes +.Nm +to generate its output strings in GUI format. +This option is used when another application with a graphical user interface +(like Mac OS X Disk Utility) is invoking the +.Nm +tool. +.It Fl x +Causes +.Nm +to generate its output strings in XML (plist) format. This option +implies the +.Fl g +option. +.It Fl l +Lock down the file system and perform a test-only check. +This makes it possible to check a file system that is currently mounted, +although no repairs can be made. +.It Fl m Ar mode +Mode is an octal number that will be used to set the permissions for the +lost+found directory when it is created. +The lost+found directory is only created when a volume is repaired and orphaned +files or directories are detected. +.Nm +places orphaned files and directories into the lost+found directory (located +at the root of the volume). +The default mode is 01777. +.It Fl p +Preen the specified file systems. +.It Fl q +Causes +.Nm +to quickly check whether the volume was unmounted cleanly. +If the volume was unmounted cleanly, then the exit status is 0. +If the volume was not unmounted cleanly, then the exit status will be non-zero. +In either case, a message is printed to standard output describing whether the +volume was clean or dirty. +.It Fl y +Always attempt to repair any damage that is found. +.It Fl n +Never attempt to repair any damage that is found. +.It Fl E +Cause +.Nm +to exit (with a value of 47) if it encounters any +major errors. A ``major error'' is considered one which +would impact using the volume in normal usage; an inconsistency +which would not impact such use is considered ``minor'' for this +option. Only valid with the +.Fl n +option. +.It Fl S +Cause +.Nm +to scan the entire device looking for I/O errors. It will +attempt to map the blocks with errors to names, similar to +the +.Fl B +option. +.It Fl R Ar flags +Rebuilds the requested btree. 
The following flags are supported: +.Bl -hang -offset indent -compact +.It a +Attribute btree +.It c +Catalog btree +.It e +Extents overflow btree +.El +Rebuilding a btree will only +work if there is enough free space on the file system for the new btree +file, and if +.Nm +is able to traverse each of the nodes in the requested btree successfully. +Rebuilding btrees is not supported on HFS Standard volumes. +.It Fl r +Rebuild the catalog btree. This is synonymous with +.Fl Rc . +.El +.Pp +Because of inconsistencies between the block device and the buffer cache, +the raw device should always be used. +.Sh EXIT VALUES +.Nm +indicates some status by exit value. The current list of exit status results +is: +.Bl -hang -offset indent -compact +.It 0 +No errors found, or successfully repaired. +.It 3 +A quick-check (the +.Fl n +option) found a dirty filesystem; no repairs were made. +.It 4 +During boot, the root filesystem was found to be dirty; repairs were +made, and the filesystem was remounted. The system should be rebooted. +.It 8 +A corrupt filesystem was found during a check, or repairs did not succeed. +.It 47 +A major error was found with +.Fl E . +.El +.Sh SEE ALSO +.Xr fsck 8 +.Sh BUGS +.Nm +is not able to fix some inconsistencies that it detects. +.Sh HISTORY +The +.Nm +command appeared in Mac OS X Server 1.0 . diff --git a/fsck_hfs/fsck_hfs.c b/fsck_hfs/fsck_hfs.c new file mode 100644 index 0000000..35ccbc9 --- /dev/null +++ b/fsck_hfs/fsck_hfs.c @@ -0,0 +1,1041 @@ +/* + * Copyright (c) 1999-2000, 2002-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/param.h> +#include <sys/ucred.h> +#include <sys/mount.h> +#include <sys/ioctl.h> +#include <sys/disk.h> +#include <sys/sysctl.h> +#include <setjmp.h> + +#include <hfs/hfs_mount.h> + +#include <errno.h> +#include <fcntl.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <stdlib.h> +#include <ctype.h> +#include <signal.h> + +#include <TargetConditionals.h> + +#include "fsck_hfs.h" +#include "fsck_msgnums.h" +#include "fsck_hfs_msgnums.h" + +#include "fsck_debug.h" +#include "dfalib/CheckHFS.h" + +/* + * These definitions are duplicated from xnu's hfs_readwrite.c, and could live + * in a shared header file if desired. On the other hand, the freeze and thaw + * commands are not really supposed to be public. 
+ */ +#ifndef F_FREEZE_FS +#define F_FREEZE_FS 53 /* "freeze" all fs operations */ +#define F_THAW_FS 54 /* "thaw" all fs operations */ +#endif // F_FREEZE_FS + +/* Global Variables for front end */ +const char *cdevname; /* name of device being checked */ +char *progname; +char lflag; /* live fsck */ +char nflag; /* assume a no response */ +char yflag; /* assume a yes response */ +char preen; /* just fix normal inconsistencies */ +char force; /* force fsck even if clean (preen only) */ +char quick; /* quick check returns clean, dirty, or failure */ +char debug; /* output debugging info */ +char disable_journal; /* If debug, and set, do not simulate journal replay */ +char scanflag; /* Scan entire disk for bad blocks */ +#if !TARGET_OS_EMBEDDED +char embedded = 0; +#else +char embedded = 1; +#endif + +char hotroot; /* checking root device */ +char hotmount; /* checking read-only mounted device */ +char guiControl; /* this app should output info for gui control */ +char xmlControl; /* Output XML (plist) messages -- implies guiControl as well */ +char rebuildBTree; /* Rebuild requested btree files */ +int rebuildOptions; /* Options to indicate which btree should be rebuilt */ +char modeSetting; /* set the mode when creating "lost+found" directory */ +char errorOnExit = 0; /* Exit on first error */ +int upgrading; /* upgrading format */ +int lostAndFoundMode = 0; /* octal mode used when creating "lost+found" directory */ +uint64_t reqCacheSize; /* Cache size requested by the caller (may be specified by the user via -c) */ + +int fsmodified; /* 1 => write done to file system */ +int fsreadfd; /* file descriptor for reading file system */ +int fswritefd; /* file descriptor for writing file system */ +Cache_t fscache; + +/* + * Variables used to map physical block numbers to file paths + */ +enum { BLOCK_LIST_INCREMENT = 512 }; +int gBlkListEntries = 0; +u_int64_t *gBlockList = NULL; +int gFoundBlockEntries = 0; +struct found_blocks *gFoundBlocksList = NULL; +long gBlockSize = 512; +static void ScanDisk(int); +static int getblocklist(const char *filepath); + + +static int checkfilesys __P((char * filesys)); +static int setup __P(( char *dev, int *canWritePtr )); +static void usage __P((void)); +static void getWriteAccess __P(( char *dev, int *canWritePtr )); +extern char *unrawname __P((char *name)); + +int +main(argc, argv) + int argc; + char *argv[]; +{ + int ch; + int ret; + extern int optind; + extern char *optarg; + char * lastChar; + + if ((progname = strrchr(*argv, '/'))) + ++progname; + else + progname = *argv; + + while ((ch = getopt(argc, argv, "b:B:c:D:e:Edfglm:npqrR:SuyxJ")) != EOF) { + switch (ch) { + case 'b': + gBlockSize = atoi(optarg); + if ((gBlockSize < 512) || (gBlockSize & (gBlockSize-1))) { + (void) fprintf(stderr, "%s invalid block size %d\n", + progname, gBlockSize); + exit(2); + } + break; + case 'S': + scanflag = 1; + break; + case 'B': + getblocklist(optarg); + break; + case 'c': + /* Cache size to use in fsck_hfs */ + reqCacheSize = strtoull(optarg, &lastChar, 0); + if (*lastChar) { + switch (tolower(*lastChar)) { + case 'g': + reqCacheSize *= 1024ULL; + /* fall through */ + case 'm': + reqCacheSize *= 1024ULL; + /* fall through */ + case 'k': + reqCacheSize *= 1024ULL; + break; + default: + reqCacheSize = 0; + break; + }; + } + break; + + case 'd': + debug++; + break; + + case 'J': + disable_journal++; + break; + case 'D': + /* Input value should be in hex example: -D 0x5 */ + cur_debug_level = strtoul(optarg, NULL, 0); + if (cur_debug_level == 0) { + (void) fplog 
(stderr, "%s: invalid debug development argument. Assuming zero\n", progname); + } + break; + + case 'e': + if (optarg) { + if (strcasecmp(optarg, "embedded") == 0) + embedded = 1; + else if (strcasecmp(optarg, "desktop") == 0) + embedded = 0; + } + break; + + case 'E': + /* Exit on first error, after logging it */ + errorOnExit = 1; + break; + case 'f': + force++; + break; + + case 'g': + guiControl++; + break; + + case 'x': + guiControl = 1; + xmlControl++; + break; + + case 'l': + lflag++; + nflag++; + yflag = 0; + force++; + break; + + case 'm': + modeSetting++; + lostAndFoundMode = strtol( optarg, NULL, 8 ); + if ( lostAndFoundMode == 0 ) + { + (void) fplog(stderr, "%s: invalid mode argument \n", progname); + usage(); + } + break; + + case 'n': + nflag++; + yflag = 0; + break; + + case 'p': + preen++; + break; + + case 'q': + quick++; + break; + + case 'r': + // rebuild catalog btree + rebuildBTree++; + rebuildOptions |= REBUILD_CATALOG; + break; + + case 'R': + if (optarg) { + char *cp = optarg; + while (*cp) { + switch (*cp) { + case 'a': + // rebuild attribute btree + rebuildBTree++; + rebuildOptions |= REBUILD_ATTRIBUTE; + break; + + case 'c': + // rebuild catalog btree + rebuildBTree++; + rebuildOptions |= REBUILD_CATALOG; + break; + + case 'e': + // rebuild extents overflow btree + rebuildBTree++; + rebuildOptions |= REBUILD_EXTENTS; + break; + + default: + fprintf(stderr, "%s: unknown btree rebuild code `%c' (%#x)\n", progname, *cp, *cp); + exit(2); + } + cp++; + } + break; + } + + case 'y': + yflag++; + nflag = 0; + break; + + case 'u': + case '?': + default: + usage(); + } + } + + argc -= optind; + argv += optind; + + if (debug == 0 && disable_journal != 0) + disable_journal = 0; + + if (gBlkListEntries != 0 && gBlockSize == 0) + gBlockSize = 512; + + if (guiControl) + debug = 0; /* debugging is for command line only */ + + if (signal(SIGINT, SIG_IGN) != SIG_IGN) + (void)signal(SIGINT, catch); + + if (argc < 1) { + (void) fplog(stderr, "%s: missing special-device\n", progname); + usage(); + } + + ret = 0; + while (argc-- > 0) + ret |= checkfilesys(blockcheck(*argv++)); + + exit(ret); +} + +int fs_fd=-1; // fd to the root-dir of the fs we're checking (only w/lfag == 1) + +void +cleanup_fs_fd(void) +{ + if (fs_fd >= 0) { + fcntl(fs_fd, F_THAW_FS, NULL); + close(fs_fd); + fs_fd = -1; + } +} + +static char * +mountpoint(const char *cdev) +{ + char *retval = NULL; + struct statfs *fsinfo; + char *unraw = NULL; + int result; + int i; + + unraw = strdup(cdev); + unrawname(unraw); + + if (unraw == NULL) + goto done; + + result = getmntinfo(&fsinfo, MNT_NOWAIT); + + for (i = 0; i < result; i++) { + if (strcmp(unraw, fsinfo[i].f_mntfromname) == 0) { + retval = strdup(fsinfo[i].f_mntonname); + break; + } + } + +done: + if (unraw) + free(unraw); + + return retval; +} + +static int +checkfilesys(char * filesys) +{ + int flags; + int result = 0; + int chkLev, repLev, logLev; + int canWrite; + char *mntonname = NULL; + fsck_ctx_t context = NULL; + flags = 0; + cdevname = filesys; + canWrite = 0; + hotmount = hotroot; // hotroot will be 1 or 0 by this time + + // + // initialize the printing/logging without actually printing anything + // DO NOT DELETE THIS or else you can deadlock during a live fsck + // when something is printed and we try to create the log file. 
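The -b and -c cases in main()'s option loop above validate the physical block size and scale the cache request by its k/m/g suffix; a self-contained sketch of those two checks, with illustrative helper names:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* -b: physical block size must be a power of two and at least 512 bytes. */
static int
valid_block_size(long size)
{
    return size >= 512 && (size & (size - 1)) == 0;
}

/* -c: cache size, optionally scaled by a trailing 'k', 'm' or 'g'. */
static uint64_t
parse_cache_size(const char *arg)
{
    char *last;
    uint64_t size = strtoull(arg, &last, 0);

    if (*last) {
        switch (tolower((unsigned char)*last)) {
        case 'g': size *= 1024ULL;      /* fall through */
        case 'm': size *= 1024ULL;      /* fall through */
        case 'k': size *= 1024ULL; break;
        default:  size = 0;        break;   /* unknown suffix: drop the request */
        }
    }
    return size;
}

int
main(void)
{
    printf("4096 valid: %d\n", valid_block_size(4096));     /* 1 */
    printf("1000 valid: %d\n", valid_block_size(1000));     /* 0: not a power of two */
    printf("512m = %llu bytes\n",
        (unsigned long long)parse_cache_size("512m"));      /* 536870912 */
    return 0;
}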
+ // + plog(""); + + context = fsckCreate(); + + mntonname = mountpoint(cdevname); + if (hotroot) { + if (mntonname) + free(mntonname); + mntonname = strdup("/"); + } + + if (lflag) { + struct stat fs_stat; + + /* + * Ensure that, if we're doing a live verify, that we're not trying + * to do input or output to the same device. This would cause a deadlock. + */ + + if (stat(cdevname, &fs_stat) != -1 && + (((fs_stat.st_mode & S_IFMT) == S_IFCHR) || + ((fs_stat.st_mode & S_IFMT) == S_IFBLK))) { + struct stat io_stat; + + if (fstat(fileno(stdin), &io_stat) != -1 && + (fs_stat.st_rdev == io_stat.st_dev)) { + plog("ERROR: input redirected from target volume for live verify.\n"); + return EEXIT; + } + if (fstat(fileno(stdout), &io_stat) != -1 && + (fs_stat.st_rdev == io_stat.st_dev)) { + plog("ERROR: output redirected to target volume for live verify.\n"); + return EEXIT; + } + if (fstat(fileno(stderr), &io_stat) != -1 && + (fs_stat.st_rdev == io_stat.st_dev)) { + plog("ERROR: error output redirected to target volume for live verify.\n"); + return EEXIT; + } + + } + } + + /* + * If the device is mounted somewhere, then we need to make sure that it's + * a read-only device, or that a live-verify has been requested. + */ + if (mntonname != NULL) { + struct statfs stfs_buf; + + if (statfs(mntonname, &stfs_buf) == 0) { + if (lflag) { + // Need to try to freeze it + fs_fd = open(mntonname, O_RDONLY); + if (fs_fd < 0) { + plog("ERROR: could not open %s to freeze the volume.\n", mntonname); + free(mntonname); + return 0; + } + + if (fcntl(fs_fd, F_FREEZE_FS, NULL) != 0) { + free(mntonname); + plog("ERROR: could not freeze volume (%s)\n", strerror(errno)); + return 0; + } + } else if (stfs_buf.f_flags & MNT_RDONLY) { + hotmount = 1; + } + } + } + + if (debug && preen) + pwarn("starting\n"); + + if (setup( filesys, &canWrite ) == 0) { + if (preen) + pfatal("CAN'T CHECK FILE SYSTEM."); + result = EEXIT; + goto ExitThisRoutine; + } + + if (preen == 0) { + if (hotroot && !guiControl) + plog("** Root file system\n"); + } + + /* start with defaults for dfa back-end */ + chkLev = kAlwaysCheck; + repLev = kMajorRepairs; + logLev = kVerboseLog; + + if (yflag) + repLev = kMajorRepairs; + + if (quick) { + chkLev = kNeverCheck; + repLev = kNeverRepair; + logLev = kFatalLog; + } else if (force) { + chkLev = kForceCheck; + } + if (preen) { + repLev = kMinorRepairs; + chkLev = force ? kAlwaysCheck : kDirtyCheck; + logLev = kFatalLog; + } + if (debug) + logLev = kDebugLog; + + if (nflag) + repLev = kNeverRepair; + + if ( rebuildBTree ) { + chkLev = kPartialCheck; + repLev = kForceRepairs; // this will force rebuild of B-Tree file + } + + fsckSetVerbosity(context, logLev); + /* All of fsck_hfs' output should go thorugh logstring */ + fsckSetOutput(context, NULL); + /* Setup writer that will output to standard out */ + fsckSetWriter(context, &outstring); + /* Setup logger that will write to log file */ + fsckSetLogger(context, &logstring); + if (guiControl) { + if (xmlControl) + fsckSetOutputStyle(context, fsckOutputXML); + else + fsckSetOutputStyle(context, fsckOutputGUI); + } else { + fsckSetOutputStyle(context, fsckOutputTraditional); + } + + if (errorOnExit && nflag) { + chkLev = kMajorCheck; + } + + /* + * go check HFS volume... 
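checkfilesys() above refuses a live verify (-l) when standard input, output, or error is redirected to the volume being checked, by comparing the device's st_rdev against each stream's st_dev; a minimal sketch of that comparison, using a hypothetical device path:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Return 1 if the stream lives on the device named by devpath. */
static int
stream_on_device(FILE *stream, const char *devpath)
{
    struct stat dev_st, io_st;

    if (stat(devpath, &dev_st) == -1)
        return 0;
    if (!S_ISCHR(dev_st.st_mode) && !S_ISBLK(dev_st.st_mode))
        return 0;                       /* only meaningful for special files */
    if (fstat(fileno(stream), &io_st) == -1)
        return 0;
    return dev_st.st_rdev == io_st.st_dev;
}

int
main(void)
{
    /* A live verify of /dev/rdisk0s2 (hypothetical) would bail out here. */
    if (stream_on_device(stdout, "/dev/rdisk0s2"))
        fprintf(stderr, "output redirected to target volume\n");
    return 0;
}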
+ */ + + if (rebuildOptions && canWrite == 0) { + plog("BTree rebuild requested but writing disabled\n"); + result = EEXIT; + goto ExitThisRoutine; + } + + if (gBlockList != NULL && scanflag != 0) { + plog("Cannot scan for bad blocks and ask for listed blocks to file mapping\n"); + result = EEXIT; + goto ExitThisRoutine; + } + if (scanflag != 0) { + plog("Scanning entire disk for bad blocks\n"); + ScanDisk(fsreadfd); + } + + result = CheckHFS( filesys, fsreadfd, fswritefd, chkLev, repLev, context, + lostAndFoundMode, canWrite, &fsmodified, + lflag, rebuildOptions ); + if (debug) + plog("\tCheckHFS returned %d, fsmodified = %d\n", result, fsmodified); + + if (!hotmount) { + ckfini(1); + if (quick) { + if (result == 0) { + pwarn("QUICKCHECK ONLY; FILESYSTEM CLEAN\n"); + result = 0; + goto ExitThisRoutine; + } else if (result == R_Dirty) { + pwarn("QUICKCHECK ONLY; FILESYSTEM DIRTY\n"); + result = DIRTYEXIT; + goto ExitThisRoutine; + } else if (result == R_BadSig) { + pwarn("QUICKCHECK ONLY; NO HFS SIGNATURE FOUND\n"); + result = DIRTYEXIT; + goto ExitThisRoutine; + } else { + result = EEXIT; + goto ExitThisRoutine; + } + } + } else { + struct statfs stfs_buf; + + /* + * Check to see if root is mounted read-write. + */ + if (statfs(mntonname, &stfs_buf) == 0) + flags = stfs_buf.f_flags; + else + flags = 0; + ckfini(flags & MNT_RDONLY); + } + + /* XXX free any allocated memory here */ + + if (hotmount && fsmodified) { + struct hfs_mount_args args; + /* + * We modified the root. Do a mount update on + * it, unless it is read-write, so we can continue. + */ + if (!preen) + fsckPrint(context, fsckVolumeModified); + if (flags & MNT_RDONLY) { + bzero(&args, sizeof(args)); + flags |= MNT_UPDATE | MNT_RELOAD; + if (debug) + fprintf(stderr, "doing update / reload mount for %s now\n", mntonname); + if (mount("hfs", mntonname, flags, &args) == 0) { + if (result != 0) + result = EEXIT; + goto ExitThisRoutine; + } else { + //if (debug) + fprintf(stderr, "update/reload mount for %s failed: %s\n", mntonname, strerror(errno)); + } + } + if (!preen) + plog("\n***** REBOOT NOW *****\n"); + sync(); + result = FIXEDROOTEXIT; + goto ExitThisRoutine; + } + + if (result != 0 && result != MAJOREXIT) + result = EEXIT; + +ExitThisRoutine: + if (lflag) { + if (fs_fd >= 0) { + fcntl(fs_fd, F_THAW_FS, NULL); + close(fs_fd); + fs_fd = -1; + } + } + if (mntonname) + free(mntonname); + + if (context) + fsckDestroy(context); + + return (result); +} + + +/* + * Setup for I/O to device + * Return 1 if successful, 0 if unsuccessful. + * canWrite - 1 if we can safely write to the raw device or 0 if not. + */ +static int +setup( char *dev, int *canWritePtr ) +{ + struct stat statb; + int devBlockSize; + uint32_t cacheBlockSize; + uint32_t cacheTotalBlocks; + int preTouchMem = 0; + + fswritefd = -1; + *canWritePtr = 0; + + if (stat(dev, &statb) < 0) { + plog("Can't stat %s: %s\n", dev, strerror(errno)); + return (0); + } + if ((statb.st_mode & S_IFMT) != S_IFCHR) { + pfatal("%s is not a character device", dev); + if (reply("CONTINUE") == 0) + return (0); + } + /* Always attempt to replay the journal */ + if (!nflag && !quick) { + // We know we have a character device by now. 
+ if (strncmp(dev, "/dev/rdisk", 10) == 0) { + char block_device[MAXPATHLEN+1]; + int rv; + snprintf(block_device, sizeof(block_device), "/dev/%s", dev + 6); + rv = journal_replay(block_device); + if (debug) + plog("journal_replay(%s) returned %d\n", block_device, rv); + } + } + /* attempt to get write access to the block device and if not check if volume is */ + /* mounted read-only. */ + if (nflag == 0 && quick == 0) { + getWriteAccess( dev, canWritePtr ); + } + + if (nflag || quick || (fswritefd = open(dev, O_RDWR | (hotmount ? 0 : O_EXLOCK))) < 0) { + fswritefd = -1; + if (preen) { + pfatal("** %s (NO WRITE ACCESS)\n", dev); + } + } + + if (preen == 0 && !guiControl) { + if (nflag || quick || fswritefd == -1) { + plog("** %s (NO WRITE)\n", dev); + } else { + plog("** %s\n", dev); + } + } + + if (fswritefd == -1) { + if ((fsreadfd = open(dev, O_RDONLY)) < 0) { + plog("Can't open %s: %s\n", dev, strerror(errno)); + return (0); + } + } else { + fsreadfd = dup(fswritefd); + if (fsreadfd < 0) { + plog("Can't dup fd for reading on %s: %s\n", dev, strerror(errno)); + close(fswritefd); + return(0); + } + } + + + /* Get device block size to initialize cache */ + if (ioctl(fsreadfd, DKIOCGETBLOCKSIZE, &devBlockSize) < 0) { + pfatal ("Can't get device block size\n"); + return (0); + } + + /* + * Calculate the cache block size and total blocks. + * + * If a quick check was requested, we'll only be checking to see if + * the volume was cleanly unmounted or journalled, so we won't need + * a lot of cache. Since lots of quick checks can be run in parallel + * when a new disk with several partitions comes on line, let's avoid + * the memory usage when we don't need it. + */ + if (reqCacheSize == 0 && quick == 0) { + /* + * Auto-pick the cache size. The cache code will deal with minimum + * maximum values, so we just need to find out the size of memory, and + * how much of it we'll use. + * + * If we're looking at the root device, and it's not a live verify (lflag), + * then we will use half of physical memory; otherwise, we'll use an eigth. + * + */ + uint64_t memSize; + size_t dsize = sizeof(memSize); + int rv; + + rv = sysctlbyname("hw.memsize", &memSize, &dsize, NULL, 0); + if (rv == -1) { + (void)fplog(stderr, "sysctlbyname failed, not auto-setting cache size\n"); + } else { + int d = (hotroot && !lflag) ? 2 : 8; + int safeMode = 0; + dsize = sizeof(safeMode); + rv = sysctlbyname("kern.safeboot", &safeMode, &dsize, NULL, 0); + if (rv != -1 && safeMode != 0 && hotroot && !lflag) { +#define kMaxSafeModeMem ((size_t)2 * 1024 * 1024 * 1024) /* 2Gbytes, means cache will max out at 1gbyte */ + if (debug) { + (void)fplog(stderr, "Safe mode and single-user, setting memsize to a maximum of 2gbytes\n"); + } + memSize = (memSize < kMaxSafeModeMem) ? memSize : kMaxSafeModeMem; + } + reqCacheSize = memSize / d; + } + } + + CalculateCacheSizes(reqCacheSize, &cacheBlockSize, &cacheTotalBlocks, debug); + + preTouchMem = (hotroot != 0) && (lflag != 0); + /* Initialize the cache */ + if (CacheInit (&fscache, fsreadfd, fswritefd, devBlockSize, + cacheBlockSize, cacheTotalBlocks, CacheHashSize, preTouchMem) != EOK) { + pfatal("Can't initialize disk cache\n"); + return (0); + } + + return (1); +} + + +// This routine will attempt to open the block device with write access for the target +// volume in order to block others from mounting the volume with write access while we +// check / repair it. If we cannot get write access then we check to see if the volume +// has been mounted read-only. 
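The automatic cache sizing in setup() above takes half of physical memory for an offline check of the root device and an eighth otherwise, capping the figure at 2 GB under safe boot; a minimal sketch of that policy using the same sysctl names:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

#define MAX_SAFE_MODE_MEM ((uint64_t)2 * 1024 * 1024 * 1024)   /* 2 GB cap in safe boot */

static uint64_t
auto_cache_size(int hotroot, int live_verify)
{
    uint64_t mem = 0;
    int safeboot = 0;
    size_t len = sizeof(mem);

    if (sysctlbyname("hw.memsize", &mem, &len, NULL, 0) == -1)
        return 0;                               /* caller falls back to defaults */

    len = sizeof(safeboot);
    (void)sysctlbyname("kern.safeboot", &safeboot, &len, NULL, 0);

    if (hotroot && !live_verify) {
        if (safeboot && mem > MAX_SAFE_MODE_MEM)
            mem = MAX_SAFE_MODE_MEM;
        return mem / 2;                         /* root device, offline check */
    }
    return mem / 8;                             /* everything else */
}

int
main(void)
{
    printf("suggested cache: %llu bytes\n",
        (unsigned long long)auto_cache_size(1, 0));
    return 0;
}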
If it is read-only then we should be OK to write to +// the raw device. Note that this does not protect use from someone upgrading the mount +// from read-only to read-write. + +static void getWriteAccess( char *dev, int *canWritePtr ) +{ + int i; + int myMountsCount; + void * myPtr; + char * myCharPtr; + struct statfs * myBufPtr; + void * myNamePtr; + int blockDevice_fd = -1; + + myPtr = NULL; + myNamePtr = malloc( strlen(dev) + 2 ); + if ( myNamePtr == NULL ) + return; + + strcpy( (char *)myNamePtr, dev ); + if ( (myCharPtr = strrchr( (char *)myNamePtr, '/' )) != 0 ) { + if ( myCharPtr[1] == 'r' ) { + memmove(&myCharPtr[1], &myCharPtr[2], strlen(&myCharPtr[2]) + 1); + blockDevice_fd = open( (char *)myNamePtr, O_WRONLY | (hotmount ? 0 : O_EXLOCK) ); + } + } + + if ( blockDevice_fd > 0 ) { + // we got write access to the block device so we can safely write to raw device + *canWritePtr = 1; + goto ExitThisRoutine; + } + + // get count of mounts then get the info for each + myMountsCount = getfsstat( NULL, 0, MNT_NOWAIT ); + if ( myMountsCount < 0 ) + goto ExitThisRoutine; + + myPtr = (void *) malloc( sizeof(struct statfs) * myMountsCount ); + if ( myPtr == NULL ) + goto ExitThisRoutine; + myMountsCount = getfsstat( myPtr, + (int)(sizeof(struct statfs) * myMountsCount), + MNT_NOWAIT ); + if ( myMountsCount < 0 ) + goto ExitThisRoutine; + + myBufPtr = (struct statfs *) myPtr; + for ( i = 0; i < myMountsCount; i++ ) + { + if ( strcmp( myBufPtr->f_mntfromname, myNamePtr ) == 0 ) { + if ( myBufPtr->f_flags & MNT_RDONLY ) + *canWritePtr = 1; + goto ExitThisRoutine; + } + myBufPtr++; + } + *canWritePtr = 1; // single user will get us here, f_mntfromname is not /dev/diskXXXX + +ExitThisRoutine: + if ( myPtr != NULL ) + free( myPtr ); + + if ( myNamePtr != NULL ) + free( myNamePtr ); + + if (blockDevice_fd != -1) { + close(blockDevice_fd); + } + + return; + +} /* getWriteAccess */ + + +static void +usage() +{ + (void) fplog(stderr, "usage: %s [-b [size] B [path] c [size] e [mode] ESdfglx m [mode] npqruy] special-device\n", progname); + (void) fplog(stderr, " b size = size of physical blocks (in bytes) for -B option\n"); + (void) fplog(stderr, " B path = file containing physical block numbers to map to paths\n"); + (void) fplog(stderr, " c size = cache size (ex. 
512m, 1g)\n"); + (void) fplog(stderr, " e mode = emulate 'embedded' or 'desktop'\n"); + (void) fplog(stderr, " E = exit on first major error\n"); + (void) fplog(stderr, " d = output debugging info\n"); + (void) fplog(stderr, " f = force fsck even if clean (preen only) \n"); + (void) fplog(stderr, " g = GUI output mode\n"); + (void) fplog(stderr, " x = XML output mode\n"); + (void) fplog(stderr, " l = live fsck (lock down and test-only)\n"); + (void) fplog(stderr, " m arg = octal mode used when creating lost+found directory \n"); + (void) fplog(stderr, " n = assume a no response \n"); + (void) fplog(stderr, " p = just fix normal inconsistencies \n"); + (void) fplog(stderr, " q = quick check returns clean, dirty, or failure \n"); + (void) fplog(stderr, " r = rebuild catalog btree \n"); + (void) fplog(stderr, " S = Scan disk for bad blocks\n"); + (void) fplog(stderr, " u = usage \n"); + (void) fplog(stderr, " y = assume a yes response \n"); + + exit(1); +} + + +static void +AddBlockToList(long long block) +{ + + if ((gBlkListEntries % BLOCK_LIST_INCREMENT) == 0) { + void *tmp; + +// gBlkListEntries += BLOCK_LIST_INCREMENT; + tmp = realloc(gBlockList, (gBlkListEntries + BLOCK_LIST_INCREMENT) * sizeof(u_int64_t)); + if (tmp == NULL) { + pfatal("Can't allocate memory for block list (%llu entries).\n", gBlkListEntries); + } + gBlockList = (u_int64_t*)tmp; + } + gBlockList[gBlkListEntries++] = block; + return; +} + +static int printStatus; +static void +siginfo(int signo) +{ + printStatus = 1; +} + +static void +ScanDisk(int fd) +{ + uint32_t devBlockSize = 512; + uint64_t devBlockTotal; + off_t diskSize; + uint8_t *buffer = NULL; + size_t bufSize = 1024 * 1024; + ssize_t nread; + off_t curPos = 0; + void (*oldhandler)(int); + uint32_t numErrors = 0; + uint32_t maxErrors = 40; // Something more variable? + + oldhandler = signal(SIGINFO, &siginfo); + +#define PRSTAT \ + do { \ + if (diskSize) { \ + fprintf(stderr, "Scanning offset %lld of %lld (%d%%)\n", \ + curPos, diskSize, (int)((curPos * 100) / diskSize)); \ + } else { \ + fprintf(stderr, "Scanning offset %lld\n", curPos); \ + } \ + printStatus = 0; \ + } while (0) + + if (ioctl(fd, DKIOCGETBLOCKSIZE, &devBlockSize) == -1) { + devBlockSize = 512; + } + + if (ioctl(fd, DKIOCGETBLOCKCOUNT, &devBlockTotal) == -1) { + diskSize = 0; + } else + diskSize = devBlockTotal * devBlockSize; + + while (buffer == NULL && bufSize >= devBlockSize) { + buffer = malloc(bufSize); + if (buffer == NULL) { + bufSize /= 2; + } + } + if (buffer == NULL) { + pfatal("Cannot allocate buffer for disk scan.\n"); + } + +loop: + + if (printStatus) { + PRSTAT; + } + while ((nread = pread(fd, buffer, bufSize, curPos)) == bufSize) { + curPos += bufSize; + if (printStatus) { + PRSTAT; + } + } + + if (nread == 0) { + /* We're done with the disk */ + goto done; + } + if (nread == -1) { + if (errno == EIO) { + /* Try reading devBlockSize blocks */ + size_t total; + for (total = 0; total < bufSize; total += devBlockSize) { + nread = pread(fd, buffer, devBlockSize, curPos + total); + if (nread == -1) { + if (errno == EIO) { + if (debug) + fprintf(stderr, "Bad block at offset %lld\n", curPos + total); + AddBlockToList((curPos + total) / gBlockSize); + if (++numErrors > maxErrors) { + if (debug) + fprintf(stderr, "Got %u errors, maxing out so stopping scan\n", numErrors); + goto done; + } + continue; + } else { + pfatal("Got a non I/O error reading disk at offset %llu: %s\n", + curPos + total, strerror(errno)); + // Hey, pfatal wasn't fatal! 
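AddBlockToList() and the EIO branch of ScanDisk() above grow the bad-block array in fixed increments and record the failing byte offset divided by the -b block size; a self-contained sketch of the same pattern:

#include <stdio.h>
#include <stdlib.h>

#define LIST_INCREMENT 512              /* grow the array 512 entries at a time */

static unsigned long long *block_list;
static int block_entries;

static void
add_block(unsigned long long block)
{
    if ((block_entries % LIST_INCREMENT) == 0) {
        void *tmp = realloc(block_list,
            (block_entries + LIST_INCREMENT) * sizeof(*block_list));
        if (tmp == NULL) {
            fprintf(stderr, "out of memory for block list\n");
            exit(8);                    /* EEXIT, the standard error exit */
        }
        block_list = tmp;
    }
    block_list[block_entries++] = block;
}

int
main(void)
{
    long long block_size = 512;         /* default physical block size (-b) */
    long long bad_offset = 1048576;     /* byte offset where a read failed */

    add_block(bad_offset / block_size);
    printf("recorded block %llu (%d entries)\n",
        block_list[0], block_entries);
    free(block_list);
    return 0;
}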
+ // But that seems to work out for us for some reason. + } + } + if (nread == 0) { + /* End of disk, somehow. */ + goto done; + } + if (nread != devBlockSize) { + pwarn("During disk scan, did not get block size (%zd) read, got %zd instead. Skipping rest of this block.\n", (size_t)devBlockSize, nread); + continue; + } + } + curPos += total; + goto loop; + } else if (errno == EINTR) { + goto loop; + } else { + pfatal("Got a non I/O error reading disk at offset %llu: %s\n", curPos, strerror(errno)); + exit(EEXIT); + } + } + if (nread < bufSize) { + if ((nread % devBlockSize) == 0) { + curPos += nread; + } else { + curPos = curPos + (((nread % devBlockSize) + 1) * devBlockSize); + } + goto loop; + } + goto loop; +done: + if (buffer) + free(buffer); + signal(SIGINFO, oldhandler); + return; + +} + +static int +getblocklist(const char *filepath) +{ + FILE * file; + long long block; + size_t blockListCount; /* Number of elements allocated to gBlockList array */ + + blockListCount = BLOCK_LIST_INCREMENT; + gBlockList = (u_int64_t *) malloc(blockListCount * sizeof(u_int64_t)); + if (gBlockList == NULL) + pfatal("Can't allocate memory for block list.\n"); + +// printf("getblocklist: processing blocklist %s...\n", filepath); + + if ((file = fopen(filepath, "r")) == NULL) + pfatal("Can't open %s\n", filepath); + + while (fscanf(file, "%lli", &block) > 0) { + AddBlockToList(block); + } + + (void) fclose(file); + + printf("%d blocks to match:\n", gBlkListEntries); + + return (0); +} diff --git a/fsck_hfs/fsck_hfs.h b/fsck_hfs/fsck_hfs.h new file mode 100644 index 0000000..cdac5db --- /dev/null +++ b/fsck_hfs/fsck_hfs.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 1999-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
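getblocklist() above leans on the %lli conversion to accept the decimal, octal, and hexadecimal block numbers that the -B man page allows; a small demonstration of that behaviour:

#include <stdio.h>

int
main(void)
{
    /* The same physical block written three ways, as -B's input file allows. */
    const char *blocklist = "4096 010000 0x1000";
    long long block;
    int consumed, offset = 0;

    while (sscanf(blocklist + offset, "%lli%n", &block, &consumed) == 1) {
        printf("block %lld\n", block);      /* prints 4096 three times */
        offset += consumed;
    }
    return 0;
}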
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "cache.h" + + +const extern char *cdevname; /* name of device being checked */ +extern char *progname; +extern char nflag; /* assume a no response */ +extern char yflag; /* assume a yes response */ +extern char preen; /* just fix normal inconsistencies */ +extern char force; /* force fsck even if clean */ +extern char debug; /* output debugging info */ +extern char disable_journal; /* If debug, and set, do not simulate journal replay */ +extern char embedded; /* built for embedded */ +extern char hotroot; /* checking root device */ +extern char scanflag; /* Scan disk for bad blocks */ + +extern int upgrading; /* upgrading format */ + +extern int fsmodified; /* 1 => write done to file system */ +extern int fsreadfd; /* file descriptor for reading file system */ +extern int fswritefd; /* file descriptor for writing file system */ +extern Cache_t fscache; + + +#define DIRTYEXIT 3 /* Filesystem Dirty, no checks */ +#define FIXEDROOTEXIT 4 /* Writeable Root Filesystem was fixed */ +#define EEXIT 8 /* Standard error exit. */ +#define MAJOREXIT 47 /* We had major errors when doing a early-exit verify */ + + +char *blockcheck __P((char *name)); +void cleanup_fs_fd __P((void)); +void catch __P((int)); +void ckfini __P((int markclean)); +void pfatal __P((const char *fmt, ...)); +void pwarn __P((const char *fmt, ...)); +void logstring(void *, const char *); // write to log file +void outstring(void *, const char *); // write to standard out +void llog(const char *fmt, ...); // write to log file +void olog(const char *fmt, ...); // write to standard out +void plog(const char *fmt, ...); // printf replacement that writes to both log file and standard out +void vplog(const char *fmt, va_list ap); // vprintf replacement that writes to both log file and standard out +void fplog(FILE *stream, const char *fmt, ...); // fprintf replacement that writes to both log file and standard out +#define printf plog // just in case someone tries to use printf/fprint +#define fprintf fplog + +int reply __P((char *question)); + +void start_progress(void); +void draw_progress(int); +void end_progress(void); diff --git a/fsck_hfs/fsck_hfs_msgnums.h b/fsck_hfs/fsck_hfs_msgnums.h new file mode 100644 index 0000000..ff40ecd --- /dev/null +++ b/fsck_hfs/fsck_hfs_msgnums.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __FSCK_HFS_MSGNUMS_H +#define __FSCK_HFS_MSGNUMS_H + +/* + * HFS-specific status messages. These indicate the current + * state of fsck_hfs run. 
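The exit codes defined above match the EXIT VALUES section of the man page; a sketch of how a caller might interpret them after a quick check, with a hypothetical device path:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int
main(void)
{
    /* -q: quick check; the clean/dirty answer is encoded in the exit status. */
    int status = system("fsck_hfs -q /dev/rdisk0s2");   /* hypothetical device */

    if (status == -1 || !WIFEXITED(status)) {
        fprintf(stderr, "fsck_hfs did not run to completion\n");
        return 1;
    }
    switch (WEXITSTATUS(status)) {
    case 0:  printf("volume is clean\n");                           break;
    case 3:  printf("volume is dirty (DIRTYEXIT)\n");               break;
    case 4:  printf("root volume fixed, reboot (FIXEDROOTEXIT)\n"); break;
    case 47: printf("major error found with -E (MAJOREXIT)\n");     break;
    case 8:  /* EEXIT */
    default: printf("check failed or found damage\n");              break;
    }
    return 0;
}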
+ */ +enum { + hfsUnknown = 200, + + hfsExtBTCheck = 201, /* Checking Extents Overflow file */ + hfsCatBTCheck = 202, /* Checking Catalog file */ + hfsCatHierCheck = 203, /* Checking Catalog hierarchy */ + hfsExtAttrBTCheck = 204, /* Checking Extended Attributes file */ + hfsVolBitmapCheck = 205, /* Checking volume bitmap */ + hfsVolInfoCheck = 206, /* Checking volume information */ + hfsHardLinkCheck = 207, /* Checking multi-linked files */ + hfsRebuildExtentBTree = 208, /* Rebuilding Extents Overflow B-tree */ + hfsRebuildCatalogBTree = 209, /* Rebuilding Catalog B-tree */ + hfsRebuildAttrBTree = 210, /* Rebuilding Extended Attributes B-tree */ + + hfsCaseSensitive = 211, /* Detected a case-sensitive catalog */ + hfsMultiLinkDirCheck = 212, /* Checking multi-linked directories */ + hfsJournalVolCheck = 213, /* Checking Journaled HFS Plus volume */ + hfsLiveVerifyCheck = 214, /* Performing live verification */ + hfsVerifyVolWithWrite = 215, /* Verifying volume when it is mounted with write access */ + hfsCheckHFS = 216, /* Checking HFS volume */ + hfsCheckNoJnl = 217, /* Checking Non-journaled HFS Plus volume */ +}; + +/* + * Scavenger errors. They are mostly corruptions detected + * during scavenging process. + * If negative, they are unrecoverable (scavenging terminates). + * If positive, they are recoverable (scavenging continues). + */ +enum { + E_FirstError = 500, + + E_PEOF = 500, /* Invalid PEOF */ + E_LEOF = 501, /* Invalid LEOF */ + E_DirVal = 502, /* Invalid directory valence */ + E_CName = 503, /* Invalid CName */ + E_NHeight = 504, /* Invalid node height */ + E_NoFile = 505, /* Missing file record for file thread */ + E_ABlkSz = -506, /* Invalid allocation block size */ + E_NABlks = -507, /* Invalid number of allocation blocks */ + E_VBMSt = -508, /* Invalid VBM start block */ + E_ABlkSt = -509, /* Invalid allocation block start */ + + E_ExtEnt = -510, /* Invalid extent entry */ + E_OvlExt = 511, /* Overlapped extent allocation (id, path) */ + E_LenBTH = -512, /* Invalid BTH length */ + E_ShortBTM = -513, /* BT map too short to repair */ + E_BTRoot = -514, /* Invalid root node number */ + E_NType = -515, /* Invalid node type */ + E_NRecs = -516, /* Invalid record count */ + E_IKey = -517, /* Invalid index key */ + E_IndxLk = -518, /* Invalid index link */ + E_SibLk = -519, /* Invalid sibling link */ + + E_BadNode = -520, /* Invalid node structure */ + E_OvlNode = -521, /* overlapped node allocation */ + E_MapLk = -522, /* Invalid map node linkage */ + E_KeyLen = -523, /* Invalid key length */ + E_KeyOrd = -524, /* Keys out of order */ + E_BadMapN = -525, /* Invalid map node */ + E_BadHdrN = -526, /* Invalid header node */ + E_BTDepth = -527, /* exceeded maximum BTree depth */ + E_CatRec = -528, /* Invalid catalog record type */ + E_LenDir = -529, /* Invalid directory record length */ + + E_LenThd = -530, /* Invalid thread record length */ + E_LenFil = -531, /* Invalid file record length */ + E_NoRtThd = -532, /* Missing thread record for root directory */ + E_NoThd = -533, /* Missing thread record */ + E_NoDir = 534, /* Missing directory record */ + E_ThdKey = -535, /* Invalid key for thread record */ + E_ThdCN = -536, /* Invalid parent CName in thread record */ + E_LenCDR = -537, /* Invalid catalog record length */ + E_DirLoop = -538, /* Loop in directory hierarchy */ + E_RtDirCnt = 539, /* Invalid root directory count */ + + E_RtFilCnt = 540, /* Invalid root file count */ + E_DirCnt = 541, /* Invalid volume directory count */ + E_FilCnt = 542, /* Invalid volume file count */ 
+ E_CatPEOF = -543, /* Invalid catalog PEOF */ + E_ExtPEOF = -544, /* Invalid extent file PEOF */ + E_CatDepth = 545, /* Nesting of folders has exceeded the recommended limit of 100 */ + E_NoFThdFlg = -546, /* File thread flag not set in file record */ + E_CatalogFlagsNotZero = 547, /* Reserved fields in the catalog record have incorrect data */ + E_BadFileName = -548, /* Invalid file/folder name problem */ + E_InvalidClumpSize = 549, /* Invalid file clump size */ + + E_InvalidBTreeHeader = 550, /* Invalid B-tree header */ + E_LockedDirName = 551, /* Inappropriate locked folder name */ + E_EntryNotFound = -552, /* volume catalog entry not found */ + E_FreeBlocks = 553, /* Invalid volume free block count */ + E_MDBDamaged = 554, /* Master Directory Block needs minor repair */ + E_VolumeHeaderDamaged = 555, /* Volume Header needs minor repair */ + E_VBMDamaged = 556, /* Volume Bit Map needs minor repair */ + E_InvalidNodeSize = -557, /* Invalid B-tree node size */ + E_LeafCnt = 558, /* Invalid leaf record count */ + E_BadValue = 559, /* (It should be %s instead of %s) */ + + E_InvalidID = 560, /* Invalid file or directory ID found */ + E_VolumeHeaderTooNew = 561, /* I can't understand this version of HFS Plus */ + E_DiskFull = -562, /* Disk full error */ + E_InternalFileOverlap = -563, /* Internal files overlap (file %d) */ + E_InvalidVolumeHeader = -564, /* Invalid volume header */ + E_InvalidMDBdrAlBlSt = 565, /* HFS wrapper volume needs repair */ + E_InvalidWrapperExtents = 566, /* Wrapper catalog file location needs repair */ + E_InvalidLinkCount = 567, /* Indirect node %s needs link count adjustment */ + E_UnlinkedFile = 568, /* Unlinked file needs to be deleted */ + E_InvalidPermissions = 569, /* Invalid BSD file type */ + + E_InvalidUID_Unused = 570, /* Invalid UID/GID in BSD info - Unused (4538396) */ + E_IllegalName = 571, /* Illegal name */ + E_IncorrectNumThdRcd = 572, /* Incorrect number of thread records */ + E_SymlinkCreate = 573, /* Cannot create links to all corrupt files */ + E_BadJournal = 574, /* Invalid content in Journal */ + E_IncorrectAttrCount = 575, /* Incorrect number of attributes in attr btree when compared with attr bits set in catalog btree */ + E_IncorrectSecurityCount= 576, /* Incorrect number of security attributes in attr btree when compared with security bits set in catalog btree */ + E_PEOAttr = 577, /* Incorrect physical end of extended attribute data */ + E_LEOAttr = 578, /* Incorrect logical end of extended attribute data */ + E_AttrRec = 579, /* Invalid attribute record (overflow extent without original extent, unknown type) */ + + E_FldCount = 580, /* Incorrect folder count in a directory */ + E_HsFldCount = 581, /* HasFolderCount flag needs to be set */ + E_BadPermPrivDir = 582, /* Incorrect permissions for private directory for directory hard links */ + E_DirInodeBadFlags = 583, /* Incorrect flags for directory inode */ + E_DirInodeBadParent = -584, /* Invalid parent for directory inode */ + E_DirInodeBadName = -585, /* Invalid name for directory inode */ + E_DirHardLinkChain = 586, /* Incorrect number of directory hard link count */ + E_DirHardLinkOwnerFlags = 587, /* Incorrect owner flags for directory hard link */ + E_DirHardLinkFinderInfo = 588, /* Invalid finder info for directory hard link */ + E_DirLinkAncestorFlags = 589, /* Invalid flags for directory hard link parent ancestor */ + + E_BadParentHierarchy = -590, /* Bad parent hierarchy, could not lookup parent directory record */ + E_DirHardLinkNesting = -591, /* Maximum nesting of folders 
and directory hard links reached */ + E_MissingPrivDir = -592, /* Missing private directory for directory hard links */ + E_InvalidLinkChainPrev = 593, /* Previous ID in a hard lnk chain is incorrect */ + E_InvalidLinkChainNext = 594, /* Next ID in a hard link chain is incorrect */ + E_FileInodeBadFlags = 595, /* Incorrecgt flags for file inode */ + E_FileInodeBadParent = -596, /* Invalid parent for file inode */ + E_FileInodeBadName = -597, /* Invalid name for file inode */ + E_FileHardLinkChain = 598, /* Incorrect number of file hard link count */ + E_FileHardLinkFinderInfo= 599, /* Invalid finder info for file hard link */ + + E_InvalidLinkChainFirst = 600, /* Invalid first link in hard link chain */ + E_FileLinkBadFlags = 601, /* Incorrect flags for file hard link */ + E_DirLinkBadFlags = 602, /* Incorrect flags for directory hard link */ + E_OrphanFileLink = 603, /* Orphan file hard link */ + E_OrphanDirLink = 604, /* Orphan directory hard link */ + E_OrphanFileInode = 605, /* Orphan file inode, no file hard links pointing to this inode */ + E_OrphanDirInode = 606, /* Orphan directory inode, no directory hard links pointing to this inode */ + E_OvlExtID = 607, /* Overlapped extent allocation (id) */ + E_UnusedNodeNotZeroed = 608, /* An unused B-tree node is not full of zeroes */ + E_VBMDamagedOverAlloc = 609, /* Volume bitmap has has orphaned block allocation */ + + E_BadHardLinkDate = 610, /* Bad hard link creation date */ + E_DirtyJournal = 611, /* Journal need to be replayed but volume is read-only */ + E_LinkChainNonLink = 612, /* File record has hard link chain flag */ + E_LinkHasData = -613, /* Hard link record has data extents */ + E_FileLinkCountError = 614, /* File has incorrect link count */ + E_BTreeSplitNode = 615, /* B-tree node is split across extents */ + E_BadSymLink = 616, /* Bad information for symlink */ + E_BadSymLinkLength = 617, /* Symlink has bad length */ + E_BadSymLinkName = 618, /* Bad symbolic link name */ + E_LastError = 618 +}; + +#endif diff --git a/fsck_hfs/fsck_hfs_strings.c b/fsck_hfs/fsck_hfs_strings.c new file mode 100644 index 0000000..23cc2e5 --- /dev/null +++ b/fsck_hfs/fsck_hfs_strings.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include "fsck_messages.h" +#include "fsck_hfs_msgnums.h" + +/* + * HFS-specific status messages -- just indicating what phase fsck_hfs is in. 
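The scavenger enum above encodes repairability in the sign of each code: negative values end the scavenge, positive values can be repaired as the check continues. A trivial sketch of that convention:

#include <stdio.h>

/* Convention from fsck_hfs_msgnums.h: negative codes are unrecoverable. */
static const char *
severity(int err)
{
    return (err < 0) ? "unrecoverable" : "recoverable";
}

int
main(void)
{
    printf("E_PEOF (500): %s\n", severity(500));        /* recoverable */
    printf("E_ABlkSz (-506): %s\n", severity(-506));    /* unrecoverable */
    return 0;
}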
+ * The structure is explained in fsck_strings.c + */ +fsck_message_t +hfs_messages[] = { + /* Message Number Message Type Verbosity Arguments */ + /* 201 - 210 */ + { hfsExtBTCheck, "Checking extents overflow file.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsCatBTCheck, "Checking catalog file.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsCatHierCheck, "Checking catalog hierarchy.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsExtAttrBTCheck, "Checking extended attributes file.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsVolBitmapCheck, "Checking volume bitmap.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsVolInfoCheck, "Checking volume information.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsHardLinkCheck, "Checking multi-linked files.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsRebuildExtentBTree, "Rebuilding extents overflow B-tree.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsRebuildCatalogBTree, "Rebuilding catalog B-tree.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsRebuildAttrBTree, "Rebuilding extended attributes B-tree.", fsckMsgVerify, fsckLevel0, 0, }, + + /* 211 - 217 */ + { hfsCaseSensitive, "Detected a case-sensitive volume.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsMultiLinkDirCheck, "Checking multi-linked directories.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsJournalVolCheck, "Checking Journaled HFS Plus volume.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsLiveVerifyCheck, "Performing live verification.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsVerifyVolWithWrite, "Verifying volume when it is mounted with write access.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsCheckHFS, "Checking HFS volume.", fsckMsgVerify, fsckLevel0, 0, }, + { hfsCheckNoJnl, "Checking non-journaled HFS Plus Volume.", fsckMsgVerify, fsckLevel0, 0, }, + + /* End of the array */ + { 0, }, +}; + +/* + * HFS-specific error messages. Most are repairable; some are not, but there's no indication of + * which is which here (see fsck_hfs_msgnums.h; negative values are non-repairable). + * Messages need not be in any particular order, as fsckAddMessages will sort everything. 
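hfs_messages[] above, like hfs_errors[] that follows, is terminated by an all-zero entry rather than an explicit count; a sketch of walking such a table, using a stand-in struct with the same column order (the real fsck_message_t is declared in fsck_messages.h and its field names may differ):

#include <stdio.h>

/* Stand-in with the same column order as the tables above. */
struct msg {
    int         msgnum;
    const char *text;
    int         type;
    int         level;
    int         numargs;
    const int  *argtypes;
};

static struct msg table[] = {
    { 201, "Checking extents overflow file.", 0, 0, 0, NULL },
    { 202, "Checking catalog file.",          0, 0, 0, NULL },
    { 0, },                                   /* all-zeroes terminator */
};

int
main(void)
{
    int n;

    /* Walk until the terminating entry; one way a consumer can size the table. */
    for (n = 0; table[n].msgnum != 0; n++)
        printf("%d: %s\n", table[n].msgnum, table[n].text);
    printf("%d messages\n", n);
    return 0;
}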
+ */ +fsck_message_t +hfs_errors[] = { + /* Message Number Message Type Verbosity Arguments */ + /* 500 - 509 */ + { E_PEOF, "Incorrect block count for file %s", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeFile } }, + { E_LEOF, "Incorrect size for file %s", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeFile } }, + { E_DirVal, "Invalid directory item count", fsckMsgError, fsckLevel1, 0, } , + { E_CName, "Invalid length for file name", fsckMsgError, fsckLevel1, 0, } , + { E_NHeight, "Invalid node height", fsckMsgError, fsckLevel1, 0, } , + { E_NoFile, "Missing file record for file thread", fsckMsgError, fsckLevel1, 0, } , + { E_ABlkSz, "Invalid allocation block size", fsckMsgError, fsckLevel1, 0, } , + { E_NABlks, "Invalid number of allocation blocks", fsckMsgError, fsckLevel1, 0, }, + { E_VBMSt, "Invalid VBM start block", fsckMsgError, fsckLevel1, 0, }, + { E_ABlkSt, "Invalid allocation block start", fsckMsgError, fsckLevel1, 0, }, + + /* 510 - 519 */ + { E_ExtEnt, "Invalid extent entry", fsckMsgError, fsckLevel1, 0, }, + { E_OvlExt, "Overlapped extent allocation (id = %u, %s)", fsckMsgError, fsckLevel1, 2, (const int[]){ fsckTypeInt, fsckTypePath } }, + { E_LenBTH, "Invalid BTH length", fsckMsgError, fsckLevel1, 0, } , + { E_ShortBTM, "BT map too short during repair", fsckMsgError, fsckLevel1, 0, } , + { E_BTRoot, "Invalid root node number", fsckMsgError, fsckLevel1, 0, }, + { E_NType, "Invalid node type", fsckMsgError, fsckLevel1, 0, }, + { E_NRecs, "Invalid record count", fsckMsgError, fsckLevel1, 0, }, + { E_IKey, "Invalid index key", fsckMsgError, fsckLevel1, 0, }, + { E_IndxLk, "Invalid index link", fsckMsgError, fsckLevel1, 0, }, + { E_SibLk, "Invalid sibling link", fsckMsgError, fsckLevel1, 0, }, + + /* 520 - 529 */ + { E_BadNode, "Invalid node structure", fsckMsgError, fsckLevel1, 0, }, + { E_OvlNode, "Overlapped node allocation", fsckMsgError, fsckLevel1, 0, }, + { E_MapLk, "Invalid map node linkage", fsckMsgError, fsckLevel1, 0, }, + { E_KeyLen, "Invalid key length", fsckMsgError, fsckLevel1, 0, }, + { E_KeyOrd, "Keys out of order", fsckMsgError, fsckLevel1, 0, }, + { E_BadMapN, "Invalid map node", fsckMsgError, fsckLevel1, 0, }, + { E_BadHdrN, "Invalid header node", fsckMsgError, fsckLevel1, 0, }, + { E_BTDepth, "Exceeded maximum B-tree depth", fsckMsgError, fsckLevel1, 0, }, + { E_CatRec, "Invalid catalog record type", fsckMsgError, fsckLevel1, 0, }, + { E_LenDir, "Invalid directory record length", fsckMsgError, fsckLevel1, 0, }, + + /* 530 - 539 */ + { E_LenThd, "Invalid thread record length", fsckMsgError, fsckLevel1, 0, }, + { E_LenFil, "Invalid file record length", fsckMsgError, fsckLevel1, 0, }, + { E_NoRtThd, "Missing thread record for root dir", fsckMsgError, fsckLevel1, 0, }, + { E_NoThd, "Missing thread record (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_NoDir, "Missing directory record (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_ThdKey, "Invalid key for thread record", fsckMsgError, fsckLevel1, 0, }, + { E_ThdCN, "Invalid parent CName in thread record", fsckMsgError, fsckLevel1, 0, }, + { E_LenCDR, "Invalid catalog record length", fsckMsgError, fsckLevel1, 0, }, + { E_DirLoop, "Loop in directory hierarchy", fsckMsgError, fsckLevel1, 0, }, + { E_RtDirCnt, "Invalid root directory count", fsckMsgError, fsckLevel1, 0, }, + + /* 540 - 549 */ + { E_RtFilCnt, "Invalid root file count", fsckMsgError, fsckLevel1, 0, }, + { E_DirCnt, "Invalid volume directory count", fsckMsgError, fsckLevel1, 0, 
}, + { E_FilCnt, "Invalid volume file count", fsckMsgError, fsckLevel1, 0, }, + { E_CatPEOF, "Invalid catalog PEOF", fsckMsgError, fsckLevel1, 0, }, + { E_ExtPEOF, "Invalid extent file PEOF", fsckMsgError, fsckLevel1, 0, }, + { E_CatDepth, "Nesting of folders has exceeded %d folders", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_NoFThdFlg, "File thread flag not set in file record", fsckMsgError, fsckLevel1, 0, }, + { E_CatalogFlagsNotZero, "Reserved fields in the catalog record have incorrect data", fsckMsgError, fsckLevel1, 0, }, + { E_BadFileName, "Invalid file name", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidClumpSize, "Invalid file clump size", fsckMsgError, fsckLevel1, 0, }, + + /* 550 - 559 */ + { E_InvalidBTreeHeader, "Invalid B-tree header", fsckMsgError, fsckLevel1, 0, }, + { E_LockedDirName, "Directory name locked", fsckMsgError, fsckLevel1, 0, }, + { E_EntryNotFound, "Catalog file entry not found for extent", fsckMsgError, fsckLevel1, 0, }, + { E_FreeBlocks, "Invalid volume free block count", fsckMsgError, fsckLevel1, 0, }, + { E_MDBDamaged, "Master Directory Block needs minor repair", fsckMsgError, fsckLevel1, 0, }, + { E_VolumeHeaderDamaged, "Volume header needs minor repair", fsckMsgError, fsckLevel1, 0, }, + { E_VBMDamaged, "Volume bitmap needs minor repair for under-allocation", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidNodeSize, "Invalid B-tree node size", fsckMsgError, fsckLevel1, 0, }, + { E_LeafCnt, "Invalid leaf record count", fsckMsgError, fsckLevel1, 0, }, + { E_BadValue, "(It should be %s instead of %s)", fsckMsgDamageInfo,fsckLevel1, 2, (const int[]){ fsckTypeString, fsckTypeString } }, + + /* 560 - 569 */ + { E_InvalidID, "Invalid file or directory ID found", fsckMsgError, fsckLevel1, 0, }, + { E_VolumeHeaderTooNew, "I can't understand this version of HFS Plus", fsckMsgError, fsckLevel1, 0, }, + { E_DiskFull, "Disk full error", fsckMsgError, fsckLevel1, 0, }, + { E_InternalFileOverlap, "Internal files overlap (file %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_InvalidVolumeHeader, "Invalid volume header", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidMDBdrAlBlSt, "HFS wrapper volume needs repair", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidWrapperExtents, "Wrapper catalog file location needs repair", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidLinkCount, "Indirect node %d needs link count adjustment", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_UnlinkedFile, "Orphaned open unlinked file %s", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeFile } }, + { E_InvalidPermissions, "Invalid BSD file type", fsckMsgError, fsckLevel1, 0, }, + + /* 570 - 579 */ + { E_InvalidUID_Unused, "Invalid BSD User ID", fsckMsgError, fsckLevel1, 0, }, + { E_IllegalName, "Illegal name", fsckMsgError, fsckLevel1, 0, }, + { E_IncorrectNumThdRcd, "Incorrect number of thread records", fsckMsgError, fsckLevel1, 0, }, + { E_SymlinkCreate, "Cannot create links to all corrupt files", fsckMsgError, fsckLevel1, 0, }, + { E_BadJournal, "Invalid content in journal", fsckMsgError, fsckLevel1, 0, }, + { E_IncorrectAttrCount, "Incorrect number of extended attributes", fsckMsgError, fsckLevel1, 0, }, + { E_IncorrectSecurityCount, "Incorrect number of Access Control Lists", fsckMsgError, fsckLevel1, 0, }, + { E_PEOAttr, "Incorrect block count for attribute %s of file %s", fsckMsgError, fsckLevel1, 2, (const int[]){ fsckTypeString, fsckTypeFile } }, + { E_LEOAttr, "Incorrect size for attribute %s of file %s", fsckMsgError, 
fsckLevel1, 2, (const int[]){ fsckTypeString, fsckTypeFile } }, + { E_AttrRec, "Invalid attribute record", fsckMsgError, fsckLevel1, 0, }, + + /* 580 - 589 */ + { E_FldCount, "Incorrect folder count in a directory (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_HsFldCount, "HasFolderCount flag needs to be set (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_BadPermPrivDir, "Incorrect permissions for private directory", fsckMsgError, fsckLevel1, 0, }, + { E_DirInodeBadFlags, "Incorrect flags for directory inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_DirInodeBadParent, "Invalid parent for directory inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_DirInodeBadName, "Invalid name for directory inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_DirHardLinkChain, "Incorrect number of directory hard links", fsckMsgError, fsckLevel1, 0, }, + { E_DirHardLinkOwnerFlags, "Incorrect owner flags for directory hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_DirHardLinkFinderInfo, "Invalid finder info for directory hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_DirLinkAncestorFlags, "Incorrect flags for directory hard link ancestor (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + + /* 590 - 599 */ + { E_BadParentHierarchy, "Bad parent directory hierarchy (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt } }, + { E_DirHardLinkNesting, "Maximum nesting of folders and directory hard links reached", fsckMsgError, fsckLevel1, 0, }, + { E_MissingPrivDir, "Missing private directory for directory hard links", fsckMsgError, fsckLevel1, 0, }, + { E_InvalidLinkChainPrev, "Previous ID in a hard link chain is incorrect (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_InvalidLinkChainNext, "Next ID in a hard link chain is incorrect (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileInodeBadFlags, "Incorrect flags for file inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileInodeBadParent, "Invalid parent for file inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileInodeBadName, "Invalid name for file inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileHardLinkChain, "Incorrect number of file hard links", fsckMsgError, fsckLevel1, 0, }, + { E_FileHardLinkFinderInfo, "Invalid finder info for file hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + + /* 600 - 609 */ + { E_InvalidLinkChainFirst, "Invalid first link in hard link chain (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileLinkBadFlags, "Incorrect flags for file hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_DirLinkBadFlags, "Incorrect flags for directory hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_OrphanFileLink, "Orphaned file hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_OrphanDirLink, "Orphaned directory hard link (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_OrphanFileInode, "Orphaned file inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ 
fsckTypeInt, } }, + { E_OrphanDirInode, "Orphaned directory inode (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_OvlExtID, "Overlapped extent allocation (id = %d)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_UnusedNodeNotZeroed, "Unused node is not erased (node = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_VBMDamagedOverAlloc, "Volume bitmap needs minor repair for orphaned blocks", fsckMsgError, fsckLevel1, 0, }, + + /* 610 - 619 */ + { E_BadHardLinkDate, "Bad hard link creation date", fsckMsgError, fsckLevel1, 0, }, + { E_DirtyJournal, "Journal need to be replayed but volume is read-only", fsckMsgError, fsckLevel1, 0, }, + { E_LinkChainNonLink, "File record has hard link chain flag (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_LinkHasData, "Hard link record has data extents (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_FileLinkCountError, "File has incorrect number of links (id = %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_BTreeSplitNode, "B-tree node is split across extents (file id %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_BadSymLink, "Bad information for symbolic link (file id %u)", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypeInt, } }, + { E_BadSymLinkLength, "Symbolic link (file id %u) has bad length (is %u, should be %u)", + fsckMsgError, fsckLevel1, 3, (const int[]){ fsckTypeInt, fsckTypeInt, fsckTypeInt} }, + { E_BadSymLinkName, "Bad symbolic link is `%s'", fsckMsgError, fsckLevel1, 1, (const int[]){ fsckTypePath, } }, + + /* And all-zeroes to indicate the end */ + { 0, }, +}; + diff --git a/fsck_hfs/fsck_keys.h b/fsck_hfs/fsck_keys.h new file mode 100644 index 0000000..c2a8c52 --- /dev/null +++ b/fsck_hfs/fsck_keys.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* fsck_keys.h + * + * This file contains the key-value strings that a client can expect + * to receive from fsck when it generates XML output. The clients + * should use this file to parse fsck output. + */ + +#ifndef _FSCK_KEYS_H +#define _FSCK_KEYS_H + +/* + * Keys for the plist generated by fsck + */ +#define kfsckVerbosity "fsck_verbosity" /* Verbosity level at which this message should be displayed to the user */ +#define kfsckType "fsck_msg_type" /* Type of fsck message (status, error, etc.) 
*/ +#define kfsckMsgNumber "fsck_msg_number" /* Unique number associated with given message string */ +#define kfsckMsgString "fsck_msg_string" /* String that should be displayed to the user */ +#define kfsckParams "parameters" /* Array of parameters, if any, for the message string */ + +/* + * Keys for type of parameters, if any, for the message strings. + * The UI client can simply display the parameters to the user, + * or can handle them differently. + */ +#define kfsckParamIntegerKey "integer" /* positive integer */ +#define kfsckParamLongKey "long" /* positive long number */ +#define kfsckParamStringKey "string" /* UTF-8 string */ +#define kfsckParamPathKey "path" /* path to a file or directory in the volume */ +#define kfsckParamFileKey "file" /* name of file */ +#define kfsckParamDirectoryKey "directory" /* name of directory */ +#define kfsckParamVolumeKey "volumename" /* name of a volume */ +#define kfsckParamFSTypeKey "fstype" /* type of file system being checked */ + +/* + * The type of messages that can be generated by fsck_hfs. + * These are the values corresponding to fsck_msg_type. + */ +#define kfsckUnknown "UNKNOWN" /* type of message generated is unknown */ +#define kfsckVerify "VERIFY" /* fsck is performing a read-only operation on the volume */ +#define kfsckRepair "REPAIR" /* fsck is writing to file system to repair a corruption */ +#define kfsckSuccess "SUCCESS" /* verify found that the volume is clean, or repair was successful */ +#define kfsckFail "FAIL" /* verify found that the volume is corrupt, or verify did not complete due to error, or repair failed */ +#define kfsckError "ERROR" /* information of corruption found or condition that causes verify/repair to fail */ +#define kfsckDamageinfo "DAMAGEINFO" /* information about corrupt files/folders */ +#define kfsckInformation "INFORMATION" /* information about an error message or any fsck operation */ +#define kfsckProgress "PROGRESS" /* percentage progress of verify/repair operation */ + +/* Verbosity levels */ +#define kfsckLevel0 "0" /* level 0 messages should always be displayed to the user */ +#define kfsckLevel1 "1" /* level 1 messages should be only displayed in advanced mode */ + +#endif /* _FSCK_KEYS_H */ diff --git a/fsck_hfs/fsck_messages.c b/fsck_hfs/fsck_messages.c new file mode 100644 index 0000000..4047941 --- /dev/null +++ b/fsck_hfs/fsck_messages.c @@ -0,0 +1,1162 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
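A client-side sketch (not part of this patch) of how the keys in fsck_keys.h might be consumed; it assumes the caller has already split fsck_hfs's XML output into a single <plist>...</plist> chunk held in buf/len, and that CoreFoundation will accept such a fragment without an XML declaration (a real client may need to prepend one). The helper name fsck_chunk_msgnum is purely illustrative.

#include <CoreFoundation/CoreFoundation.h>
#include "fsck_keys.h"

/* Hypothetical helper: pull fsck_msg_number out of one plist chunk, or return -1. */
static int
fsck_chunk_msgnum(const UInt8 *buf, CFIndex len)
{
	int msgnum = -1;
	CFDataRef data = CFDataCreate(kCFAllocatorDefault, buf, len);
	if (data == NULL)
		return -1;
	CFPropertyListRef plist = CFPropertyListCreateWithData(kCFAllocatorDefault,
	    data, kCFPropertyListImmutable, NULL, NULL);
	CFRelease(data);
	if (plist == NULL)
		return -1;
	if (CFGetTypeID(plist) == CFDictionaryGetTypeID()) {
		/* kfsckMsgNumber expands to a string literal, so CFSTR() can wrap it. */
		CFNumberRef num = (CFNumberRef)CFDictionaryGetValue(
		    (CFDictionaryRef)plist, CFSTR(kfsckMsgNumber));
		if (num != NULL)
			CFNumberGetValue(num, kCFNumberIntType, &msgnum);
	}
	CFRelease(plist);
	return msgnum;
}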
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <stddef.h> +#include <stdarg.h> +#include <string.h> +#include <assert.h> +#include <Block.h> + +#include "fsck_messages.h" +#include "fsck_keys.h" +#include "fsck_msgnums.h" + +extern fsck_message_t fsck_messages_common[]; + +// The following structures are used internally, only +struct messages { + int low; + int high; + fsck_message_t *msgs; + struct messages *next, *prev; +}; + +#define cfFromFD 0x01 + +/* + * The internal verson of fsck_ctx_t -- this describes the output type, + * where it goes, etc. It's an opaque type so that it can change size + * in the future without affecting any clients of the code. + */ + +struct context { + FILE *fp; // output file structure + int flags; // various flags, mostly private + int verb; // the verbosity of the program -- controls what is output + enum fsck_output_type style; + enum fsck_default_answer_type resp; // none, no, or yes + int num; // number of messages in the array + fsck_message_t **msgs; + void (*writer)(fsck_ctx_t, const char*); // write strings to stdout + void (*logger)(fsck_ctx_t, const char *); // write strings to log file + char guiControl; + char xmlControl; + char writeToLog; // When 1, the string should be written to log file, otherwise to standard out. + fsckBlock_t preMessage; + fsckBlock_t postMessage; +}; + +/* + * printv(fsck_ctxt_t, const char *, va_list) + * Take the format and ap list, and turn them into a string. + * Then call the writer to print it out (or do whatever + * the writer wants with it, if it's an app-supplised function). + * + */ +static void +printv(fsck_ctx_t c, const char *fmt, va_list ap) +{ + struct context *ctx = (struct context *)c; + char buf[BUFSIZ + 1]; + size_t length; + va_list ap2; + + if (c == NULL) + return; + __va_copy(ap2, ap); // Just in case we need it + length = vsnprintf(buf, BUFSIZ, fmt, ap); + if (length > BUFSIZ) { + // We need to allocate space for it + size_t l2 = length + 1; + char *bufp = malloc(l2); + if (bufp == NULL) { + strcpy(buf, "* * * cannot allocate memory * * *\n"); + bufp = buf; + } else { + length = vsnprintf(bufp, length, fmt, ap2); + if (length >= l2) { // This should not happen! + strcpy(buf, " * * * cannot allocate memory * * *\n"); + free(bufp); + bufp = buf; + } else { + if (ctx->writer) (ctx->writer)(ctx, bufp); + free(bufp); + bufp = NULL; + } + } + if (bufp == NULL) + return; + } + + // If the current state of printing is logging to file, + // call the logger that writes strings only in traditional + // output forms. Otherwise, print the strings in the + // format option provided by the caller. + if (ctx->writeToLog == 1) { + if (ctx->logger) (ctx->logger)(ctx, buf); + } else { + if (ctx->writer) (ctx->writer)(ctx, buf); + } + return; +} + +/* + * printargs(fsck_ctx_t, const char *, ...) + * An argument-list verison of printv. It simply wraps up + * the argument list in a va_list, and then calls printv. + */ +static void +printargs(fsck_ctx_t c, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + printv(c, fmt, ap); +} + +/* + * stdprint(fsck_ctx_t, const char *) + * Default writer. Just prints to the set FILE*, or stdout + * if it's not set. + */ + +static void +stdprint(fsck_ctx_t c, const char *str) +{ + struct context *ctx = (struct context*)c; + if (c) { + fputs(str, ctx->fp ? ctx->fp : stdout); + fflush(ctx->fp ? ctx->fp : stdout); + } + +} +/* + * typestring(int type) + * Return a string value corresponding to the type. 
This is used + * to present it during XML output, as one of the appropriate + * tags. + */ +static const char * +typestring(int type) +{ + switch (type) { + case fsckMsgVerify: + return kfsckVerify; + case fsckMsgInfo: + return kfsckInformation; + case fsckMsgRepair: + return kfsckRepair; + case fsckMsgSuccess: + return kfsckSuccess; + case fsckMsgError: + return kfsckError; + case fsckMsgFail: + return kfsckFail; + case fsckMsgDamageInfo: + return kfsckDamageinfo; + case fsckMsgProgress: + return kfsckProgress; + case fsckMsgNotice: + return kfsckInformation; + default: + return kfsckUnknown; + } +} + +/* + * verbosity_string(int type) + * Return a string value corresponding to the verbosity. This is + * used to present it during XML output, as one of the appropriate + * tags. + */ +static const char * +verbosity_string(int level) +{ + switch(level) { + case fsckLevel0: + return kfsckLevel0; + case fsckLevel1: + default: + return kfsckLevel1; + } +} + +/* + * convertfmt(const char *in) + * This is an ugly little function whose job is to convert + * from a normal printf-style string (e.g., "How now %s cow?") + * into something that can be used with Cocoa formatting. This + * means replacing each "%<formatter>" with "%<number>$@"; the + * reason we do this is so that the internationalized strings can + * move parameters around as desired (e.g., in language A, the third + * parameter may need to be first). The caller needs to free the + * return value. + */ +static char * +convertfmt(const char *in) +{ + char *retval = NULL; + int numargs = 0; + char *cp; + enum { fNone, fPercent } fs; + + for (cp = (char*)in; cp; cp = strchr(cp, '%')) { + numargs++; + cp++; + } + + retval = calloc(1, strlen(in) + numargs * 5 + 1); + if (retval == NULL) + return NULL; + + fs = fNone; + numargs = 0; + for (cp = retval; *in; in++) { + if (fs == fNone) { + *cp++ = *in; + if (*in == '%') { + if (in[1] == '%') { + *cp++ = '%'; + in++; + } else { + fs = fPercent; + cp += sprintf(cp, "%d$@", ++numargs); + } + } + } else if (fs == fPercent) { + switch (*in) { + case 'd': case 'i': case 'o': case 'u': case 'x': case 'l': + case 'X': case 'D': case 'O': case 'U': case 'e': + case 'E': case 'f': case 'F': case 'g': case 'G': + case 'a': case 'A': case 'c': case 'C': case 's': + case 'S': case 'p': case 'n': + fs = fNone; + break; + } + } + } + *cp = 0; + return retval; +} + +/* + * fsckCreate() + * Allocates space for an fsck_ctx_t context. It also sets up + * the standard message blocks (defined earlier in this file). + * It will return NULL in the case of any error. + */ +fsck_ctx_t +fsckCreate(void) +{ + struct context *rv = NULL; + + rv = calloc(1, sizeof(*rv)); + if (rv == NULL) { + return NULL; + } + if (fsckAddMessages(rv, fsck_messages_common) == -1) { + fsckDestroy(rv); + return NULL; + } + fsckSetWriter(rv, &stdprint); + + return (fsck_ctx_t)rv; +} + +/* + * fsckSetBlock() + * Sets the block to be called for the specific phase -- currently, only + * before or after a message is to be printed/logged. The block is copied + * for later use. 
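A minimal caller-side sketch of installing such a block (assuming the caller is built with blocks support and includes fsck_msgnums.h for the message numbers); here a pre-message block filters out progress messages and lets everything else through:

#include <stdarg.h>
#include "fsck_messages.h"
#include "fsck_msgnums.h"

/* Hypothetical helper: silence fsckProgress messages on an existing context. */
static void
suppress_progress(fsck_ctx_t ctx)
{
	fsckSetBlock(ctx, fsckPhaseBeforeMessage,
	    ^fsck_block_status_t (fsck_ctx_t c, int msgNum, va_list ap) {
		if (msgNum == fsckProgress)
			return fsckBlockIgnore;		/* skip this message entirely */
		return fsckBlockContinue;		/* otherwise print as usual */
	    });
}

Since fsckSetBlock() copies the block, the caller does not need to keep its own reference alive.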
+ */ +void +fsckSetBlock(fsck_ctx_t c, fsck_block_phase_t phase, fsckBlock_t bp) +{ + struct context *ctx = c; + if (c != NULL) { + switch (phase) { + case fsckPhaseBeforeMessage: + if (ctx->preMessage) { + Block_release(ctx->preMessage); + ctx->preMessage = NULL; + } + if (bp) + ctx->preMessage = (fsckBlock_t)Block_copy(bp); + break; + case fsckPhaseAfterMessage: + if (ctx->postMessage) { + Block_release(ctx->postMessage); + ctx->postMessage = NULL; + } + if (bp) + ctx->postMessage = (fsckBlock_t)Block_copy(bp); + break; + case fsckPhaseNone: + /* Just here for compiler warnings */ + break; + } + + } + return; +} + +/* + * fsckGetBlock() + * Return the pointer to the block for the specified phase. The block pointer + * is not copied. + */ +fsckBlock_t +fsckGetBlock(fsck_ctx_t c, fsck_block_phase_t phase) +{ + struct context *ctx = c; + fsckBlock_t retval = NULL; + if (c != NULL) { + switch (phase) { + case fsckPhaseBeforeMessage: + retval = ctx->preMessage; + break; + case fsckPhaseAfterMessage: + retval = ctx->postMessage; + break; + case fsckPhaseNone: + break; + } + } + return retval; +} + +/* + * fsckSetWriter(context, void (*)(fsck_ctx_t, const char *) + * Call a function for each message to be printed. + * This defaults to stdprint (see above). + */ +int +fsckSetWriter(fsck_ctx_t c, void (*fp)(fsck_ctx_t, const char*)) +{ + struct context *ctx = c; + if (c != NULL) { + ctx->writer = fp; + return 0; + } else { + return -1; + } +} + +/* Initialize the logger function that will write strings to log file */ +int +fsckSetLogger(fsck_ctx_t c, void (*fp)(fsck_ctx_t, const char*)) +{ + struct context *ctx = c; + if (c != NULL) { + ctx->logger = fp; + return 0; + } else { + return -1; + } +} + +/* + * fsckSetOutput(context, FILE*) + * Set the FILE* to be used for output. Returns + * 0 on success, and -1 if it has already been set. + */ +int +fsckSetOutput(fsck_ctx_t c, FILE *fp) +{ + struct context *ctx = c; + + if (c != NULL) { + ctx->fp = fp; + return 0; + } else + return -1; +} + +/* + * fsckSetFile(context, fd) + * Use a file descriptor, instead of a FILE*, for output. + * Because of how stdio works, you should not use 1 or 2 + * for this -- use fsckSetOutput() with stdout/stderr instead. + * If you do use this, then fsckDestroy() will close the FILE* + * it creates here. + * It returns -1 on error, and 0 on success. + */ +int +fsckSetFile(fsck_ctx_t c, int f) +{ + struct context *ctx = c; + + if (c != NULL) { + FILE *out = fdopen(f, "w"); + + if (out != NULL) { + ctx->fp = out; + ctx->flags |= cfFromFD; + return 0; + } + } + return -1; +} + +/* + * fsckSetVerbosity(context, level) + * Sets the verbosity level associated with this context. + * This is used to determine which messages are output -- only + * messages with a level equal to, or less than, the context's + * verbosity level are output. + */ +int +fsckSetVerbosity(fsck_ctx_t c, int v) +{ + struct context *ctx = c; + + if (c != NULL) { + ctx->verb = v; + return 0; + } + return -1; +} + +/* + * fsckGetVerbosity(context) + * Return the verbosity level previously set, or -1 on error. + */ +int +fsckGetVerbosity(fsck_ctx_t c) +{ + struct context *ctx = c; + + return ctx ? ctx->verb : -1; +} + +/* + * fsckSetOutputStyle(context, output_type) + * Set the output style to one of the defined style: + * Traditional (normal terminal-output); GUI (the parenthesized + * method used previously by DM/DU); and XML (the new plist + * format that is the raison d'etre for this code). It does not + * (yet) check if the input value is sane. 
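For context, a sketch of the call sequence a client might use around this (loosely mirroring the FSCK_TEST harness at the end of this file); the volume name is illustrative:

#include <stdio.h>
#include "fsck_messages.h"
#include "fsck_msgnums.h"

/* Hypothetical example: emit one message as an XML plist chunk on stdout. */
static void
emit_example(void)
{
	fsck_ctx_t ctx = fsckCreate();
	if (ctx == NULL)
		return;
	fsckSetOutputStyle(ctx, fsckOutputXML);
	fsckSetDefaultResponse(ctx, fsckDefaultYes);	/* never block on a prompt */
	fsckSetOutput(ctx, stdout);
	fsckPrint(ctx, fsckVolumeOK, "Macintosh HD");	/* fsckVolumeOK takes one volume-name argument */
	fsckDestroy(ctx);
}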
+ */ +int +fsckSetOutputStyle(fsck_ctx_t c, enum fsck_output_type s) +{ + struct context *ctx = c; + + if (c != NULL) { + ctx->style = s; + return 0; + } + return -1; +} + +/* + * fsckGetStyle(context) + * Return the output style set for this context, or + * fsckOUtputUndefined. + */ +enum fsck_output_type +fsckGetOutputStyle(fsck_ctx_t c) +{ + struct context *ctx = c; + + return ctx ? ctx->style : fsckOutputUndefined; +} + +/* + * fsckSetDefaultResponse(context, default_answer_tye) + * The purpose of this function is to allow fsck to run without + * interaction, and have a default answer (yes or no) for any + * question that might be presented. See fsckAskPrompt() + */ +int +fsckSetDefaultResponse(fsck_ctx_t c, enum fsck_default_answer_type r) +{ + struct context *ctx = c; + + if (ctx) { + ctx->resp = r; + return 0; + } + return -1; +} + +/* + * fsckAskPrompt(context, prompt, ...) + * Ask a question of the user, preceded by the given + * printf-format prompt. E.g., "CONTINUE? "); the + * question mark should be included if you want it + * displayed. If a default answer has been set, then + * it will be used; otherwise, it will try to get an + * answer from the user. Return values are 1 for "yes", + * 0 for "no"; -1 for an invalid default; and -2 for error. + */ +int +fsckAskPrompt(fsck_ctx_t c, const char *prompt, ...) +{ + struct context *ctx = c; + int rv = -2; + va_list ap; + + if (ctx == NULL) + return -1; + + va_start(ap, prompt); + + if (ctx->style == fsckOutputTraditional && ctx->fp) { + int count = 0; +doit: + printv(ctx, prompt, ap); + switch (ctx->resp) { + default: + rv = -1; + break; + case fsckDefaultNo: + rv = 0; + break; + case fsckDefaultYes: + rv = 1; + break; + } + if (rv == -1) { + char *resp = NULL; + size_t len; + + count++; + resp = fgetln(stdin, &len); + if (resp == NULL || len == 0) { + if (count > 10) { + // Only ask so many times... + rv = 0; + printargs(ctx, "%s", "\n"); + goto done; + } else { + goto doit; + } + } + switch (resp[0]) { + case 'y': + case 'Y': + rv = 1; + break; + case 'n': + case 'N': + rv = 0; + break; + default: + goto doit; + } + } else { + printargs(ctx, "%s", rv == 0 ? "NO\n" : "YES\n"); + } + } else { + switch (ctx->resp) { + default: + rv = -1; + break; + case fsckDefaultNo: + rv = 0; + break; + case fsckDefaultYes: + rv = 1; + break; + } + } +done: + return rv; +} + +/* + * fsckDestroy(context) + * Finish up with a context, and release any resources + * it had. + */ +void +fsckDestroy(fsck_ctx_t c) +{ + struct context *ctx = c; + + if (c == NULL) + return; + + if (ctx->msgs) + free(ctx->msgs); + + if (ctx->flags & cfFromFD) { + fclose(ctx->fp); + } + if (ctx->preMessage) { + Block_release(ctx->preMessage); + } + if (ctx->postMessage) { + Block_release(ctx->postMessage); + } + + free(ctx); + return; +} + +/* + * msgCompar(void*, void*) + * Used by fsckAddMessages() for qsort(). All it does is + * compare the message number for two fsck_messages. + */ +static int +msgCompar(const void *p1, const void *p2) +{ + fsck_message_t *const *k1 = p1, *const *k2 = p2; + + return ((*k1)->msgnum - (*k2)->msgnum); +} + +/* + * fsckAddMessages(context, message*) + * Add a block of messages to this context. We do not assume, + * or require, that they are in sorted order. This is probably + * not the best it could be, becasue first we look through the + * block once, counting how many messages there are; then we + * allocate extra space for the existing block, and copy in the + * messages to it. 
This means 2 passes through, which isn't ideal + * (however, it should be called very infrequently). After that, + * we sort the new block, sorting based on the message number. + * In the event of failure, it'll return -1. + * XXX We make no attempt to ensure that there are not duplicate + * message numbers! + */ +int +fsckAddMessages(fsck_ctx_t c, fsck_message_t *m) +{ + struct context *ctx = c; + fsck_message_t *ptr, **new; + int cnt, i; + + if (ctx == NULL || m == NULL || m->msg == NULL) + return 0; + + for (cnt = 0, ptr = m; ptr->msg; ptr++, cnt++) + ; + + new = realloc(ctx->msgs, sizeof(fsck_message_t*) * (ctx->num + cnt)); + if (new == NULL) + return -1; + ctx->msgs = new; + + for (i = 0; i < cnt; i++) { + ctx->msgs[i + ctx->num] = &m[i]; + } + ctx->num += cnt; + + qsort(ctx->msgs, ctx->num, sizeof(fsck_message_t*), msgCompar); + + return 0; +} + +/* + * bCompar(void *, void *) + * An fsck_message_t* comparision function for + * bsearch(). The first parameter is a pointer to + * the message number we're searching for; the second + * parameter is a pointer to an fsck_message_t. + * bsearch() needs to know whether that message is less than, + * equal to, or greater than the desired one. + */ +static int +bCompar(const void *kp, const void *ap) +{ + const int *ip = kp; + fsck_message_t * const *mp = ap; + + return (*ip - (*mp)->msgnum); +} + +/* + * findmessage(context, msgnum) + * Find the desired message number in the context. It uses + * bsearch() and... does very little itself. (An earlier version + * did a lot more.) + */ +static fsck_message_t * +findmessage(struct context *ctx, int msgnum) +{ + fsck_message_t **rv; + + if (ctx == NULL) + return NULL; + + rv = bsearch(&msgnum, ctx->msgs, ctx->num, sizeof(rv), bCompar); + + if (rv) + return *rv; + else + return NULL; +} + +/* + * fsckPrintToString(message, va_list) + * fsckPrintString(context, message, va_list) + * These two functions are used to print out a traditional message on the + * console. Note that it outputs "** " for the messages + * it does print out (Verify, Repair, Success, and Fail); + * other messages are not printed out. + * + * fsckPrintToString() is also used for message logging. + * + */ +static char * +fsckPrintToString(fsck_message_t *m, va_list ap) +{ + char *retval = NULL; + char *tmpstr = NULL; + char *astr = ""; // String at beginning + char *pstr = ""; // String at end + + /* No progress messages required in traditional output */ + if (m->type == fsckMsgProgress) { + return NULL; + } + switch (m->type) { + case fsckMsgVerify: + case fsckMsgRepair: + case fsckMsgSuccess: + case fsckMsgFail: + astr = "** "; + break; + + case fsckMsgError: + case fsckMsgDamageInfo: + case fsckMsgInfo: + astr = " "; + break; + case fsckMsgNotice: + pstr = astr = " *****"; + break; + } + vasprintf(&tmpstr, m->msg, ap); + if (tmpstr) { + asprintf(&retval, "%s%s%s\n", astr, tmpstr, pstr); + free(tmpstr); + } + return retval; +} + +static int +fsckPrintString(struct context *ctx, fsck_message_t *m, va_list ap) +{ + // Traditional fsck doesn't print this out + if (m->type != fsckMsgProgress) + { + char *str = fsckPrintToString(m, ap); + if (str) { + printargs(ctx, "%s", str); + free(str); + } + } + return 0; +} + +/* + * fsckPrintXML(context, message, va_list) + * Print out a message in XML (well, plist) format. + * This involves printint out a standard header and closer + * for each message, and calling fflush() when it's done. 
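As a worked example of the chunk this produces, fsckPrint(ctx, fsckVolumeOK, "Macintosh HD") in XML mode would emit roughly the following (message number 104 per fsck_msgnums.h; the volume name is illustrative):

<plist version="1.0">
	<dict>
		<key>fsck_msg_type</key> <string>SUCCESS</string>
		<key>fsck_verbosity</key> <integer>0</integer>
		<key>fsck_msg_number</key> <integer>104</integer>
		<key>fsck_msg_string</key> <string>The volume %1$@ appears to be OK.</string>
		<key>parameters</key>
		<array>
			<dict><key>volumename</key> <string>Macintosh HD</string></dict>
		</array>
	</dict>
</plist>

Note that the format string has already been run through convertfmt(), so the consumer substitutes the entries of the parameters array into the %1$@-style positions.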
+ */ +static int +fsckPrintXML(struct context *ctx, fsck_message_t *m, va_list ap) +{ + char *newmsg = convertfmt(m->msg); + /* See convertfmt() for details */ + if (newmsg == NULL) { + return -1; + } + printargs(ctx, "%s", "<plist version=\"1.0\">\n"); + printargs(ctx, "%s", "\t<dict>\n"); + printargs(ctx, "\t\t<key>%s</key> <string>%s</string>\n", + kfsckType, typestring(m->type)); + /* + * XXX - should be a "cleaner" way of doing this: we only want + * to print out these keys if it's NOT a progress indicator. + */ + if (m->msgnum != fsckProgress) { + printargs(ctx, "\t\t<key>%s</key> <integer>%s</integer>\n", + kfsckVerbosity, verbosity_string(m->level)); + printargs(ctx, "\t\t<key>%s</key> <integer>%u</integer>\n", + kfsckMsgNumber, m->msgnum); + printargs(ctx, "\t\t<key>%s</key> <string>%s</string>\n", + kfsckMsgString, newmsg); + } + if (m->numargs > 0) { + int i; + /* + * Each parameter has a type. This basically boils down to + * a string or an integer, but some kinds of strings are + * handled specially. Specifically, paths, volume names, + * etc. + */ + printargs(ctx, "\t\t<key>%s</key>\n", kfsckParams); + printargs(ctx, "%s", "\t\t<array>\n"); + for (i = 0; i < m->numargs; i++) { + if (m->argtype[i] == fsckTypeInt) { + int x = va_arg(ap, int); + printargs(ctx, "\t\t\t<integer>%d</integer>\n", x); + } else if (m->argtype[i] == fsckTypeLong) { + long x = va_arg(ap, long); + printargs(ctx, "\t\t\t<integer>%ld</integer>\n", x); + } else if (m->argtype[i] == fsckTypeFileSize) { + off_t x = va_arg(ap, off_t); + printargs(ctx, "\t\t\t<integer>%llu</integer>\n", x); + } else if (m->argtype[i] == fsckTypeString) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<string>%s</string>\n", p); + } else if (m->argtype[i] == fsckTypePath) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<dict><key>%s</key> <string>%s</string></dict>\n", kfsckParamPathKey, p); + } else if (m->argtype[i] == fsckTypeFile) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<dict><key>%s</key> <string>%s</string></dict>\n", kfsckParamFileKey, p); + } else if (m->argtype[i] == fsckTypeDirectory) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<dict><key>%s</key> <string>%s</string></dict>\n", kfsckParamDirectoryKey, p); + } else if (m->argtype[i] == fsckTypeVolume) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<dict><key>%s</key> <string>%s</string></dict>\n", kfsckParamVolumeKey, p); + } else if (m->argtype[i] == fsckTypeFSType) { + char *p = va_arg(ap, char*); + printargs(ctx, "\t\t\t<dict><key>%s</key> <string>%s</string></dict>\n", kfsckParamFSTypeKey, p); + } else if (m->argtype[i] == fsckTypeProgress) { + int x = va_arg(ap, int); + printargs(ctx, "\t\t\t<integer>%d</integer>\n", x); + } else { + /* XXX - what should default be --- string, integer, pointer? */ + void *p = va_arg(ap, void*); + printargs(ctx, "\t\t\t<integer>%p</integer>\n", p); + } + } + printargs(ctx, "%s", "\t\t</array>\n"); + } + printargs(ctx, "%s", "\t</dict>\n"); + printargs(ctx, "%s", "</plist>\n"); + free(newmsg); + return 0; +} + +/* + * fsckPrintGUI(context, message, va_list) + * Print out a message for the previous interface for DM/DU; + * this looks like: + * ('X', "message", z) + * where 'X' is a type ('S' for success, 'E' for error, and + * '%' for progress), and z is an argument count. (Okay, + * progress counts are just "(% z)", where "z" is a number + * between 0 and 100). If there are any arguments, they follow + * one per line. 
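A worked example: fsckVolumeNotRepairedTries (a fsckMsgFail message carrying a volume name and an attempt count, both illustrative here) would come out as

(E,"The volume %1$@ could not be repaired after %2$@ attempts.",2)
Macintosh HD
3

while a 10% progress message reduces to just

(10 %)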
+ */ +static int +fsckPrintGUI(struct context *ctx, fsck_message_t *m, va_list ap) +{ + char t; + int i; + char *newmsg = convertfmt(m->msg); + if (newmsg == NULL) + return -1; + + switch (m->type) { + case fsckMsgVerify: + case fsckMsgInfo: + case fsckMsgRepair: + case fsckMsgSuccess: + case fsckMsgNotice: + t = 'S'; break; + case fsckMsgError: + case fsckMsgFail: + case fsckMsgDamageInfo: + t = 'E'; break; + case fsckMsgProgress: + t = '%'; break; + default: + t = '?'; break; + } + if (m->msgnum != fsckProgress) { + printargs(ctx, "(%c,\"%s\",%d)\n", t, newmsg, m->numargs); + } + for (i = 0; i < m->numargs; i++) { + switch (m->argtype[i]) { + case fsckTypeInt: + printargs(ctx, "%d\n", (int)va_arg(ap, int)); break; + case fsckTypeLong: + printargs(ctx, "%ld\n", (long)va_arg(ap, long)); break; + case fsckTypeFileSize: + printargs(ctx, "%llu\n", (off_t)va_arg(ap, off_t)); break; + case fsckTypeProgress: + printargs(ctx, "(%d %%)\n", (int)va_arg(ap, int)); break; + case fsckTypeString: + case fsckTypePath: + case fsckTypeFile: + case fsckTypeDirectory: + case fsckTypeVolume: + case fsckTypeFSType: + printargs(ctx, "%s\n", (char*)va_arg(ap, char*)); break; + default: + printargs(ctx, "%p\n", (void*)va_arg(ap, void*)); break; + } + } + free(newmsg); + return 0; +} + +/* + * fsckPrintNothing(context, message, va_list) + * Don't actually print anything. Used for testing and debugging, nothing + * else. + */ +static int +fsckPrintNothing(struct context *ctx, fsck_message_t *m, va_list ap) +{ + return -1; +} + +/* + * fsckPrint(context, msgnum, ...) + * Print out a message identified by msgnum, using the data and + * context information in the contexxt. This will look up the message, + * and then print it out to the requested output stream using the style + * that was selected. It returns 0 on success, and -1 on failure. + * + * Note: WriteError() and RcdError() call fsckPrint internally, and + * therefore take care of generating the output correctly. + */ +int +fsckPrint(fsck_ctx_t c, int m, ...) 
+{ + int (*func)(struct context *, fsck_message_t *, va_list); + struct context *ctx = c; + fsck_message_t *msg; + va_list ap; + int retval = 0; + + va_start(ap, m); + + if (c == NULL) + return -1; + + msg = findmessage(ctx, m); + assert(msg != NULL); + if (msg == NULL) { + return -1; // Should log something + } + + switch (ctx->style) { + case fsckOutputTraditional: + func = fsckPrintString; + break; + case fsckOutputGUI: + func = fsckPrintGUI; + break; + case fsckOutputXML: + func = fsckPrintXML; + break; + default: + func = fsckPrintNothing; + break; + } + + if (ctx->preMessage) { + va_list vaBlock; + fsck_block_status_t rv; + + va_copy(vaBlock, ap); + rv = (ctx->preMessage)(c, m, vaBlock); + if (rv == fsckBlockAbort) { + retval = -1; + goto done; + } + if (rv == fsckBlockIgnore) { + retval = 0; + goto done; + } + } + + // Write string in traditional form to log file first + ctx->writeToLog = 1; + va_list logfile_ap; + va_copy(logfile_ap, ap); + retval = fsckPrintString(ctx, msg, logfile_ap); + ctx->writeToLog = 0; + + if (ctx->writer) { + // Now write string to standard output now as per caller's specifications + retval = (*func)(ctx, msg, ap); + } else { + retval = 0; // NULL fp means don't output anything + } + if (ctx->postMessage) { + va_list vaBlock; + fsck_block_status_t rv; + + va_copy(vaBlock, ap); + rv = (ctx->postMessage)(c, m, vaBlock); + if (rv == fsckBlockAbort) { + retval = -1; + goto done; + } + if (rv == fsckBlockIgnore) { + retval = 0; + goto done; + } + } + +done: + return retval; +} + +/* + * fsckMsgClass(context, msgnum) + * Return the message class (Verify, Successs, Failure, etc.) + * for a given message number. If the message number is unknown, + * it returns fsckMsgUnknown. + */ +enum fsck_msgtype +fsckMsgClass(fsck_ctx_t c, int msgNum) +{ + struct context *ctx = c; + fsck_message_t *m; + + if (c == NULL) + return fsckMsgUnknown; + + m = findmessage(ctx, msgNum); + if (m == NULL) + return fsckMsgUnknown; + + return m->type; +} + +/* + * The following section is used to make the internationalizable + * string file; this is a file that contains each message string, + * followed by an '=' and then the string again. This is then doctored + * by the internationalization folks. By putting it in here, this means + * we need to compile the source file (and any others that have the messages + * we care about) specially, and then be run as part of the build process. 
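For reference, each entry of the generated strings file pairs the convertfmt()-converted message with itself; for example, the fsckVolumeOK entry would come out as

"The volume %1$@ appears to be OK." = "The volume %1$@ appears to be OK.";

which the localization tooling then translates on the right-hand side.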
+ */ +#ifdef FSCK_MAKESTRINGS +int +main(int ac, char **av) +{ + fsck_message_t *msg; + extern fsck_message_t hfs_errors[]; + extern fsck_message_t hfs_messages[]; + + printf("/* Standard messages */\n"); + for (msg = fsck_messages_common; + msg->msg != NULL; + msg++) { + char *newstr = convertfmt(msg->msg); + + if (newstr == NULL) { + printf("\"%s\" = \"%s\";\n", msg->msg, msg->msg); + } else { + printf("\"%s\" = \"%s\";\n", newstr, newstr); + free(newstr); + } + } + + printf("\n/* HFS-specific standard messages */\n"); + for (msg = hfs_messages; + msg->msg != NULL; + msg++) { + char *newstr = convertfmt(msg->msg); + + if (newstr == NULL) { + printf("\"%s\" = \"%s\";\n", msg->msg, msg->msg); + } else { + printf("\"%s\" = \"%s\";\n", newstr, newstr); + free(newstr); + } + } + + printf("\n/* HFS-specific errors */\n"); + for (msg = hfs_errors; + msg->msg != NULL; + msg++) { + char *newstr = convertfmt(msg->msg); + + if (newstr == NULL) { + printf("\"%s\" = \"%s\";\n", msg->msg, msg->msg); + } else { + printf("\"%s\" = \"%s\";\n", newstr, newstr); + free(newstr); + } + } + + return 0; +} +#endif /* FSCK_MAKESTRINGS */ + +/* + * This is used only for testing; it'll take some dumb arguments on + * the command line, and then print out some messages. It tests the + * allocation, initialization, and searching. + */ +#ifdef FSCK_TEST +main(int ac, char **av) +{ + fsck_ctx_t fctx; + enum fsck_output_type t = fsckOutputUndefined; + int (*func)(fsck_ctx_t, int, ...); + int i; + + fctx = fsckCreate(); + + if (ac == 2) { + if (!strcmp(av[1], "-g")) { + t = fsckOutputGUI; + fsckSetStyle(fctx, t); + fsckSetDefaultResponse(fctx, fsckDefaultYes); + } else if (!strcmp(av[1], "-s")) { + t = fsckOutputTraditional; + fsckSetStyle(fctx, t); + } else if (!strcmp(av[1], "-x")) { + t = fsckOutputXML; + fsckSetStyle(fctx, t); + fsckSetDefaultResponse(fctx, fsckDefaultYes); + } + } + + fsckSetOutput(fctx, stdout); + fsckPrint(fctx, fsckInformation, "fsck", "version"); + + i = fsckAskPrompt(fctx, "Unknown file %s; remove? [y|n] ", "/tmp/foo"); + if (i == 1) { + fprintf(stderr, "\n\nfile %s is to be removed\n\n", "/tmp/foo"); + } + fsckPrint(fctx, fsckProgress, 10); + fsckPrint(fctx, fsckVolumeNotRepaired); + + fsckDestroy(fctx); + + return 0; +} + +#endif /* FSCK_TEST */ diff --git a/fsck_hfs/fsck_messages.h b/fsck_hfs/fsck_messages.h new file mode 100644 index 0000000..ee2ba3e --- /dev/null +++ b/fsck_hfs/fsck_messages.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <stdio.h> + +#ifndef _FSCK_MESSAGES_H +#define _FSCK_MESSAGES_H + +/* + * Internal structure for each fsck message. This is the same + * structure in which the message number, message string and + * their corresponding attributes are mapped in fsck_strings.c + * and fsck_hfs_strings.c + */ +struct fsck_message { + unsigned int msgnum; /* fsck message number as an unsigned value */ + char *msg; /* fsck message as a C string */ + int type; /* type of message (see fsck_msgtype below) */ + int level; /* verbosity level at which this message should be output/presented to the user */ + int numargs; /* number of arguments for this message string */ + const int *argtype; /* pointer to an array of argument types (see fsck_argtype below) */ +}; +typedef struct fsck_message fsck_message_t; + +typedef void *fsck_ctx_t; + +/* Type of fsck message string. + * These values are internal values used in the mapping array of + * message number and string to identify the type of message for + * each entry. + */ +enum fsck_msgtype { + fsckMsgUnknown = 0, + fsckMsgVerify, /* fsck is performing a read-only operation on the volume */ + fsckMsgRepair, /* fsck is writing to file system to repair a corruption */ + fsckMsgSuccess, /* verify found that the volume is clean, or repair was successful */ + fsckMsgFail, /* verify found that the volume is corrupt, or verify did not complete due to error, or repair failed */ + fsckMsgError, /* information of corruption found or condition that causes verify/repair to fail */ + fsckMsgDamageInfo, /* information about corrupt files/folders */ + fsckMsgInfo, /* information about an error message or any fsck operation */ + fsckMsgProgress, /* percentage progress of verify/repair operation */ + fsckMsgNotice, /* A traditional notice that doesn't fall into other categories */ +}; + +/* Type of parameter for fsck message string. + * These values are internal values used in the mapping array of + * message number and string to identify the type of parameter + * for each entry. + */ +enum fsck_arg_type { + fsckTypeUnknown = 0, + fsckTypeInt, /* positive integer */ + fsckTypeLong, /* positive long value */ + fsckTypeString, /* UTF-8 string */ + fsckTypePath, /* path to a file or directory on the volume */ + fsckTypeFile, /* name of file */ + fsckTypeDirectory, /* name of directory */ + fsckTypeVolume, /* name of a volume */ + fsckTypeProgress, /* percentage progress number */ + fsckTypeFSType, /* type of file system being checked */ + fsckTypeFileSize, /* A file size or offset */ +}; + +/* Verbosity of fsck message string. + * These values are internal values used in the mapping array of + * message number and string to identify the verbosity of each entry. + */ +enum fsck_message_levels { + fsckLevel0 = 0, /* level 0 messages should always be displayed to the user */ + fsckLevel1 = 1 /* level 1 messages should be only displayed in advanced mode */ +}; + +/* Type of fsck_hfs output */ +enum fsck_output_type { + fsckOutputUndefined = 0, + fsckOutputTraditional, /* standard string output */ + fsckOutputGUI, /* output for -g option */ + fsckOutputXML /* XML output for -x option */ +}; + +/* Types of default answers for user input questions in fsck */ +enum fsck_default_answer_type { + fsckDefaultNone = 0, + fsckDefaultNo, + fsckDefaultYes +}; + +/* + * Return value from a status block. The block is called + * in fsckPrint(), before and after a status message is + * printed. 
Returning fsckBlockContinue means to continue as + * it would otherwise; returning fsckBlockAbort means that + * fsckPrint should return an error at that point; and fsckBlockIgnore + * means that fsckPrint should return immediately, but without an error. + * + * The most common use of fsckBlockIgnore would be to suppress extraneous + * messages. + */ +enum fsck_block_status_type { + fsckBlockAbort = -1, + fsckBlockContinue = 0, + fsckBlockIgnore, +}; +typedef enum fsck_block_status_type fsck_block_status_t; + +/* + * Phases for the status block. The block is called in fsckPrint(), + * either before printing the message (with fsckPhaseBeforeMessage), or + * afterwards (with fsckPhaseAfterMessage). It's allowed ot have both + * set up with different blocks. + */ +enum fsck_block_phase_type { + fsckPhaseNone = 0, + fsckPhaseBeforeMessage, + fsckPhaseAfterMessage, +}; +typedef enum fsck_block_phase_type fsck_block_phase_t; + +/* + * The type of a status block. The first argument is the context + * for the messaging; the second argument is the message number; + * the third is a va_list of the arguments for the message. + */ + +typedef fsck_block_status_t (^fsckBlock_t)(fsck_ctx_t, int, va_list); + +extern fsckBlock_t fsckGetBlock(fsck_ctx_t, fsck_block_phase_t); +extern void fsckSetBlock(fsck_ctx_t, fsck_block_phase_t, fsckBlock_t); + +extern fsck_ctx_t fsckCreate(void); +extern int fsckSetOutput(fsck_ctx_t, FILE*); +extern int fsckSetFile(fsck_ctx_t, int); +extern int fsckSetWriter(fsck_ctx_t, void (*)(fsck_ctx_t, const char *)); +extern int fsckSetLogger(fsck_ctx_t, void (*)(fsck_ctx_t, const char *)); +extern int fsckSetVerbosity(fsck_ctx_t, int); +extern int fsckGetVerbosity(fsck_ctx_t); +extern int fsckSetOutputStyle(fsck_ctx_t, enum fsck_output_type); +extern enum fsck_output_type fsckGetOutputStyle(fsck_ctx_t); +extern int fsckSetDefaultResponse(fsck_ctx_t, enum fsck_default_answer_type); +extern int fsckAskPrompt(fsck_ctx_t, const char *, ...); +extern int fsckAddMessages(fsck_ctx_t, fsck_message_t *msgs); +extern int fsckPrint(fsck_ctx_t, int msgNum, ...); +extern enum fsck_msgtype fsckMsgClass(fsck_ctx_t, int msgNum); +extern void fsckDestroy(fsck_ctx_t); + +#endif /* _FSCK_MESSAGES_H */ diff --git a/fsck_hfs/fsck_msgnums.h b/fsck_hfs/fsck_msgnums.h new file mode 100644 index 0000000..eda63e6 --- /dev/null +++ b/fsck_hfs/fsck_msgnums.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2008, 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* fsck_msgnums.h + * + * This file contain fsck status message numbers associated with + * each fsck message string. 
These status message numbers and their + * strings are file system independent. + */ + +#ifndef __FSCK_MSGNUMS_H +#define __FSCK_MSGNUMS_H + +/* Generic fsck status message numbers. These are file system + * independent messages that indicate the current state of verify or + * repair run or provide information about damaged files/folder. + * + * The corresponding strings and the mapping array of message number + * and other attributes exists in fsck_strings.c + */ +enum fsck_msgnum { + fsckUnknown = 100, + + fsckCheckingVolume = 101, /* Checking volume */ + fsckRecheckingVolume = 102, /* Rechecking volume */ + fsckRepairingVolume = 103, /* Repairing volume */ + fsckVolumeOK = 104, /* The volume %s appears to be OK */ + fsckRepairSuccessful = 105, /* The volume %s was repaired successfully */ + fsckVolumeVerifyIncomplete = 106, /* The volume %s could not be verified completely */ + fsckVolumeVerifyIncompleteNoRepair = 107, /* The volume %s could not be verified completely and can not be repaired */ + fsckVolumeCorruptNoRepair = 108, /* The volume %s was found corrupt and can not be repaired */ + fsckVolumeCorruptNeedsRepair = 109, /* The volume %s was found corrupt and needs to be repaired */ + fsckVolumeNotRepaired = 110, /* The volume %s could not be repaired */ + + fsckVolumeNotRepairedInUse = 111, /* The volume %s cannot be repaired when it is in use */ + fsckVolumeNotVerifiedInUse = 112, /* The volume %s cannot be verified when it is in use */ + fsckFileFolderDamage = 113, /* File/folder %s may be damaged */ + fsckFileFolderNotRepaired = 114, /* File/folder %s could not be repaired */ + fsckVolumeNotRepairedTries = 115, /* The volume %s could not be repaired after %d attempts */ + fsckLostFoundDirectory = 116, /* Look for missing items in %s directory */ + fsckCorruptFilesDirectory = 117, /* Look for links to corrupt files in %s directory */ + fsckInformation = 118, /* Using %s (version %s) for checking volume %s of type %s. */ + fsckProgress = 119, /* %d */ + fsckTrimming = 120, /* Trimming unused blocks */ + fsckVolumeName = 121, /* The volume name is %s */ + fsckVolumeModified = 122, /* The volume was modified */ + fsckLimitedRepairs = 123, /* Limited repair mode, not all repairs available */ +}; + +#endif diff --git a/fsck_hfs/fsck_strings.c b/fsck_hfs/fsck_strings.c new file mode 100644 index 0000000..789415e --- /dev/null +++ b/fsck_hfs/fsck_strings.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2008, 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include "fsck_messages.h" +#include "fsck_msgnums.h" + +/* + * Standard fsck message strings (not specific to any filesystem). 
+ * + * The message numbers (first field) for these are defined + * in fsck_msgnums.h; messages can be added to the array in + * any order, as fsckAddMessages will sort them based on the + * message number field. The array needs to end with an all-0 + * field, and no message string can be NULL. + * + * The last field in the structure is a pointer to a constant, + * variable-length array describing the arguments to the message. + * Most messages have no arguments; if a message does have arguments, + * it needs to be one of the types defined in fsck_msgnums.h (enum + * fsck_arg_type). The format specifier in the message string can be a + * SIMPLE printf-style: %d, %i, %u, %o, %x, %s, %c, %p; it needs to be + * converted at run-time to a Cocoa-style specifier, and the conversion + * routine does not handle all of the possible printf variations. + * (See convertfmt() in fsck_messages.c for details.) + */ + +fsck_message_t +fsck_messages_common[] = { + /* Message Number Message Type Verbosity Arguments */ + /* 101 - 110 */ + { fsckCheckingVolume, "Checking volume.", fsckMsgVerify, fsckLevel0, 0, }, + { fsckRecheckingVolume, "Rechecking volume.", fsckMsgVerify, fsckLevel0, 0, }, + { fsckRepairingVolume, "Repairing volume.", fsckMsgRepair, fsckLevel0, 0, }, + { fsckVolumeOK, "The volume %s appears to be OK.", fsckMsgSuccess, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckRepairSuccessful, "The volume %s was repaired successfully.", fsckMsgSuccess, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeVerifyIncomplete, "The volume %s could not be verified completely.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeVerifyIncompleteNoRepair, "The volume %s could not be verified completely and can not be repaired.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeCorruptNoRepair, "The volume %s was found corrupt and can not be repaired.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeCorruptNeedsRepair, "The volume %s was found corrupt and needs to be repaired.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeNotRepaired, "The volume %s could not be repaired.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + + /* 111 - 122 */ + { fsckVolumeNotRepairedInUse, "The volume %s cannot be repaired when it is in use.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeNotVerifiedInUse, "The volume %s cannot be verified when it is in use.", fsckMsgFail, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckFileFolderDamage, "File/folder %s may be damaged.", fsckMsgDamageInfo, fsckLevel0, 1, (const int[]) { fsckTypePath } }, + { fsckFileFolderNotRepaired, "File/folder %s could not be repaired.", fsckMsgDamageInfo, fsckLevel0, 1, (const int[]) { fsckTypePath } }, + { fsckVolumeNotRepairedTries, "The volume %s could not be repaired after %d attempts.", fsckMsgFail, fsckLevel0, 2, (const int[]) { fsckTypeVolume, fsckTypeInt }}, + { fsckLostFoundDirectory, "Look for missing items in %s directory.", fsckMsgRepair, fsckLevel0, 1, (const int[]) { fsckTypeDirectory } }, + { fsckCorruptFilesDirectory, "Look for links to corrupt files in %s directory.", fsckMsgDamageInfo, fsckLevel0, 1, (const int[]) { fsckTypeDirectory }}, + { fsckInformation, "Executing %s (version %s).", fsckMsgInfo, fsckLevel1, 2, (const int[]) { fsckTypeString, fsckTypeString }}, + { fsckProgress, "%d %%", fsckMsgProgress, fsckLevel0, 1, (const int[]) { 
fsckTypeProgress } }, + { fsckTrimming, "Trimming unused blocks.", fsckMsgVerify, fsckLevel0, 0 }, + { fsckVolumeName, "The volume name is %s", fsckMsgInfo, fsckLevel0, 1, (const int[]) { fsckTypeVolume } }, + { fsckVolumeModified, "The volume was modified", fsckMsgNotice, fsckLevel0, 0 }, + { fsckLimitedRepairs, "Limited repair mode, not all repairs available", fsckMsgInfo, fsckLevel0, 0 }, + { 0, }, +}; + diff --git a/fsck_hfs/utilities.c b/fsck_hfs/utilities.c new file mode 100644 index 0000000..683a506 --- /dev/null +++ b/fsck_hfs/utilities.c @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 1999-2000, 2002, 2004, 2007-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include <stddef.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/errno.h> +#include <sys/syslimits.h> +#include <pwd.h> + +#include <ctype.h> +#include <err.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <stdlib.h> +#include <sys/sysctl.h> + +#include "fsck_hfs.h" + +char *rawname __P((char *name)); +char *unrawname __P((char *name)); + + +int +reply(char *question) +{ + int persevere; + char c; + + if (preen) + pfatal("INTERNAL ERROR: GOT TO reply()"); + persevere = !strcmp(question, "CONTINUE"); + plog("\n"); + if (!persevere && (nflag || fswritefd < 0)) { + plog("%s? no\n\n", question); + return (0); + } + if (yflag || (persevere && nflag)) { + plog("%s? yes\n\n", question); + return (1); + } + do { + plog("%s? [yn] ", question); + (void) fflush(stdout); + c = getc(stdin); + while (c != '\n' && getc(stdin) != '\n') + if (feof(stdin)) + return (0); + } while (c != 'y' && c != 'Y' && c != 'n' && c != 'N'); + plog("\n"); + if (c == 'y' || c == 'Y') + return (1); + return (0); +} + + +void +ckfini(markclean) + int markclean; +{ +// register struct bufarea *bp, *nbp; +// int ofsmodified, cnt = 0; + + (void) CacheDestroy(&fscache); + + if (fswritefd < 0) { + (void)close(fsreadfd); + return; + } +#if 0 + flush(fswritefd, &sblk); + if (havesb && sblk.b_bno != SBOFF / dev_bsize && + !preen && reply("UPDATE STANDARD SUPERBLOCK")) { + sblk.b_bno = SBOFF / dev_bsize; + sbdirty(); + flush(fswritefd, &sblk); + } + flush(fswritefd, &cgblk); + free(cgblk.b_un.b_buf); + for (bp = bufhead.b_prev; bp && bp != &bufhead; bp = nbp) { + cnt++; + flush(fswritefd, bp); + nbp = bp->b_prev; + free(bp->b_un.b_buf); + free((char *)bp); + } + if (bufhead.b_size != cnt) + errx(EEXIT, "Panic: lost %d buffers", bufhead.b_size - cnt); + pbp = pdirbp = (struct bufarea *)0; + if (markclean && sblock.fs_clean == 0) { + sblock.fs_clean = 1; + sbdirty(); + ofsmodified = fsmodified; + flush(fswritefd, &sblk); + fsmodified = ofsmodified; + if (!preen) + plog("\n***** FILE SYSTEM MARKED CLEAN *****\n"); + } + if (debug) + plog("cache missed %ld of %ld (%d%%)\n", diskreads, + totalreads, (int)(diskreads * 100 / totalreads)); +#endif + (void)close(fsreadfd); + (void)close(fswritefd); +} + + +char * +blockcheck(char *origname) +{ + struct stat stslash, stblock, stchar; + char *newname, *raw; + int retried = 0; + + hotroot = 0; + if (stat("/", &stslash) < 0) { + perror("/"); + plog("Can't stat root\n"); + return (origname); + } + newname = origname; +retry: + if (stat(newname, &stblock) < 0) { + perror(newname); + plog("Can't stat %s\n", newname); + return (origname); + } + if ((stblock.st_mode & S_IFMT) == S_IFBLK) { + if (stslash.st_dev == stblock.st_rdev) + hotroot++; + raw = rawname(newname); + if (stat(raw, &stchar) < 0) { + perror(raw); + plog("Can't stat %s\n", raw); + return (origname); + } + if ((stchar.st_mode & S_IFMT) == S_IFCHR) { + return (raw); + } else { + plog("%s is not a character device\n", raw); + return (origname); + } + } else if ((stblock.st_mode & S_IFMT) == S_IFCHR && !retried) { + newname = unrawname(newname); + retried++; + goto retry; + } + /* + * Not a block or character device, just return name and + * let the caller decide whether to use it. 
+ */ + return (origname); +} + + +char * +rawname(char *name) + +{ + static char rawbuf[32]; + char *dp; + + if ((dp = strrchr(name, '/')) == 0) + return (0); + *dp = 0; + (void)strlcpy(rawbuf, name, sizeof(rawbuf)); + *dp = '/'; + (void)strlcat(rawbuf, "/r", sizeof(rawbuf)); + (void)strlcat(rawbuf, &dp[1], sizeof(rawbuf)); + + return (rawbuf); +} + + +char * +unrawname(char *name) +{ + char *dp; + struct stat stb; + + if ((dp = strrchr(name, '/')) == 0) + return (name); + if (stat(name, &stb) < 0) + return (name); + if ((stb.st_mode & S_IFMT) != S_IFCHR) + return (name); + if (dp[1] != 'r') + return (name); + memmove(&dp[1], &dp[2], strlen(&dp[2]) + 1); + + return (name); +} + + +void +catch(sig) + int sig; +{ + if (!upgrading) + ckfini(0); + exit(12); +} + + +// +// Logging stuff... +// +// +#include <stdarg.h> +#include <pthread.h> +#include <time.h> + +#define FSCK_LOG_FILE "/var/log/fsck_hfs.log" + +extern char lflag; // indicates if we're doing a live fsck (defined in fsck_hfs.c) +extern char guiControl; // indicates if we're outputting for the gui (defined in fsck_hfs.c) + +FILE *log_file = NULL; + +/* Variables for in-memory log for strings that will be written to log file */ +char *in_mem_log = NULL; +char *cur_in_mem_log = NULL; +size_t in_mem_log_size = 0; + +/* Variables for in-memory log for strings that will be printed on standard out */ +char *in_mem_out = NULL; +char *cur_in_mem_out = NULL; +size_t in_mem_out_size = 0; + +int live_fsck = 0; + +#define DEFAULT_IN_MEM_SIZE 4096 + +static pthread_mutex_t mem_buf_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t mem_buf_cond; + +static pthread_t printing_thread; +static pthread_t logging_thread; +static volatile int keep_going = 1; + +#undef fprintf +#undef printf + +// prototype +void print_to_mem(int type, int mem_type, const char *fmt, const char *str, va_list ap); + +#define DO_VPRINT 1 // types for print_to_mem +#define DO_STR 2 + +/* Types for mem_type */ +#define IN_MEM_LOG 1 // in-memory log strings +#define IN_MEM_OUT 2 // in-memory stdout strings + +static void * +fsck_logging_thread(void *arg) +{ + int copy_amt; + char buff[1024], *ptr; + + /* Handle writing to the log file */ + while(keep_going || cur_in_mem_log != in_mem_log) { + + pthread_mutex_lock(&mem_buf_lock); + while (keep_going != 0 && cur_in_mem_log == in_mem_log) { + int err; + + err = pthread_cond_wait(&mem_buf_cond, &mem_buf_lock); + if (err != 0) { + fprintf(stderr, "error %d from cond wait\n", err); + break; + } + } + + copy_amt = (cur_in_mem_log - in_mem_log); + if (copy_amt == 0) { + pthread_mutex_unlock(&mem_buf_lock); + continue; + } + + if (copy_amt >= sizeof(buff)) { + copy_amt = sizeof(buff) - 1; + memcpy(buff, in_mem_log, copy_amt); + + memmove(in_mem_log, &in_mem_log[copy_amt], (cur_in_mem_log - in_mem_log) - copy_amt); + cur_in_mem_log -= copy_amt; + } else { + memcpy(buff, in_mem_log, copy_amt); + cur_in_mem_log = in_mem_log; + } + + buff[copy_amt] = '\0'; + + pthread_mutex_unlock(&mem_buf_lock); + + for(ptr=buff; *ptr; ) { + char *start; + + start = ptr; + while(*ptr && *ptr != '\n') { + ptr++; + } + if (*ptr == '\n') { + *ptr++ = '\0'; + if (log_file) { + fprintf(log_file, "%s: %s\n", cdevname ? 
cdevname : "UNKNOWN-DEV", start); + } + } else { + if (log_file) { + fprintf(log_file, "%s", start); + } + } + + } + + fflush(stdout); + } + + return NULL; +} + +static void * +fsck_printing_thread(void *arg) +{ + int copy_amt; + char buff[1024], *ptr; + + /* Handle writing to the out file */ + while(keep_going || cur_in_mem_out != in_mem_out) { + + pthread_mutex_lock(&mem_buf_lock); + while (keep_going != 0 && cur_in_mem_out == in_mem_out) { + int err; + + err = pthread_cond_wait(&mem_buf_cond, &mem_buf_lock); + if (err != 0) { + fprintf(stderr, "error %d from cond wait\n", err); + break; + } + } + + copy_amt = (cur_in_mem_out - in_mem_out); + if (copy_amt == 0) { + pthread_mutex_unlock(&mem_buf_lock); + continue; + } + + if (copy_amt >= sizeof(buff)) { + copy_amt = sizeof(buff) - 1; + memcpy(buff, in_mem_out, copy_amt); + + memmove(in_mem_out, &in_mem_out[copy_amt], (cur_in_mem_out - in_mem_out) - copy_amt); + cur_in_mem_out -= copy_amt; + } else { + memcpy(buff, in_mem_out, copy_amt); + cur_in_mem_out = in_mem_out; + } + + buff[copy_amt] = '\0'; + + pthread_mutex_unlock(&mem_buf_lock); + + for(ptr=buff; *ptr; ) { + char *start; + + start = ptr; + while(*ptr && *ptr != '\n') { + ptr++; + } + if (*ptr == '\n') { + *ptr++ = '\0'; + printf("%s\n", start); + } else { + printf("%s", start); + } + + } + + fflush(stdout); + } + + return NULL; +} + + +int was_signaled = 0; + +void +shutdown_logging(void) +{ + keep_going = 0; + time_t t; + + /* Log fsck_hfs check completion time */ + t = time(NULL); + if (in_mem_log) { + print_to_mem(DO_STR, IN_MEM_LOG, "fsck_hfs completed at %s\n", ctime(&t), NULL); + } else { + fprintf(log_file, "%s: fsck_hfs completed at %s\n", cdevname ? cdevname : "UNKNOWN-DEV", ctime(&t)); + } + + if (was_signaled) { + // if we were signaled, we can't really call any of these + // functions from the context of a signal handler (which + // is how we're called if we don't have a signal handler). + // so we have our own signal handler which sets this var + // which tells us to just bail out. + return; + } + + if (log_file && !live_fsck) { + fflush(log_file); + fclose(log_file); + log_file = NULL; + } else if ((in_mem_out || in_mem_log) && live_fsck && log_file) { + // make sure the printing and logging threads are woken up... + pthread_mutex_lock(&mem_buf_lock); + pthread_cond_broadcast(&mem_buf_cond); + pthread_mutex_unlock(&mem_buf_lock); + + // then wait for them + pthread_join(printing_thread, NULL); + pthread_join(logging_thread, NULL); + + free(in_mem_out); + in_mem_out = cur_in_mem_out = NULL; + in_mem_out_size = 0; + + free(in_mem_log); + in_mem_log = cur_in_mem_log = NULL; + in_mem_log_size = 0; + + if (log_file) { + fflush(log_file); + fclose(log_file); + log_file = NULL; + } + } else if (in_mem_log) { + int ret; + + if (getuid() == 0) { + // just in case, flush any pending output + fflush(stdout); + fflush(stderr); + + // + // fork so that the child can wait around until the + // root volume is mounted read-write and we can add + // our output to the log + // + ret = fork(); + } else { + // if we're not root we don't need to fork + ret = 0; + } + if (ret == 0) { + int i; + char *fname = FSCK_LOG_FILE, path[PATH_MAX]; + + // Disk Management waits for fsck_hfs' stdout to close rather + // than the process death to understand if fsck_hfs has exited + // or not. Since we do not use stdout any further, close all + // the file descriptors so that Disk Management does not wait + // for 60 seconds unnecessarily on read-only boot volumes. 
+ fclose(stdout); + fclose(stdin); + fclose(stderr); + + // non-root will never be able to write to /var/log + // so point the file somewhere else. + if (getuid() != 0) { + struct passwd *pwd; + fname = NULL; + // each user will get their own log as ~/Library/Logs/fsck_hfs.log + pwd = getpwuid(getuid()); + if (pwd) { + snprintf(path, sizeof(path), "%s/Library/Logs/fsck_hfs.log", pwd->pw_dir); + fname = &path[0]; + } + } + + for(i=0; i < 60; i++) { + log_file = fopen(fname, "a"); + if (log_file) { + fwrite(in_mem_log, cur_in_mem_log - in_mem_log, 1, log_file); + + fflush(log_file); + fclose(log_file); + log_file = NULL; + + free(in_mem_log); + in_mem_log = cur_in_mem_log = NULL; + in_mem_log_size = 0; + + break; + } else { + // hmmm, failed to open the output file so wait + // a while only if the fs is read-only and then + // try again + if (errno == EROFS) { + sleep(1); + } else { + break; + } + } + } + } + } +} + +static void +my_sighandler(int sig) +{ + was_signaled = 1; + cleanup_fs_fd(); + exit(sig); +} + + +void +setup_logging(void) +{ + static int at_exit_setup = 0; + time_t t; + + // if this is set, we don't have to do anything + if (at_exit_setup) { + return; + } + + if (guiControl) { + setlinebuf(stdout); + setlinebuf(stderr); + } + + // our copy of this variable since we may + // need to change it to make the right thing + // happen for fsck on the root volume. + live_fsck = (int)lflag; + + if (log_file == NULL) { + log_file = fopen(FSCK_LOG_FILE, "a"); + if (log_file) { + setlinebuf(log_file); + } else { + // + // if we can't open the output file it's either because + // we're being run on the root volume during early boot + // or we were not run as the root user and so we can't + // write to /var/log/fsck_hfs.log. in either case we + // turn off "live_fsck" so that the right thing happens + // in here with respect to where output goes. + // + live_fsck = 0; + } + + if (!live_fsck && log_file) { + t = time(NULL); + fprintf(log_file, "\n%s: fsck_hfs started at %s", cdevname ? cdevname : "UNKNOWN-DEV", ctime(&t)); + fflush(log_file); + + } else if (live_fsck || in_mem_log == NULL || in_mem_out == NULL) { + // + // hmm, we couldn't open the log file (or it's a + // live fsck). let's just squirrel away a copy + // of the data in memory and then deal with it + // later (or print it out from a separate thread + // if we're doing a live fsck). 
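+			// In the live-fsck case (and when the log file could be opened) the
+			// two buffers allocated below act as a small producer/consumer
+			// queue: plog()/olog()/llog() funnel into print_to_mem(), which
+			// appends to in_mem_out / in_mem_log and (for the stdout buffer)
+			// signals mem_buf_cond, and fsck_printing_thread() and
+			// fsck_logging_thread() wake up and drain them to stdout and to the
+			// log file.  Illustrative flow for a hypothetical message:
+			//
+			//     plog("Checking catalog file.\n");
+			//       -> print_to_mem(DO_VPRINT, IN_MEM_OUT, ...)   stdout buffer
+			//       -> print_to_mem(DO_VPRINT, IN_MEM_LOG, ...)   log buffer
+			//       -> pthread_cond_signal(&mem_buf_cond)         wakes a waiting drain thread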
+ // + in_mem_log = (char *)malloc(DEFAULT_IN_MEM_SIZE); + in_mem_out = (char *)malloc(DEFAULT_IN_MEM_SIZE); + if ((in_mem_log != NULL) && (in_mem_out != NULL)) { + in_mem_log_size = DEFAULT_IN_MEM_SIZE; + in_mem_log[0] = '\0'; + cur_in_mem_log = in_mem_log; + + in_mem_out_size = DEFAULT_IN_MEM_SIZE; + in_mem_out[0] = '\0'; + cur_in_mem_out = in_mem_out; + + t = time(NULL); + print_to_mem(DO_STR, IN_MEM_LOG, "\nfsck_hfs started at %s", ctime(&t), NULL); + + if (live_fsck && log_file) { + pthread_cond_init(&mem_buf_cond, NULL); + + signal(SIGINT, my_sighandler); + signal(SIGHUP, my_sighandler); + signal(SIGTERM, my_sighandler); + signal(SIGQUIT, my_sighandler); + signal(SIGBUS, my_sighandler); + signal(SIGSEGV, my_sighandler); + signal(SIGILL, my_sighandler); + + pthread_create(&printing_thread, NULL, fsck_printing_thread, NULL); + pthread_create(&logging_thread, NULL, fsck_logging_thread, NULL); + + } + } + } + + if (at_exit_setup == 0 && (log_file || in_mem_log || in_mem_out)) { + atexit(shutdown_logging); + at_exit_setup = 1; + } + } +} + + +void +print_to_mem(int type, int mem_type, const char *fmt, const char *str, va_list ap) +{ + int ret; + size_t size_remaining; + va_list ap_copy; + char *cur_in_mem; + char *in_mem_data; + size_t in_mem_data_size; + + if (type == DO_VPRINT) { + va_copy(ap_copy, ap); + } + + if (mem_type == IN_MEM_LOG) { + cur_in_mem = cur_in_mem_log; + in_mem_data = in_mem_log; + in_mem_data_size = in_mem_log_size; + } else { + cur_in_mem = cur_in_mem_out; + in_mem_data = in_mem_out; + in_mem_data_size = in_mem_out_size; + } + + /* Grab the lock only when adding output strings to the in-memory data */ + if (live_fsck && (mem_type == IN_MEM_OUT)) { + pthread_mutex_lock(&mem_buf_lock); + } + + size_remaining = in_mem_data_size - (ptrdiff_t)(cur_in_mem - in_mem_data); + if (type == DO_VPRINT) { + ret = vsnprintf(cur_in_mem, size_remaining, fmt, ap); + } else { + ret = snprintf(cur_in_mem, size_remaining, fmt, str); + } + if (ret > size_remaining) { + char *new_log; + size_t amt; + + if (ret >= DEFAULT_IN_MEM_SIZE) { + amt = (ret + 4095) & (~4095); // round up to a 4k boundary + } else { + amt = DEFAULT_IN_MEM_SIZE; + } + + new_log = realloc(in_mem_data, in_mem_data_size + amt); + if (new_log == NULL) { + if (live_fsck && (mem_type == IN_MEM_OUT)) { + pthread_cond_signal(&mem_buf_cond); + pthread_mutex_unlock(&mem_buf_lock); + } + goto done; + } + + in_mem_data_size += amt; + cur_in_mem = new_log + (cur_in_mem - in_mem_data); + in_mem_data = new_log; + size_remaining = in_mem_data_size - (ptrdiff_t)(cur_in_mem - new_log); + if (type == DO_VPRINT) { + ret = vsnprintf(cur_in_mem, size_remaining, fmt, ap_copy); + } else { + ret = snprintf(cur_in_mem, size_remaining, fmt, str); + } + if (ret <= size_remaining) { + cur_in_mem += ret; + } + } else { + cur_in_mem += ret; + } + + if (live_fsck && (mem_type == IN_MEM_OUT)) { + pthread_cond_signal(&mem_buf_cond); + pthread_mutex_unlock(&mem_buf_lock); + } + +done: + + if (mem_type == IN_MEM_LOG) { + cur_in_mem_log = cur_in_mem; + in_mem_log = in_mem_data; + in_mem_log_size = in_mem_data_size; + } else { + cur_in_mem_out = cur_in_mem; + in_mem_out = in_mem_data; + in_mem_out_size = in_mem_data_size; + } + + if (type == DO_VPRINT) { + va_end(ap_copy); + } +} + + +static int need_prefix=1; + +#define LOG_PREFIX \ + if (need_prefix) { \ + fprintf(log_file, "%s: ", cdevname); \ + if (strchr(fmt, '\n')) { \ + need_prefix = 1; \ + } else { \ + need_prefix = 0; \ + } \ + } else if (strchr(fmt, '\n')) { \ + need_prefix = 1; \ + } + +/* 
Print output string on given stream or store it into in-memory buffer */ +#define VOUT(stream, fmt, ap) \ + if (!live_fsck) { \ + vfprintf(stream, fmt, ap); \ + } else { \ + print_to_mem(DO_VPRINT, IN_MEM_OUT, fmt, NULL, ap); \ + } + +#define FOUT(fmt, str) \ + print_to_mem(DO_STR, IN_MEM_OUT, fmt, str, NULL); + +/* Store output string written to fsck_hfs.log into file or in-memory buffer */ +#define VLOG(fmt, ap) \ + va_start(ap, fmt); \ + VLOG_INTERNAL(fmt, ap); + +#define VLOG_INTERNAL(fmt, ap) \ + if (log_file && !live_fsck) { \ + LOG_PREFIX \ + vfprintf(log_file, fmt, ap); \ + } else { \ + print_to_mem(DO_VPRINT, IN_MEM_LOG, fmt, NULL, ap); \ + } + +#define FLOG(fmt, str) \ + if (log_file && !live_fsck) { \ + LOG_PREFIX; \ + fprintf(log_file, fmt, str); \ + } else { \ + print_to_mem(DO_STR, IN_MEM_LOG, fmt, str, NULL); \ + } + + +#if __STDC__ +#include <stdarg.h> +#else +#include <varargs.h> +#endif + +/* + * An unexpected inconsistency occurred. + * Die if preening, otherwise just print message and continue. + */ +void +#if __STDC__ +pfatal(const char *fmt, ...) +#else +pfatal(fmt, va_alist) + char *fmt; + va_dcl +#endif +{ + va_list ap; + + setup_logging(); + +#if __STDC__ + va_start(ap, fmt); +#else + va_start(ap); +#endif + if (!preen) { + (void)vfprintf(stderr, fmt, ap); + VLOG(fmt, ap); + va_end(ap); + return; + } + if (!live_fsck) + (void)fprintf(stderr, "%s: ", cdevname); + FLOG("%s: ", cdevname); + + if (!live_fsck) + (void)vfprintf(stderr, fmt, ap); + VLOG(fmt, ap); + + if (!live_fsck) + (void)fprintf(stderr, + "\n%s: UNEXPECTED INCONSISTENCY; RUN fsck_hfs MANUALLY.\n", + cdevname); + FLOG("\n%s: UNEXPECTED INCONSISTENCY; RUN fsck_hfs MANUALLY.\n", cdevname); + + exit(EEXIT); +} + +/* + * Pwarn just prints a message when not preening, + * or a warning (preceded by filename) when preening. + */ +void +#if __STDC__ +pwarn(const char *fmt, ...) +#else +pwarn(fmt, va_alist) + char *fmt; + va_dcl +#endif +{ + va_list ap; + + setup_logging(); + +#if __STDC__ + va_start(ap, fmt); +#else + va_start(ap); +#endif + if (preen) { + (void)fprintf(stderr, "%s: ", cdevname); + FLOG("%s: ", cdevname); + } + if (!live_fsck) + (void)vfprintf(stderr, fmt, ap); + VLOG(fmt, ap); + + va_end(ap); +} + +/* Write a string and parameters, if any, directly to the log file. + * These strings will not be printed to standard out/error. + */ +void +logstring(void *c, const char *str) +{ + llog("%s", str); +} + +/* Write a string and parameters, if any, directly to standard out/error. + * These strings will not be printed to log file. + */ +void +outstring(void *c, const char *str) +{ + olog("%s", str); +} + +/* Write to both standard out and log file */ +void +plog(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + vplog(fmt, ap); + va_end(ap); +} + +/* Write to only standard out */ +void +olog(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + + setup_logging(); + + /* For live fsck_hfs, add output strings to in-memory log, + * and for non-live fsck_hfs, print output to stdout. + */ + VOUT(stdout, fmt, ap); + + va_end(ap); +} + +/* Write to only log file */ +void +llog(const char *fmt, ...) 
+{ + va_list ap; + va_start(ap, fmt); + + setup_logging(); + need_prefix = 1; + VLOG(fmt, ap); + + va_end(ap); +} + +/* Write to both standard out and log file */ +void +vplog(const char *fmt, va_list ap) +{ + va_list copy_ap; + + va_copy(copy_ap, ap); + + setup_logging(); + + /* Always print prefix to strings written to log files */ + need_prefix = 1; + + /* Handle output strings, print to stdout or store in-memory */ + VOUT(stdout, fmt, ap); + + /* Add log strings to the log file. VLOG() handles live case internally */ + VLOG_INTERNAL(fmt, copy_ap); +} + +/* Write to both standard out and log file */ +void +fplog(FILE *stream, const char *fmt, ...) +{ + va_list ap, copy_ap; + va_start(ap, fmt); + va_copy(copy_ap, ap); + + setup_logging(); + need_prefix = 1; + + /* Handle output strings, print to given stream or store in-memory */ + VOUT(stream, fmt, ap); + + /* Add log strings to the log file. VLOG() handles live case internally */ + VLOG(fmt, copy_ap); + + va_end(ap); +} + +#define kProgressToggle "kern.progressmeterenable" +#define kProgress "kern.progressmeter" + +void +start_progress(void) +{ + int rv; + int enable = 1; + if (hotroot == 0) + return; + rv = sysctlbyname(kProgressToggle, NULL, NULL, &enable, sizeof(enable)); + if (debug && rv == -1 && errno != ENOENT) { + warn("sysctl(%s) failed", kProgressToggle); + } +} + +void +draw_progress(int pct) +{ + int rv; + if (hotroot == 0) + return; + rv = sysctlbyname(kProgress, NULL, NULL, &pct, sizeof(pct)); + if (debug && rv == -1 && errno != ENOENT) { + warn("sysctl(%s) failed", kProgress); + } +} + +void +end_progress(void) +{ + int rv; + int enable = 0; + if (hotroot == 0) + return; + rv = sysctlbyname(kProgressToggle, NULL, NULL, &enable, sizeof(enable)); + if (debug && rv == -1 && errno != ENOENT) { + warn("sysctl(%s) failed", kProgressToggle); + } +} + diff --git a/fstyp_hfs/fstyp_hfs.8 b/fstyp_hfs/fstyp_hfs.8 new file mode 100644 index 0000000..64bbf33 --- /dev/null +++ b/fstyp_hfs/fstyp_hfs.8 @@ -0,0 +1,40 @@ +.\" +.\" (c) 2005 Apple Computer, Inc. All rights reserved. +.\" +.\" @APPLE_LICENSE_HEADER_START@ +.\" +.\" The contents of this file constitute Original Code as defined in and +.\" are subject to the Apple Public Source License Version 1.1 (the +.\" "License"). You may not use this file except in compliance with the +.\" License. Please obtain a copy of the License at +.\" http://www.apple.com/publicsource and read it before using this file. +.\" +.\" This Original Code and all software distributed under the License are +.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +.\" License for the specific language governing rights and limitations +.\" under the License. +.\" +.\" @APPLE_LICENSE_HEADER_END@ +.\" +.Dd August 15, 2005 +.Dt FSTYP_HFS 8 +.Os +.Sh NAME +.Nm fstyp_hfs +.Nd check for an HFS volume +.Sh SYNOPSIS +.Nm +.Ar device +.Sh DESCRIPTION +The +.Nm +utility is not intended to be run directly, but rather called by +.Xr fstyp 8 +while it is trying to determine which file system type is present on the +given device. +It returns 1 if it thinks the device contains an HFS volume, and 0 otherwise. 
+.Sh SEE ALSO +.Xr fstyp 8 diff --git a/fstyp_hfs/fstyp_hfs.c b/fstyp_hfs/fstyp_hfs.c new file mode 100644 index 0000000..884f955 --- /dev/null +++ b/fstyp_hfs/fstyp_hfs.c @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include <stdio.h> +#include <stdlib.h> +#include <sys/disk.h> +#include <fcntl.h> +#include <errno.h> +#include <unistd.h> +#include <string.h> +#include <sys/stat.h> + +#define HFS_VOLHDR_OFFSET 1024 /* technote 1150 */ +#define HFS_VOLHDR_SIZE 512 /* technote 1150 */ + +#define E_OPENDEV -1 +#define E_READ -5 + +void usage(void); +char *rawname(char *name); +char *unrawname(char *name); +int checkVolHdr(const unsigned char *volhdr); +char *blockcheck(char *origname); + +char *progname; + +/* + * perhaps check the alternate volume header as well + + * prefer to use raw device. TODO: suppose block device is valid but + * the corresponding raw device is not valid, then we fail. this is + * probably not the desired behavior.
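+ *
+ * (blockcheck() below maps a block node to its raw twin by inserting an
+ * "r" before the last path component, e.g. an illustrative /dev/disk0s2
+ * becomes /dev/rdisk0s2, and returns NULL if that raw node cannot be
+ * stat'd as a character device.)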
+ */ + +int +main(int argc, char **argv) +{ + unsigned char volhdr[HFS_VOLHDR_SIZE] = {0}; + int fd, retval; + char *devname; + + fd = -1; + retval = 0; + + if ((progname = strrchr(*argv, '/'))) + ++progname; + else + progname = *argv; + + if (argc != 2) { + usage(); + } else { + devname = blockcheck(argv[1]); + + if (devname != NULL) { + if ((fd = open(devname, O_RDONLY, 0)) < 0) { + retval = E_OPENDEV; + } else if (pread(fd, volhdr, HFS_VOLHDR_SIZE, HFS_VOLHDR_OFFSET) != HFS_VOLHDR_SIZE) { + retval = E_READ; + } else { + retval = checkVolHdr(volhdr); + } + + if (-1 != fd) { + close(fd); + fd = -1; + } + } + } + + return retval; +} + +void +usage(void) +{ + fprintf(stdout, "usage: %s device\n", progname); + return; +} + +/* copied from diskdev_cmds/fsck_hfs/utilities.c */ +char * +rawname(char *name) +{ + static char rawbuf[32]; + char *dp; + + if ((dp = strrchr(name, '/')) == 0) + return (0); + *dp = 0; + (void) strcpy(rawbuf, name); + *dp = '/'; + (void) strcat(rawbuf, "/r"); + (void) strcat(rawbuf, &dp[1]); + + return (rawbuf); +} + +/* copied from diskdev_cmds/fsck_hfs/utilities.c */ +char * +unrawname(char *name) +{ + char *dp; + struct stat stb; + + if ((dp = strrchr(name, '/')) == 0) + return (name); + if (stat(name, &stb) < 0) + return (name); + if ((stb.st_mode & S_IFMT) != S_IFCHR) + return (name); + if (dp[1] != 'r') + return (name); + (void) strcpy(&dp[1], &dp[2]); + + return (name); +} + +/* + * copied from diskdev_cmds/fsck_hfs/utilities.c, and modified: + * 1) remove "hotroot" + * 2) if error, return NULL + * 3) if not a char device, return NULL (effectively, this is treated + * as error even if accessing the block device might have been OK) + */ +char * +blockcheck(char *origname) +{ + struct stat stblock, stchar; + char *newname, *raw; + int retried; + + retried = 0; + newname = origname; +retry: + if (stat(newname, &stblock) < 0) { + perror(newname); + fprintf(stderr, "Can't stat %s\n", newname); + return NULL; + } + if ((stblock.st_mode & S_IFMT) == S_IFBLK) { + raw = rawname(newname); + if (stat(raw, &stchar) < 0) { + perror(raw); + fprintf(stderr, "Can't stat %s\n", raw); + return NULL; + } + if ((stchar.st_mode & S_IFMT) == S_IFCHR) { + return (raw); + } else { + fprintf(stderr, "%s is not a character device\n", raw); + return NULL; + } + } else if ((stblock.st_mode & S_IFMT) == S_IFCHR && !retried) { + newname = unrawname(newname); + retried++; + goto retry; + } + /* not a block or character device */ + return NULL; +} + +/* + * (sanity) check the volume header in volhdr + * + * return 1 if volhdr is an HFS volhdr, 0 otherwise + */ +int +checkVolHdr(const unsigned char *volhdr) +{ + int retval; + + retval = 0; + + if (strncmp((const char *)volhdr, "H+", 2) == 0) { + /* technote 1150: H+ is version 4 */ + retval = (volhdr[3] == 4); + } else if (strncmp((const char *)volhdr, "HX", 2) == 0) { + /* technote 1150: HX is version 5 */ + retval = (volhdr[3] == 5); + } + return retval; +} diff --git a/hfs.xcconfig b/hfs.xcconfig new file mode 100644 index 0000000..4941101 --- /dev/null +++ b/hfs.xcconfig @@ -0,0 +1,17 @@ +HFS_VERSION = 1.7 +HFS_COPYRIGHT_INFO_STRING = $HFS_VERSION, Copyright Apple Inc. 
1999-2012 +FS_BUNDLE_PATH = /System/Library/Filesystems/hfs.fs +FS_BUNDLE_BIN_DIR = Contents/Resources +FS_BUNDLE_BIN_PATH = $FS_BUNDLE_PATH/$FS_BUNDLE_BIN_DIR +FS_BUNDLE_RESOURCES_PATH = $FS_BUNDLE_PATH/Contents/Resources +FS_BUNDLE_ENGLISH_PATH = $FS_BUNDLE_RESOURCES_PATH/English.lproj + +VERSIONING_SYSTEM = apple-generic +CURRENT_PROJECT_VERSION = $(RC_ProjectSourceVersion) +ALWAYS_SEARCH_USER_PATHS = NO +USE_HEADERMAP = NO +WARNING_CFLAGS = -Wall -W -Wno-missing-field-initializers +DEAD_CODE_STRIPPING = YES +DEBUG_INFORMATION_FORMAT = dwarf-with-dsym +SUPPORTED_PLATFORMS = macosx iphoneos +INSTALL_MODE_FLAG = a-w,a+rX diff --git a/hfs.xcodeproj/project.pbxproj b/hfs.xcodeproj/project.pbxproj index b11abf4..18a12b3 100644 --- a/hfs.xcodeproj/project.pbxproj +++ b/hfs.xcodeproj/project.pbxproj @@ -3,25 +3,117 @@ archiveVersion = 1; classes = { }; - objectVersion = 45; + objectVersion = 46; objects = { /* Begin PBXAggregateTarget section */ - FDD9FA3714A132E40043D4A9 /* hfs */ = { + 4DBD523B1548A488007AA736 /* Common */ = { isa = PBXAggregateTarget; - buildConfigurationList = FDD9FA3814A132E40043D4A9 /* Build configuration list for PBXAggregateTarget "hfs" */; + buildConfigurationList = 4DBD523C1548A488007AA736 /* Build configuration list for PBXAggregateTarget "Common" */; buildPhases = ( ); dependencies = ( - FDD9FA3C14A132F10043D4A9 /* PBXTargetDependency */, - FDD9FA3E14A132F20043D4A9 /* PBXTargetDependency */, + 4DBD523F1548A499007AA736 /* PBXTargetDependency */, + 4DBD52411548A49A007AA736 /* PBXTargetDependency */, + 4DBD52431548A49D007AA736 /* PBXTargetDependency */, + 4DBD52451548A4A0007AA736 /* PBXTargetDependency */, + 4DBD52471548A4A2007AA736 /* PBXTargetDependency */, + 4DBD52491548A4A4007AA736 /* PBXTargetDependency */, + 4DBD524B1548A4A7007AA736 /* PBXTargetDependency */, + 4DBD524D1548A4AA007AA736 /* PBXTargetDependency */, ); - name = hfs; - productName = hfs; + name = Common; + productName = Common; + }; + 4DD302571538DB2700001AA0 /* All_MacOSX */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4DD302581538DB2700001AA0 /* Build configuration list for PBXAggregateTarget "All_MacOSX" */; + buildPhases = ( + ); + dependencies = ( + 4DBD52511548A4D2007AA736 /* PBXTargetDependency */, + 4DBD52531548A4D4007AA736 /* PBXTargetDependency */, + ); + name = All_MacOSX; + productName = All_MacOSX; + }; + 4DD3025A1538DB3A00001AA0 /* All_iOS */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4DD3025B1538DB3A00001AA0 /* Build configuration list for PBXAggregateTarget "All_iOS" */; + buildPhases = ( + ); + dependencies = ( + 4DBD524F1548A4C8007AA736 /* PBXTargetDependency */, + ); + name = All_iOS; + productName = All_iOS; }; /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 4D07DCC41538EF92002B57CB /* fstyp_hfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4D07DCC31538EF92002B57CB /* fstyp_hfs.c */; }; + 4D07DCC51538EFB7002B57CB /* fstyp_hfs.8 in Copy man8 */ = {isa = PBXBuildFile; fileRef = 4D07DCC21538EF92002B57CB /* fstyp_hfs.8 */; }; + 4D0E89AA1534FF48004CD678 /* mount_hfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4D0E89A71534FF48004CD678 /* mount_hfs.c */; }; + 4D0E89AB1534FF48004CD678 /* optical.c in Sources */ = {isa = PBXBuildFile; fileRef = 4D0E89A81534FF48004CD678 /* optical.c */; }; + 4DE6C7491535012B00C11066 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */; }; + 4DE6C74C1535023700C11066 /* IOKit.framework in Frameworks */ = {isa = 
PBXBuildFile; fileRef = 4DE6C7461535012200C11066 /* IOKit.framework */; }; + 4DE6C756153502F800C11066 /* libutil.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C74A1535018100C11066 /* libutil.dylib */; }; + 4DE6C76A1535050700C11066 /* hfs_endian.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7641535050700C11066 /* hfs_endian.c */; }; + 4DE6C76B1535050700C11066 /* makehfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7661535050700C11066 /* makehfs.c */; }; + 4DE6C76C1535050700C11066 /* newfs_hfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7681535050700C11066 /* newfs_hfs.c */; }; + 4DE6C76D1535052A00C11066 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */; }; + 4DE6C76E1535052C00C11066 /* IOKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C7461535012200C11066 /* IOKit.framework */; }; + 4DE6C76F1535052E00C11066 /* libutil.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C74A1535018100C11066 /* libutil.dylib */; }; + 4DFD944F153600060039B6BA /* cache.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9416153600060039B6BA /* cache.c */; }; + 4DFD9450153600060039B6BA /* BlockCache.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9419153600060039B6BA /* BlockCache.c */; }; + 4DFD9451153600060039B6BA /* BTree.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD941A153600060039B6BA /* BTree.c */; }; + 4DFD9452153600060039B6BA /* BTreeAllocate.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD941C153600060039B6BA /* BTreeAllocate.c */; }; + 4DFD9453153600060039B6BA /* BTreeMiscOps.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD941D153600060039B6BA /* BTreeMiscOps.c */; }; + 4DFD9454153600060039B6BA /* BTreeNodeOps.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD941E153600060039B6BA /* BTreeNodeOps.c */; }; + 4DFD9455153600060039B6BA /* BTreeScanner.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9420153600060039B6BA /* BTreeScanner.c */; }; + 4DFD9456153600060039B6BA /* BTreeTreeOps.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9422153600060039B6BA /* BTreeTreeOps.c */; }; + 4DFD9457153600060039B6BA /* CatalogCheck.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9424153600060039B6BA /* CatalogCheck.c */; }; + 4DFD9459153600060039B6BA /* dirhardlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9429153600060039B6BA /* dirhardlink.c */; }; + 4DFD945A153600060039B6BA /* HardLinkCheck.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD942B153600060039B6BA /* HardLinkCheck.c */; }; + 4DFD945B153600060039B6BA /* hfs_endian.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD942C153600060039B6BA /* hfs_endian.c */; }; + 4DFD945D153600060039B6BA /* SAllocate.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD942F153600060039B6BA /* SAllocate.c */; }; + 4DFD945E153600060039B6BA /* SBTree.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9430153600060039B6BA /* SBTree.c */; }; + 4DFD945F153600060039B6BA /* SCatalog.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9431153600060039B6BA /* SCatalog.c */; }; + 4DFD9460153600060039B6BA /* SControl.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9433153600060039B6BA /* SControl.c */; }; + 4DFD9461153600060039B6BA /* SDevice.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9434153600060039B6BA /* SDevice.c */; }; + 4DFD9462153600060039B6BA /* SExtents.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9435153600060039B6BA /* SExtents.c */; }; + 
4DFD9463153600060039B6BA /* SKeyCompare.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9436153600060039B6BA /* SKeyCompare.c */; }; + 4DFD9464153600060039B6BA /* SRebuildBTree.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9437153600060039B6BA /* SRebuildBTree.c */; }; + 4DFD9465153600060039B6BA /* SRepair.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9438153600060039B6BA /* SRepair.c */; }; + 4DFD9466153600060039B6BA /* SStubs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943A153600060039B6BA /* SStubs.c */; }; + 4DFD9467153600060039B6BA /* SUtils.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943B153600060039B6BA /* SUtils.c */; }; + 4DFD9468153600060039B6BA /* SVerify1.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943C153600060039B6BA /* SVerify1.c */; }; + 4DFD9469153600060039B6BA /* SVerify2.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943D153600060039B6BA /* SVerify2.c */; }; + 4DFD946A153600060039B6BA /* uuid.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943E153600060039B6BA /* uuid.c */; }; + 4DFD946B153600060039B6BA /* VolumeBitmapCheck.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD943F153600060039B6BA /* VolumeBitmapCheck.c */; }; + 4DFD946C153600060039B6BA /* fsck_debug.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9442153600060039B6BA /* fsck_debug.c */; }; + 4DFD946D153600060039B6BA /* fsck_hfs_strings.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9445153600060039B6BA /* fsck_hfs_strings.c */; }; + 4DFD946E153600060039B6BA /* fsck_hfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9447153600060039B6BA /* fsck_hfs.c */; }; + 4DFD946F153600060039B6BA /* fsck_messages.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD944A153600060039B6BA /* fsck_messages.c */; }; + 4DFD9470153600060039B6BA /* fsck_strings.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD944D153600060039B6BA /* fsck_strings.c */; }; + 4DFD9471153600060039B6BA /* utilities.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD944E153600060039B6BA /* utilities.c */; }; + 4DFD9472153601F50039B6BA /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */; }; + 4DFD9473153601F80039B6BA /* IOKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C7461535012200C11066 /* IOKit.framework */; }; + 4DFD94A5153649070039B6BA /* newfs_hfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7681535050700C11066 /* newfs_hfs.c */; }; + 4DFD94A6153649070039B6BA /* makehfs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7661535050700C11066 /* makehfs.c */; }; + 4DFD94A7153649070039B6BA /* hfs_endian.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DE6C7641535050700C11066 /* hfs_endian.c */; }; + 4DFD94A9153649070039B6BA /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */; }; + 4DFD94AA153649070039B6BA /* IOKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C7461535012200C11066 /* IOKit.framework */; }; + 4DFD94AB153649070039B6BA /* libutil.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4DE6C74A1535018100C11066 /* libutil.dylib */; }; + 4DFD94B315364B4B0039B6BA /* mount_hfs.8 in Copy man8 */ = {isa = PBXBuildFile; fileRef = 4D0E89A61534FF48004CD678 /* mount_hfs.8 */; }; + 4DFD94B415364B690039B6BA /* newfs_hfs.8 in Copy man8 */ = {isa = PBXBuildFile; fileRef = 4DE6C7671535050700C11066 /* newfs_hfs.8 */; }; + 4DFD94B515364B7B0039B6BA /* fsck_hfs.8 
in Copy man8 */ = {isa = PBXBuildFile; fileRef = 4DFD9446153600060039B6BA /* fsck_hfs.8 */; }; + 4DFD94C215373C2C0039B6BA /* fsck_messages.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD944A153600060039B6BA /* fsck_messages.c */; }; + 4DFD94C315373C2C0039B6BA /* fsck_strings.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD944D153600060039B6BA /* fsck_strings.c */; }; + 4DFD94C415373C2C0039B6BA /* fsck_hfs_strings.c in Sources */ = {isa = PBXBuildFile; fileRef = 4DFD9445153600060039B6BA /* fsck_hfs_strings.c */; }; + 4DFD9538153746210039B6BA /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 4DFD9536153746210039B6BA /* InfoPlist.strings */; }; + 4DFD953C15377BD80039B6BA /* fsck_keys.h in Copy fsck_keys.h */ = {isa = PBXBuildFile; fileRef = 4DFD9449153600060039B6BA /* fsck_keys.h */; }; + 7279A68D1593AA5C00192947 /* fsck_journal.c in Sources */ = {isa = PBXBuildFile; fileRef = 7279A68B1593AA5C00192947 /* fsck_journal.c */; }; C1B6FA0810CC0A0A00778D48 /* hfsutil_jnl.c in Sources */ = {isa = PBXBuildFile; fileRef = C1B6FA0610CC0A0A00778D48 /* hfsutil_jnl.c */; }; C1B6FA0910CC0A0A00778D48 /* hfsutil_main.c in Sources */ = {isa = PBXBuildFile; fileRef = C1B6FA0710CC0A0A00778D48 /* hfsutil_main.c */; }; C1B6FA3010CC0B9500778D48 /* hfs.util.8 in Copy man8 */ = {isa = PBXBuildFile; fileRef = C1B6FA2F10CC0B8A00778D48 /* hfs.util.8 */; }; @@ -35,68 +127,240 @@ FDD9FA5914A1343D0043D4A9 /* SparseBundle.c in Sources */ = {isa = PBXBuildFile; fileRef = FDD9FA5114A1343D0043D4A9 /* SparseBundle.c */; }; FDD9FA5A14A135290043D4A9 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */; }; FDD9FA5C14A135840043D4A9 /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = FDD9FA5B14A135840043D4A9 /* libz.dylib */; }; - FDD9FA6814A138D80043D4A9 /* Info.plist in Copy Contents */ = {isa = PBXBuildFile; fileRef = FDD9FA6214A138960043D4A9 /* Info.plist */; }; - FDD9FA6914A138DA0043D4A9 /* PkgInfo in Copy Contents */ = {isa = PBXBuildFile; fileRef = FDD9FA6414A138960043D4A9 /* PkgInfo */; }; - FDD9FA6B14A138EC0043D4A9 /* version.plist in Copy Contents */ = {isa = PBXBuildFile; fileRef = FDD9FA6514A138960043D4A9 /* version.plist */; }; - FDD9FA6C14A138EF0043D4A9 /* InfoPlist.strings in Copy InfoPlist.strings */ = {isa = PBXBuildFile; fileRef = FDD9FA6314A138960043D4A9 /* InfoPlist.strings */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ - FDD9FA3B14A132F10043D4A9 /* PBXContainerItemProxy */ = { + 4DBD523E1548A499007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DFD95111537402A0039B6BA; + remoteInfo = hfs.fs; + }; + 4DBD52401548A49A007AA736 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; remoteGlobalIDString = 8DD76FA90486AB0100D96B5E; remoteInfo = hfs.util; }; - FDD9FA3D14A132F20043D4A9 /* PBXContainerItemProxy */ = { + 4DBD52421548A49D007AA736 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; remoteGlobalIDString = FDD9FA2B14A132BF0043D4A9; remoteInfo = CopyHFSMeta; }; + 4DBD52441548A4A0007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 
4D0E899B1534FE65004CD678; + remoteInfo = mount_hfs; + }; + 4DBD52461548A4A2007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DE6C75A153504C100C11066; + remoteInfo = newfs_hfs; + }; + 4DBD52481548A4A4007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DFD94A3153649070039B6BA; + remoteInfo = newfs_hfs_debug; + }; + 4DBD524A1548A4A7007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DFD93F31535FF510039B6BA; + remoteInfo = fsck_hfs; + }; + 4DBD524C1548A4AA007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4D07DCB71538EF3A002B57CB; + remoteInfo = fstyp_hfs; + }; + 4DBD524E1548A4C8007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DBD523B1548A488007AA736; + remoteInfo = Common; + }; + 4DBD52501548A4D2007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DBD523B1548A488007AA736; + remoteInfo = Common; + }; + 4DBD52521548A4D4007AA736 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4DFD94BC15373C2C0039B6BA; + remoteInfo = fsck_makestrings; + }; /* End PBXContainerItemProxy section */ /* Begin PBXCopyFilesBuildPhase section */ - 8DD76FAF0486AB0100D96B5E /* Copy man8 */ = { + 4D07DCB61538EF3A002B57CB /* Copy man8 */ = { isa = PBXCopyFilesBuildPhase; - buildActionMask = 8; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man8/; + dstSubfolderSpec = 0; + files = ( + 4D07DCC51538EFB7002B57CB /* fstyp_hfs.8 in Copy man8 */, + ); + name = "Copy man8"; + runOnlyForDeploymentPostprocessing = 1; + }; + 4D0E899A1534FE65004CD678 /* Copy man8 */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; dstPath = /usr/share/man/man8; dstSubfolderSpec = 0; files = ( - C1B6FA3010CC0B9500778D48 /* hfs.util.8 in Copy man8 */, + 4DFD94B315364B4B0039B6BA /* mount_hfs.8 in Copy man8 */, ); name = "Copy man8"; runOnlyForDeploymentPostprocessing = 1; }; - FDD9FA6714A138AB0043D4A9 /* Copy Contents */ = { + 4DE6C759153504C100C11066 /* Copy man8 */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man8/; + dstSubfolderSpec = 0; + files = ( + 4DFD94B415364B690039B6BA /* newfs_hfs.8 in Copy man8 */, + ); + name = "Copy man8"; + runOnlyForDeploymentPostprocessing = 1; + }; + 4DFD93F21535FF510039B6BA /* Copy man8 */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man8/; + dstSubfolderSpec = 0; + files = ( + 4DFD94B515364B7B0039B6BA /* fsck_hfs.8 in Copy man8 */, + ); + name = "Copy man8"; + runOnlyForDeploymentPostprocessing = 1; + }; + 4DFD953B15377BC60039B6BA /* Copy fsck_keys.h */ = { isa = PBXCopyFilesBuildPhase; buildActionMask = 8; - dstPath = Contents; - dstSubfolderSpec = 1; + dstPath = /usr/local/include/fsck; + 
dstSubfolderSpec = 0; files = ( - FDD9FA6814A138D80043D4A9 /* Info.plist in Copy Contents */, - FDD9FA6914A138DA0043D4A9 /* PkgInfo in Copy Contents */, - FDD9FA6B14A138EC0043D4A9 /* version.plist in Copy Contents */, + 4DFD953C15377BD80039B6BA /* fsck_keys.h in Copy fsck_keys.h */, ); - name = "Copy Contents"; + name = "Copy fsck_keys.h"; runOnlyForDeploymentPostprocessing = 1; }; - FDD9FA6A14A138DC0043D4A9 /* Copy InfoPlist.strings */ = { + 8DD76FAF0486AB0100D96B5E /* Copy man8 */ = { isa = PBXCopyFilesBuildPhase; buildActionMask = 8; - dstPath = Contents/Resources/English.lproj; - dstSubfolderSpec = 1; + dstPath = /usr/share/man/man8; + dstSubfolderSpec = 0; files = ( - FDD9FA6C14A138EF0043D4A9 /* InfoPlist.strings in Copy InfoPlist.strings */, + C1B6FA3010CC0B9500778D48 /* hfs.util.8 in Copy man8 */, ); - name = "Copy InfoPlist.strings"; + name = "Copy man8"; runOnlyForDeploymentPostprocessing = 1; }; /* End PBXCopyFilesBuildPhase section */ /* Begin PBXFileReference section */ + 4D07DCB81538EF3A002B57CB /* fstyp_hfs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fstyp_hfs; sourceTree = BUILT_PRODUCTS_DIR; }; + 4D07DCC21538EF92002B57CB /* fstyp_hfs.8 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = fstyp_hfs.8; sourceTree = "<group>"; }; + 4D07DCC31538EF92002B57CB /* fstyp_hfs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fstyp_hfs.c; sourceTree = "<group>"; }; + 4D07DCED153C88B2002B57CB /* fsck.strings */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.strings; path = fsck.strings; sourceTree = BUILT_PRODUCTS_DIR; }; + 4D0E899C1534FE65004CD678 /* mount_hfs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mount_hfs; sourceTree = BUILT_PRODUCTS_DIR; }; + 4D0E89A51534FF48004CD678 /* hfs_endian.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hfs_endian.h; sourceTree = "<group>"; }; + 4D0E89A61534FF48004CD678 /* mount_hfs.8 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = mount_hfs.8; sourceTree = "<group>"; }; + 4D0E89A71534FF48004CD678 /* mount_hfs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mount_hfs.c; sourceTree = "<group>"; }; + 4D0E89A81534FF48004CD678 /* optical.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = optical.c; sourceTree = "<group>"; }; + 4D0E89A91534FF48004CD678 /* optical.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optical.h; sourceTree = "<group>"; }; + 4DE6C7461535012200C11066 /* IOKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = IOKit.framework; path = /System/Library/Frameworks/IOKit.framework; sourceTree = "<absolute>"; }; + 4DE6C74A1535018100C11066 /* libutil.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libutil.dylib; path = /usr/lib/libutil.dylib; sourceTree = "<absolute>"; }; + 4DE6C75B153504C100C11066 /* newfs_hfs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = newfs_hfs; sourceTree = BUILT_PRODUCTS_DIR; }; + 4DE6C7641535050700C11066 /* hfs_endian.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfs_endian.c; sourceTree = "<group>"; }; + 
4DE6C7651535050700C11066 /* hfs_endian.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hfs_endian.h; sourceTree = "<group>"; }; + 4DE6C7661535050700C11066 /* makehfs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = makehfs.c; sourceTree = "<group>"; }; + 4DE6C7671535050700C11066 /* newfs_hfs.8 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = newfs_hfs.8; sourceTree = "<group>"; }; + 4DE6C7681535050700C11066 /* newfs_hfs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = newfs_hfs.c; sourceTree = "<group>"; }; + 4DE6C7691535050700C11066 /* newfs_hfs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = newfs_hfs.h; sourceTree = "<group>"; }; + 4DFD93F41535FF510039B6BA /* fsck_hfs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fsck_hfs; sourceTree = BUILT_PRODUCTS_DIR; }; + 4DFD9416153600060039B6BA /* cache.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cache.c; sourceTree = "<group>"; }; + 4DFD9417153600060039B6BA /* cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cache.h; sourceTree = "<group>"; }; + 4DFD9419153600060039B6BA /* BlockCache.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BlockCache.c; sourceTree = "<group>"; }; + 4DFD941A153600060039B6BA /* BTree.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTree.c; sourceTree = "<group>"; }; + 4DFD941B153600060039B6BA /* BTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BTree.h; sourceTree = "<group>"; }; + 4DFD941C153600060039B6BA /* BTreeAllocate.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTreeAllocate.c; sourceTree = "<group>"; }; + 4DFD941D153600060039B6BA /* BTreeMiscOps.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTreeMiscOps.c; sourceTree = "<group>"; }; + 4DFD941E153600060039B6BA /* BTreeNodeOps.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTreeNodeOps.c; sourceTree = "<group>"; }; + 4DFD941F153600060039B6BA /* BTreePrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BTreePrivate.h; sourceTree = "<group>"; }; + 4DFD9420153600060039B6BA /* BTreeScanner.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTreeScanner.c; sourceTree = "<group>"; }; + 4DFD9421153600060039B6BA /* BTreeScanner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BTreeScanner.h; sourceTree = "<group>"; }; + 4DFD9422153600060039B6BA /* BTreeTreeOps.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = BTreeTreeOps.c; sourceTree = "<group>"; }; + 4DFD9423153600060039B6BA /* CaseFolding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CaseFolding.h; sourceTree = "<group>"; }; + 4DFD9424153600060039B6BA /* CatalogCheck.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = CatalogCheck.c; sourceTree = "<group>"; }; + 4DFD9425153600060039B6BA /* CheckHFS.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CheckHFS.h; sourceTree = "<group>"; }; + 4DFD9426153600060039B6BA /* DecompData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DecompData.h; sourceTree = "<group>"; }; + 4DFD9427153600060039B6BA /* DecompDataEnums.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DecompDataEnums.h; sourceTree = "<group>"; }; + 4DFD9428153600060039B6BA /* DecompMakeData.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = DecompMakeData.c; sourceTree = "<group>"; }; + 4DFD9429153600060039B6BA /* dirhardlink.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dirhardlink.c; sourceTree = "<group>"; }; + 4DFD942A153600060039B6BA /* FixDecompsNotes.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = FixDecompsNotes.txt; sourceTree = "<group>"; }; + 4DFD942B153600060039B6BA /* HardLinkCheck.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = HardLinkCheck.c; sourceTree = "<group>"; }; + 4DFD942C153600060039B6BA /* hfs_endian.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfs_endian.c; sourceTree = "<group>"; }; + 4DFD942D153600060039B6BA /* hfs_endian.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hfs_endian.h; sourceTree = "<group>"; }; + 4DFD942F153600060039B6BA /* SAllocate.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SAllocate.c; sourceTree = "<group>"; }; + 4DFD9430153600060039B6BA /* SBTree.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SBTree.c; sourceTree = "<group>"; }; + 4DFD9431153600060039B6BA /* SCatalog.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SCatalog.c; sourceTree = "<group>"; }; + 4DFD9432153600060039B6BA /* Scavenger.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Scavenger.h; sourceTree = "<group>"; }; + 4DFD9433153600060039B6BA /* SControl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SControl.c; sourceTree = "<group>"; }; + 4DFD9434153600060039B6BA /* SDevice.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SDevice.c; sourceTree = "<group>"; }; + 4DFD9435153600060039B6BA /* SExtents.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SExtents.c; sourceTree = "<group>"; }; + 4DFD9436153600060039B6BA /* SKeyCompare.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SKeyCompare.c; sourceTree = "<group>"; }; + 4DFD9437153600060039B6BA /* SRebuildBTree.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SRebuildBTree.c; sourceTree = "<group>"; }; + 4DFD9438153600060039B6BA /* SRepair.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SRepair.c; sourceTree = "<group>"; }; + 4DFD9439153600060039B6BA /* SRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SRuntime.h; sourceTree = "<group>"; }; + 4DFD943A153600060039B6BA /* SStubs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.c; path = SStubs.c; sourceTree = "<group>"; }; + 4DFD943B153600060039B6BA /* SUtils.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SUtils.c; sourceTree = "<group>"; }; + 4DFD943C153600060039B6BA /* SVerify1.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SVerify1.c; sourceTree = "<group>"; }; + 4DFD943D153600060039B6BA /* SVerify2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SVerify2.c; sourceTree = "<group>"; }; + 4DFD943E153600060039B6BA /* uuid.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = uuid.c; sourceTree = "<group>"; }; + 4DFD943F153600060039B6BA /* VolumeBitmapCheck.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = VolumeBitmapCheck.c; sourceTree = "<group>"; }; + 4DFD9441153600060039B6BA /* fsck_gui_interface_design.rtf */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.rtf; path = fsck_gui_interface_design.rtf; sourceTree = "<group>"; }; + 4DFD9442153600060039B6BA /* fsck_debug.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fsck_debug.c; sourceTree = "<group>"; }; + 4DFD9443153600060039B6BA /* fsck_debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_debug.h; sourceTree = "<group>"; }; + 4DFD9444153600060039B6BA /* fsck_hfs_msgnums.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_hfs_msgnums.h; sourceTree = "<group>"; }; + 4DFD9445153600060039B6BA /* fsck_hfs_strings.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fsck_hfs_strings.c; sourceTree = "<group>"; }; + 4DFD9446153600060039B6BA /* fsck_hfs.8 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = fsck_hfs.8; sourceTree = "<group>"; }; + 4DFD9447153600060039B6BA /* fsck_hfs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fsck_hfs.c; sourceTree = "<group>"; }; + 4DFD9448153600060039B6BA /* fsck_hfs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_hfs.h; sourceTree = "<group>"; }; + 4DFD9449153600060039B6BA /* fsck_keys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_keys.h; sourceTree = "<group>"; }; + 4DFD944A153600060039B6BA /* fsck_messages.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fsck_messages.c; sourceTree = "<group>"; }; + 4DFD944B153600060039B6BA /* fsck_messages.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_messages.h; sourceTree = "<group>"; }; + 4DFD944C153600060039B6BA /* fsck_msgnums.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fsck_msgnums.h; sourceTree = "<group>"; }; + 4DFD944D153600060039B6BA /* fsck_strings.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fsck_strings.c; sourceTree = "<group>"; }; + 4DFD944E153600060039B6BA /* utilities.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = utilities.c; sourceTree = "<group>"; }; + 4DFD94AF153649070039B6BA /* newfs_hfs_debug */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; 
includeInIndex = 0; path = newfs_hfs_debug; sourceTree = BUILT_PRODUCTS_DIR; }; + 4DFD94E615373C2C0039B6BA /* fsck_makestrings */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fsck_makestrings; sourceTree = BUILT_PRODUCTS_DIR; }; + 4DFD95121537402A0039B6BA /* hfs.fs */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = hfs.fs; sourceTree = BUILT_PRODUCTS_DIR; }; + 4DFD9537153746210039B6BA /* English */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = "<group>"; }; + 4DFD9539153746B30039B6BA /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; + 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = hfs.xcconfig; sourceTree = "<group>"; }; + 7279A68B1593AA5C00192947 /* fsck_journal.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = fsck_journal.c; path = dfalib/fsck_journal.c; sourceTree = "<group>"; }; + 7279A68C1593AA5C00192947 /* fsck_journal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = fsck_journal.h; path = dfalib/fsck_journal.h; sourceTree = "<group>"; }; C1B6FA0610CC0A0A00778D48 /* hfsutil_jnl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfsutil_jnl.c; sourceTree = "<group>"; }; C1B6FA0710CC0A0A00778D48 /* hfsutil_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hfsutil_main.c; sourceTree = "<group>"; }; C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = /System/Library/Frameworks/CoreFoundation.framework; sourceTree = "<absolute>"; }; @@ -114,13 +378,62 @@ FDD9FA5014A1343D0043D4A9 /* Sparse.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Sparse.h; sourceTree = "<group>"; }; FDD9FA5114A1343D0043D4A9 /* SparseBundle.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = SparseBundle.c; sourceTree = "<group>"; }; FDD9FA5B14A135840043D4A9 /* libz.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.dylib; path = /usr/lib/libz.dylib; sourceTree = "<absolute>"; }; - FDD9FA6214A138960043D4A9 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; - FDD9FA6314A138960043D4A9 /* InfoPlist.strings */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; path = InfoPlist.strings; sourceTree = "<group>"; }; - FDD9FA6414A138960043D4A9 /* PkgInfo */ = {isa = PBXFileReference; lastKnownFileType = text; path = PkgInfo; sourceTree = "<group>"; }; - FDD9FA6514A138960043D4A9 /* version.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = version.plist; sourceTree = "<group>"; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + 4D07DCB51538EF3A002B57CB /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4D0E89991534FE65004CD678 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 
4DE6C7491535012B00C11066 /* CoreFoundation.framework in Frameworks */, + 4DE6C74C1535023700C11066 /* IOKit.framework in Frameworks */, + 4DE6C756153502F800C11066 /* libutil.dylib in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DE6C758153504C100C11066 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DE6C76D1535052A00C11066 /* CoreFoundation.framework in Frameworks */, + 4DE6C76E1535052C00C11066 /* IOKit.framework in Frameworks */, + 4DE6C76F1535052E00C11066 /* libutil.dylib in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD93F11535FF510039B6BA /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD9472153601F50039B6BA /* CoreFoundation.framework in Frameworks */, + 4DFD9473153601F80039B6BA /* IOKit.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD94A8153649070039B6BA /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD94A9153649070039B6BA /* CoreFoundation.framework in Frameworks */, + 4DFD94AA153649070039B6BA /* IOKit.framework in Frameworks */, + 4DFD94AB153649070039B6BA /* libutil.dylib in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD94DF15373C2C0039B6BA /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; 8DD76FAD0486AB0100D96B5E /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -144,20 +457,142 @@ 08FB7794FE84155DC02AAC07 /* hfs */ = { isa = PBXGroup; children = ( - 08FB7795FE84155DC02AAC07 /* Source */, + 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */, + FDD9FA4614A1343D0043D4A9 /* CopyHFSMeta */, + FDD9FA3F14A1335D0043D4A9 /* hfs_util */, + 4D0E899E1534FE65004CD678 /* mount_hfs */, + 4DE6C75D153504C100C11066 /* newfs_hfs */, + 4DFD93F61535FF510039B6BA /* fsck_hfs */, + 4DFD95141537402A0039B6BA /* hfs.fs */, + 4D07DCBA1538EF3A002B57CB /* fstyp_hfs */, FDD9FA4014A133A50043D4A9 /* Frameworks */, C1B6FD2C10CC0DB200778D48 /* Products */, ); name = hfs; sourceTree = "<group>"; }; - 08FB7795FE84155DC02AAC07 /* Source */ = { + 4D07DCBA1538EF3A002B57CB /* fstyp_hfs */ = { isa = PBXGroup; children = ( - FDD9FA4614A1343D0043D4A9 /* CopyHFSMeta */, - FDD9FA3F14A1335D0043D4A9 /* hfs_util */, + 4D07DCC31538EF92002B57CB /* fstyp_hfs.c */, + 4D07DCC21538EF92002B57CB /* fstyp_hfs.8 */, + ); + path = fstyp_hfs; + sourceTree = "<group>"; + }; + 4D0E899E1534FE65004CD678 /* mount_hfs */ = { + isa = PBXGroup; + children = ( + 4D0E89A51534FF48004CD678 /* hfs_endian.h */, + 4D0E89A91534FF48004CD678 /* optical.h */, + 4D0E89A71534FF48004CD678 /* mount_hfs.c */, + 4D0E89A81534FF48004CD678 /* optical.c */, + 4D0E89A61534FF48004CD678 /* mount_hfs.8 */, + ); + path = mount_hfs; + sourceTree = "<group>"; + }; + 4DE6C75D153504C100C11066 /* newfs_hfs */ = { + isa = PBXGroup; + children = ( + 4DE6C7651535050700C11066 /* hfs_endian.h */, + 4DE6C7691535050700C11066 /* newfs_hfs.h */, + 4DE6C7681535050700C11066 /* newfs_hfs.c */, + 4DE6C7661535050700C11066 /* makehfs.c */, + 4DE6C7641535050700C11066 /* hfs_endian.c */, + 4DE6C7671535050700C11066 /* newfs_hfs.8 */, + ); + path = newfs_hfs; + sourceTree = "<group>"; + }; + 4DFD93F61535FF510039B6BA /* fsck_hfs */ = { + isa = PBXGroup; + children = ( + 7279A68B1593AA5C00192947 /* fsck_journal.c */, + 7279A68C1593AA5C00192947 /* fsck_journal.h */, + 4DFD9417153600060039B6BA /* 
cache.h */, + 4DFD9443153600060039B6BA /* fsck_debug.h */, + 4DFD9444153600060039B6BA /* fsck_hfs_msgnums.h */, + 4DFD9448153600060039B6BA /* fsck_hfs.h */, + 4DFD9449153600060039B6BA /* fsck_keys.h */, + 4DFD944B153600060039B6BA /* fsck_messages.h */, + 4DFD944C153600060039B6BA /* fsck_msgnums.h */, + 4DFD9416153600060039B6BA /* cache.c */, + 4DFD9442153600060039B6BA /* fsck_debug.c */, + 4DFD9445153600060039B6BA /* fsck_hfs_strings.c */, + 4DFD9447153600060039B6BA /* fsck_hfs.c */, + 4DFD944A153600060039B6BA /* fsck_messages.c */, + 4DFD944D153600060039B6BA /* fsck_strings.c */, + 4DFD944E153600060039B6BA /* utilities.c */, + 4DFD9418153600060039B6BA /* dfalib */, + 4DFD9440153600060039B6BA /* docs */, + 4DFD9446153600060039B6BA /* fsck_hfs.8 */, + ); + path = fsck_hfs; + sourceTree = "<group>"; + }; + 4DFD9418153600060039B6BA /* dfalib */ = { + isa = PBXGroup; + children = ( + 4DFD941B153600060039B6BA /* BTree.h */, + 4DFD941F153600060039B6BA /* BTreePrivate.h */, + 4DFD9421153600060039B6BA /* BTreeScanner.h */, + 4DFD9423153600060039B6BA /* CaseFolding.h */, + 4DFD9425153600060039B6BA /* CheckHFS.h */, + 4DFD9426153600060039B6BA /* DecompData.h */, + 4DFD9427153600060039B6BA /* DecompDataEnums.h */, + 4DFD942D153600060039B6BA /* hfs_endian.h */, + 4DFD9432153600060039B6BA /* Scavenger.h */, + 4DFD9439153600060039B6BA /* SRuntime.h */, + 4DFD9419153600060039B6BA /* BlockCache.c */, + 4DFD941A153600060039B6BA /* BTree.c */, + 4DFD941C153600060039B6BA /* BTreeAllocate.c */, + 4DFD941D153600060039B6BA /* BTreeMiscOps.c */, + 4DFD941E153600060039B6BA /* BTreeNodeOps.c */, + 4DFD9420153600060039B6BA /* BTreeScanner.c */, + 4DFD9422153600060039B6BA /* BTreeTreeOps.c */, + 4DFD9424153600060039B6BA /* CatalogCheck.c */, + 4DFD9429153600060039B6BA /* dirhardlink.c */, + 4DFD942B153600060039B6BA /* HardLinkCheck.c */, + 4DFD942C153600060039B6BA /* hfs_endian.c */, + 4DFD942F153600060039B6BA /* SAllocate.c */, + 4DFD9430153600060039B6BA /* SBTree.c */, + 4DFD9431153600060039B6BA /* SCatalog.c */, + 4DFD9433153600060039B6BA /* SControl.c */, + 4DFD9434153600060039B6BA /* SDevice.c */, + 4DFD9435153600060039B6BA /* SExtents.c */, + 4DFD9436153600060039B6BA /* SKeyCompare.c */, + 4DFD9437153600060039B6BA /* SRebuildBTree.c */, + 4DFD9438153600060039B6BA /* SRepair.c */, + 4DFD943A153600060039B6BA /* SStubs.c */, + 4DFD943B153600060039B6BA /* SUtils.c */, + 4DFD943C153600060039B6BA /* SVerify1.c */, + 4DFD943D153600060039B6BA /* SVerify2.c */, + 4DFD943E153600060039B6BA /* uuid.c */, + 4DFD943F153600060039B6BA /* VolumeBitmapCheck.c */, + 4DFD9428153600060039B6BA /* DecompMakeData.c */, + 4DFD942A153600060039B6BA /* FixDecompsNotes.txt */, + ); + path = dfalib; + sourceTree = "<group>"; + }; + 4DFD9440153600060039B6BA /* docs */ = { + isa = PBXGroup; + children = ( + 4DFD9441153600060039B6BA /* fsck_gui_interface_design.rtf */, ); - name = Source; + path = docs; + sourceTree = "<group>"; + }; + 4DFD95141537402A0039B6BA /* hfs.fs */ = { + isa = PBXGroup; + children = ( + 4DFD9539153746B30039B6BA /* Info.plist */, + 4DFD9536153746210039B6BA /* InfoPlist.strings */, + 4D07DCED153C88B2002B57CB /* fsck.strings */, + ); + name = hfs.fs; + path = fs; sourceTree = "<group>"; }; C1B6FD2C10CC0DB200778D48 /* Products */ = { @@ -165,6 +600,13 @@ children = ( C1B6FD2B10CC0DB200778D48 /* hfs.util */, FDD9FA2C14A132BF0043D4A9 /* CopyHFSMeta */, + 4D0E899C1534FE65004CD678 /* mount_hfs */, + 4DE6C75B153504C100C11066 /* newfs_hfs */, + 4DFD93F41535FF510039B6BA /* fsck_hfs */, + 4DFD94AF153649070039B6BA /* 
newfs_hfs_debug */, + 4DFD94E615373C2C0039B6BA /* fsck_makestrings */, + 4DFD95121537402A0039B6BA /* hfs.fs */, + 4D07DCB81538EF3A002B57CB /* fstyp_hfs */, ); name = Products; sourceTree = "<group>"; @@ -172,13 +614,9 @@ FDD9FA3F14A1335D0043D4A9 /* hfs_util */ = { isa = PBXGroup; children = ( - FDD9FA6214A138960043D4A9 /* Info.plist */, - FDD9FA6314A138960043D4A9 /* InfoPlist.strings */, - FDD9FA6414A138960043D4A9 /* PkgInfo */, C1B6FA2F10CC0B8A00778D48 /* hfs.util.8 */, C1B6FA0610CC0A0A00778D48 /* hfsutil_jnl.c */, C1B6FA0710CC0A0A00778D48 /* hfsutil_main.c */, - FDD9FA6514A138960043D4A9 /* version.plist */, ); path = hfs_util; sourceTree = "<group>"; @@ -187,7 +625,9 @@ isa = PBXGroup; children = ( C1B6FA2210CC0AF400778D48 /* CoreFoundation.framework */, + 4DE6C7461535012200C11066 /* IOKit.framework */, FDD9FA5B14A135840043D4A9 /* libz.dylib */, + 4DE6C74A1535018100C11066 /* libutil.dylib */, ); name = Frameworks; sourceTree = "<group>"; @@ -212,21 +652,140 @@ /* End PBXGroup section */ /* Begin PBXNativeTarget section */ - 8DD76FA90486AB0100D96B5E /* hfs_util */ = { + 4D07DCB71538EF3A002B57CB /* fstyp_hfs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4D07DCC01538EF3A002B57CB /* Build configuration list for PBXNativeTarget "fstyp_hfs" */; + buildPhases = ( + 4D07DCB41538EF3A002B57CB /* Sources */, + 4D07DCB51538EF3A002B57CB /* Frameworks */, + 4D07DCB61538EF3A002B57CB /* Copy man8 */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = fstyp_hfs; + productName = fstyp_hfs; + productReference = 4D07DCB81538EF3A002B57CB /* fstyp_hfs */; + productType = "com.apple.product-type.tool"; + }; + 4D0E899B1534FE65004CD678 /* mount_hfs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4D0E89A31534FE65004CD678 /* Build configuration list for PBXNativeTarget "mount_hfs" */; + buildPhases = ( + 4D0E89981534FE65004CD678 /* Sources */, + 4D0E89991534FE65004CD678 /* Frameworks */, + 4D0E899A1534FE65004CD678 /* Copy man8 */, + 4DFD953E153783DA0039B6BA /* Create symlink */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = mount_hfs; + productName = mount_hfs; + productReference = 4D0E899C1534FE65004CD678 /* mount_hfs */; + productType = "com.apple.product-type.tool"; + }; + 4DE6C75A153504C100C11066 /* newfs_hfs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4DE6C762153504C100C11066 /* Build configuration list for PBXNativeTarget "newfs_hfs" */; + buildPhases = ( + 4DE6C757153504C100C11066 /* Sources */, + 4DE6C758153504C100C11066 /* Frameworks */, + 4DE6C759153504C100C11066 /* Copy man8 */, + 4DFD953F1537841C0039B6BA /* Create symlink */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = newfs_hfs; + productName = newfs_hfs; + productReference = 4DE6C75B153504C100C11066 /* newfs_hfs */; + productType = "com.apple.product-type.tool"; + }; + 4DFD93F31535FF510039B6BA /* fsck_hfs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4DFD93FC1535FF510039B6BA /* Build configuration list for PBXNativeTarget "fsck_hfs" */; + buildPhases = ( + 4DFD93F01535FF510039B6BA /* Sources */, + 4DFD93F11535FF510039B6BA /* Frameworks */, + 4DFD93F21535FF510039B6BA /* Copy man8 */, + 4DFD953B15377BC60039B6BA /* Copy fsck_keys.h */, + 4DFD95401537844E0039B6BA /* Create symlink */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = fsck_hfs; + productName = fsck_hfs; + productReference = 4DFD93F41535FF510039B6BA /* fsck_hfs */; + productType = "com.apple.product-type.tool"; + }; + 4DFD94A3153649070039B6BA /* newfs_hfs_debug */ = { + isa = 
PBXNativeTarget; + buildConfigurationList = 4DFD94AD153649070039B6BA /* Build configuration list for PBXNativeTarget "newfs_hfs_debug" */; + buildPhases = ( + 4DFD94A4153649070039B6BA /* Sources */, + 4DFD94A8153649070039B6BA /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = newfs_hfs_debug; + productName = newfs_hfs; + productReference = 4DFD94AF153649070039B6BA /* newfs_hfs_debug */; + productType = "com.apple.product-type.tool"; + }; + 4DFD94BC15373C2C0039B6BA /* fsck_makestrings */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4DFD94E415373C2C0039B6BA /* Build configuration list for PBXNativeTarget "fsck_makestrings" */; + buildPhases = ( + 4DFD94BD15373C2C0039B6BA /* Sources */, + 4DFD94DF15373C2C0039B6BA /* Frameworks */, + 4DD3027B1538DC3D00001AA0 /* Generate fsck.strings */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = fsck_makestrings; + productName = fsck_hfs; + productReference = 4DFD94E615373C2C0039B6BA /* fsck_makestrings */; + productType = "com.apple.product-type.tool"; + }; + 4DFD95111537402A0039B6BA /* hfs.fs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4DFD951B1537402A0039B6BA /* Build configuration list for PBXNativeTarget "hfs.fs" */; + buildPhases = ( + 4DFD95101537402A0039B6BA /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = hfs.fs; + productName = hfs.fs; + productReference = 4DFD95121537402A0039B6BA /* hfs.fs */; + productType = "com.apple.product-type.bundle"; + }; + 8DD76FA90486AB0100D96B5E /* hfs.util */ = { isa = PBXNativeTarget; - buildConfigurationList = 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "hfs_util" */; + buildConfigurationList = 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "hfs.util" */; buildPhases = ( 8DD76FAB0486AB0100D96B5E /* Sources */, 8DD76FAD0486AB0100D96B5E /* Frameworks */, 8DD76FAF0486AB0100D96B5E /* Copy man8 */, - FDD9FA6714A138AB0043D4A9 /* Copy Contents */, - FDD9FA6A14A138DC0043D4A9 /* Copy InfoPlist.strings */, + 4DFD9541153785060039B6BA /* Create symlink */, ); buildRules = ( ); dependencies = ( ); - name = hfs_util; + name = hfs.util; productInstallPath = "$(HOME)/bin"; productName = hfs; productReference = C1B6FD2B10CC0DB200778D48 /* hfs.util */; @@ -253,26 +812,226 @@ /* Begin PBXProject section */ 08FB7793FE84155DC02AAC07 /* Project object */ = { isa = PBXProject; + attributes = { + LastUpgradeCheck = 0450; + }; buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "hfs" */; - compatibilityVersion = "Xcode 3.1"; + compatibilityVersion = "Xcode 3.2"; developmentRegion = English; hasScannedForEncodings = 1; knownRegions = ( en, + English, ); mainGroup = 08FB7794FE84155DC02AAC07 /* hfs */; productRefGroup = C1B6FD2C10CC0DB200778D48 /* Products */; projectDirPath = ""; projectRoot = ""; targets = ( - FDD9FA3714A132E40043D4A9 /* hfs */, - 8DD76FA90486AB0100D96B5E /* hfs_util */, + 4DD302571538DB2700001AA0 /* All_MacOSX */, + 4DD3025A1538DB3A00001AA0 /* All_iOS */, + 4DBD523B1548A488007AA736 /* Common */, + 4DFD95111537402A0039B6BA /* hfs.fs */, + 8DD76FA90486AB0100D96B5E /* hfs.util */, FDD9FA2B14A132BF0043D4A9 /* CopyHFSMeta */, + 4D0E899B1534FE65004CD678 /* mount_hfs */, + 4DE6C75A153504C100C11066 /* newfs_hfs */, + 4DFD94A3153649070039B6BA /* newfs_hfs_debug */, + 4DFD93F31535FF510039B6BA /* fsck_hfs */, + 4D07DCB71538EF3A002B57CB /* fstyp_hfs */, + 4DFD94BC15373C2C0039B6BA /* fsck_makestrings */, ); }; /* End PBXProject section */ +/* Begin 
PBXResourcesBuildPhase section */ + 4DFD95101537402A0039B6BA /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD9538153746210039B6BA /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 4DD3027B1538DC3D00001AA0 /* Generate fsck.strings */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(BUILT_PRODUCTS_DIR)/fsck_make_strings", + ); + name = "Generate fsck.strings"; + outputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_ENGLISH_PATH)/fsck.strings", + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "${BUILT_PRODUCTS_DIR}/fsck_makestrings | iconv -f UTF-8 -t UTF-16 > ${DSTROOT}${FS_BUNDLE_ENGLISH_PATH}/fsck.strings"; + showEnvVarsInLog = 0; + }; + 4DFD953E153783DA0039B6BA /* Create symlink */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/mount_hfs", + ); + name = "Create symlink"; + outputPaths = ( + "$(DSTROOT)/sbin/mount_hfs", + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/mount_hfs ${DSTROOT}/sbin/mount_hfs\nchgrp -h wheel ${DSTROOT}/sbin/mount_hfs\n"; + showEnvVarsInLog = 0; + }; + 4DFD953F1537841C0039B6BA /* Create symlink */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/newfs_hfs", + ); + name = "Create symlink"; + outputPaths = ( + "$(DSTROOT)/sbin/newfs_hfs", + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/newfs_hfs ${DSTROOT}/sbin/newfs_hfs\nchgrp -h wheel ${DSTROOT}/sbin/newfs_hfs\n"; + showEnvVarsInLog = 0; + }; + 4DFD95401537844E0039B6BA /* Create symlink */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/fsck_hfs", + ); + name = "Create symlink"; + outputPaths = ( + "$(DSTROOT)/sbin/fsck_hfs", + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "ln -sfhv ${FS_BUNDLE_BIN_PATH}/fsck_hfs ${DSTROOT}/sbin/fsck_hfs\nchgrp -h wheel ${DSTROOT}/sbin/fsck_hfs\n"; + showEnvVarsInLog = 0; + }; + 4DFD9541153785060039B6BA /* Create symlink */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_BIN_PATH)/hfs.util", + ); + name = "Create symlink"; + outputPaths = ( + "$(DSTROOT)$(FS_BUNDLE_PATH)/hfs.util", + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "ln -sfhv ${FS_BUNDLE_BIN_DIR}/hfs.util ${DSTROOT}${FS_BUNDLE_PATH}/hfs.util"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + /* Begin PBXSourcesBuildPhase section */ + 4D07DCB41538EF3A002B57CB /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4D07DCC41538EF92002B57CB /* fstyp_hfs.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4D0E89981534FE65004CD678 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4D0E89AA1534FF48004CD678 /* mount_hfs.c in Sources */, + 4D0E89AB1534FF48004CD678 /* optical.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DE6C757153504C100C11066 /* Sources */ = { + isa = PBXSourcesBuildPhase; + 
buildActionMask = 2147483647; + files = ( + 4DE6C76C1535050700C11066 /* newfs_hfs.c in Sources */, + 4DE6C76B1535050700C11066 /* makehfs.c in Sources */, + 4DE6C76A1535050700C11066 /* hfs_endian.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD93F01535FF510039B6BA /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD946E153600060039B6BA /* fsck_hfs.c in Sources */, + 4DFD9471153600060039B6BA /* utilities.c in Sources */, + 4DFD944F153600060039B6BA /* cache.c in Sources */, + 4DFD946C153600060039B6BA /* fsck_debug.c in Sources */, + 4DFD946F153600060039B6BA /* fsck_messages.c in Sources */, + 4DFD9470153600060039B6BA /* fsck_strings.c in Sources */, + 4DFD946D153600060039B6BA /* fsck_hfs_strings.c in Sources */, + 4DFD945B153600060039B6BA /* hfs_endian.c in Sources */, + 4DFD9450153600060039B6BA /* BlockCache.c in Sources */, + 4DFD9451153600060039B6BA /* BTree.c in Sources */, + 4DFD9452153600060039B6BA /* BTreeAllocate.c in Sources */, + 4DFD9453153600060039B6BA /* BTreeMiscOps.c in Sources */, + 4DFD9454153600060039B6BA /* BTreeNodeOps.c in Sources */, + 4DFD9455153600060039B6BA /* BTreeScanner.c in Sources */, + 4DFD9456153600060039B6BA /* BTreeTreeOps.c in Sources */, + 4DFD9457153600060039B6BA /* CatalogCheck.c in Sources */, + 4DFD945A153600060039B6BA /* HardLinkCheck.c in Sources */, + 4DFD9459153600060039B6BA /* dirhardlink.c in Sources */, + 4DFD945E153600060039B6BA /* SBTree.c in Sources */, + 4DFD9460153600060039B6BA /* SControl.c in Sources */, + 4DFD9468153600060039B6BA /* SVerify1.c in Sources */, + 4DFD9469153600060039B6BA /* SVerify2.c in Sources */, + 4DFD9465153600060039B6BA /* SRepair.c in Sources */, + 4DFD9464153600060039B6BA /* SRebuildBTree.c in Sources */, + 4DFD9467153600060039B6BA /* SUtils.c in Sources */, + 4DFD9463153600060039B6BA /* SKeyCompare.c in Sources */, + 4DFD9461153600060039B6BA /* SDevice.c in Sources */, + 4DFD9462153600060039B6BA /* SExtents.c in Sources */, + 4DFD945D153600060039B6BA /* SAllocate.c in Sources */, + 4DFD945F153600060039B6BA /* SCatalog.c in Sources */, + 4DFD9466153600060039B6BA /* SStubs.c in Sources */, + 4DFD946B153600060039B6BA /* VolumeBitmapCheck.c in Sources */, + 4DFD946A153600060039B6BA /* uuid.c in Sources */, + 7279A68D1593AA5C00192947 /* fsck_journal.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD94A4153649070039B6BA /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD94A5153649070039B6BA /* newfs_hfs.c in Sources */, + 4DFD94A6153649070039B6BA /* makehfs.c in Sources */, + 4DFD94A7153649070039B6BA /* hfs_endian.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4DFD94BD15373C2C0039B6BA /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4DFD94C215373C2C0039B6BA /* fsck_messages.c in Sources */, + 4DFD94C315373C2C0039B6BA /* fsck_strings.c in Sources */, + 4DFD94C415373C2C0039B6BA /* fsck_hfs_strings.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 8DD76FAB0486AB0100D96B5E /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -299,56 +1058,187 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ - FDD9FA3C14A132F10043D4A9 /* PBXTargetDependency */ = { + 4DBD523F1548A499007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DFD95111537402A0039B6BA /* hfs.fs */; + targetProxy = 4DBD523E1548A499007AA736 /* 
PBXContainerItemProxy */; + }; + 4DBD52411548A49A007AA736 /* PBXTargetDependency */ = { isa = PBXTargetDependency; - target = 8DD76FA90486AB0100D96B5E /* hfs_util */; - targetProxy = FDD9FA3B14A132F10043D4A9 /* PBXContainerItemProxy */; + target = 8DD76FA90486AB0100D96B5E /* hfs.util */; + targetProxy = 4DBD52401548A49A007AA736 /* PBXContainerItemProxy */; }; - FDD9FA3E14A132F20043D4A9 /* PBXTargetDependency */ = { + 4DBD52431548A49D007AA736 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = FDD9FA2B14A132BF0043D4A9 /* CopyHFSMeta */; - targetProxy = FDD9FA3D14A132F20043D4A9 /* PBXContainerItemProxy */; + targetProxy = 4DBD52421548A49D007AA736 /* PBXContainerItemProxy */; + }; + 4DBD52451548A4A0007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4D0E899B1534FE65004CD678 /* mount_hfs */; + targetProxy = 4DBD52441548A4A0007AA736 /* PBXContainerItemProxy */; + }; + 4DBD52471548A4A2007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DE6C75A153504C100C11066 /* newfs_hfs */; + targetProxy = 4DBD52461548A4A2007AA736 /* PBXContainerItemProxy */; + }; + 4DBD52491548A4A4007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DFD94A3153649070039B6BA /* newfs_hfs_debug */; + targetProxy = 4DBD52481548A4A4007AA736 /* PBXContainerItemProxy */; + }; + 4DBD524B1548A4A7007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DFD93F31535FF510039B6BA /* fsck_hfs */; + targetProxy = 4DBD524A1548A4A7007AA736 /* PBXContainerItemProxy */; + }; + 4DBD524D1548A4AA007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4D07DCB71538EF3A002B57CB /* fstyp_hfs */; + targetProxy = 4DBD524C1548A4AA007AA736 /* PBXContainerItemProxy */; + }; + 4DBD524F1548A4C8007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DBD523B1548A488007AA736 /* Common */; + targetProxy = 4DBD524E1548A4C8007AA736 /* PBXContainerItemProxy */; + }; + 4DBD52511548A4D2007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DBD523B1548A488007AA736 /* Common */; + targetProxy = 4DBD52501548A4D2007AA736 /* PBXContainerItemProxy */; + }; + 4DBD52531548A4D4007AA736 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4DFD94BC15373C2C0039B6BA /* fsck_makestrings */; + targetProxy = 4DBD52521548A4D4007AA736 /* PBXContainerItemProxy */; }; /* End PBXTargetDependency section */ +/* Begin PBXVariantGroup section */ + 4DFD9536153746210039B6BA /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + 4DFD9537153746210039B6BA /* English */, + ); + name = InfoPlist.strings; + sourceTree = "<group>"; + }; +/* End PBXVariantGroup section */ + /* Begin XCBuildConfiguration section */ 1DEB928708733DD80010E9CD /* Release */ = { isa = XCBuildConfiguration; buildSettings = { - INSTALL_PATH = /System/Library/Filesystems/hfs.fs; + INSTALL_PATH = $FS_BUNDLE_BIN_PATH; PRODUCT_NAME = hfs.util; }; name = Release; }; 1DEB928B08733DD80010E9CD /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 4DFD953D15377C7D0039B6BA /* hfs.xcconfig */; buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CODE_SIGN_IDENTITY = "-"; - CURRENT_PROJECT_VERSION = "$(RC_ProjectSourceVersion)"; - DEAD_CODE_STRIPPING = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - PROVISIONING_PROFILE = ""; - USE_HEADERMAP = NO; - VERSIONING_SYSTEM = "apple-generic"; - WARNING_CFLAGS = ( - "-Wall", - "-W", - ); }; name = Release; }; - FDD9FA3414A132BF0043D4A9 /* Release 
*/ = { + 4D07DCBF1538EF3A002B57CB /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + INSTALL_PATH = /sbin; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4D0E89A41534FE65004CD678 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + EXCLUDED_SOURCE_FILE_NAMES = ""; + "EXCLUDED_SOURCE_FILE_NAMES[sdk=iphoneos*]" = optical.c; + "EXCLUDED_SOURCE_FILE_NAMES[sdk=iphonesimulator*]" = optical.c; + INSTALL_PATH = $FS_BUNDLE_BIN_PATH; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4DBD523D1548A488007AA736 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4DD302591538DB2700001AA0 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4DD3025C1538DB3A00001AA0 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4DE6C763153504C100C11066 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + INSTALL_PATH = $FS_BUNDLE_BIN_PATH; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4DFD93FB1535FF510039B6BA /* Release */ = { isa = XCBuildConfiguration; buildSettings = { - INSTALL_PATH = /System/Library/Filesystems/hfs.fs/Contents/Resources; + GCC_PREPROCESSOR_DEFINITIONS = ( + "BSD=1", + "CONFIG_HFS_TRIM=1", + "DEBUG_BUILD=0", + ); + INSTALL_PATH = $FS_BUNDLE_BIN_PATH; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; }; - FDD9FA3A14A132E40043D4A9 /* Release */ = { + 4DFD94AE153649070039B6BA /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_PREPROCESSOR_DEFINITIONS = "DEBUG_BUILD=1"; + PRODUCT_NAME = newfs_hfs_debug; + }; + name = Release; + }; + 4DFD94E515373C2C0039B6BA /* Release */ = { isa = XCBuildConfiguration; buildSettings = { + ARCHS = "$(NATIVE_ARCH_ACTUAL)"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "BSD=1", + "FSCK_MAKESTRINGS=1", + ); + PRODUCT_NAME = fsck_makestrings; + SKIP_INSTALL = YES; + }; + name = Release; + }; + 4DFD951C1537402A0039B6BA /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + INFOPLIST_FILE = fs/Info.plist; + INSTALL_PATH = /System/Library/Filesystems; + PRODUCT_NAME = hfs; + STRINGS_FILE_OUTPUT_ENCODING = "UTF-8"; + WRAPPER_EXTENSION = fs; + }; + name = Release; + }; + FDD9FA3414A132BF0043D4A9 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + INSTALL_PATH = $FS_BUNDLE_BIN_PATH; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; @@ -356,7 +1246,7 @@ /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ - 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "hfs_util" */ = { + 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "hfs.util" */ = { isa = XCConfigurationList; buildConfigurations = ( 1DEB928708733DD80010E9CD /* Release */, @@ -372,18 +1262,90 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - FDD9FA3514A132BF0043D4A9 /* Build configuration list for PBXNativeTarget "CopyHFSMeta" */ = { + 4D07DCC01538EF3A002B57CB /* Build configuration list for PBXNativeTarget "fstyp_hfs" */ = { isa = XCConfigurationList; buildConfigurations = ( - FDD9FA3414A132BF0043D4A9 /* Release */, + 4D07DCBF1538EF3A002B57CB /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4D0E89A31534FE65004CD678 /* Build configuration list for PBXNativeTarget "mount_hfs" */ 
= { + isa = XCConfigurationList; + buildConfigurations = ( + 4D0E89A41534FE65004CD678 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DBD523C1548A488007AA736 /* Build configuration list for PBXAggregateTarget "Common" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DBD523D1548A488007AA736 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DD302581538DB2700001AA0 /* Build configuration list for PBXAggregateTarget "All_MacOSX" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DD302591538DB2700001AA0 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DD3025B1538DB3A00001AA0 /* Build configuration list for PBXAggregateTarget "All_iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DD3025C1538DB3A00001AA0 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DE6C762153504C100C11066 /* Build configuration list for PBXNativeTarget "newfs_hfs" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DE6C763153504C100C11066 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DFD93FC1535FF510039B6BA /* Build configuration list for PBXNativeTarget "fsck_hfs" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DFD93FB1535FF510039B6BA /* Release */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - FDD9FA3814A132E40043D4A9 /* Build configuration list for PBXAggregateTarget "hfs" */ = { + 4DFD94AD153649070039B6BA /* Build configuration list for PBXNativeTarget "newfs_hfs_debug" */ = { isa = XCConfigurationList; buildConfigurations = ( - FDD9FA3A14A132E40043D4A9 /* Release */, + 4DFD94AE153649070039B6BA /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DFD94E415373C2C0039B6BA /* Build configuration list for PBXNativeTarget "fsck_makestrings" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DFD94E515373C2C0039B6BA /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4DFD951B1537402A0039B6BA /* Build configuration list for PBXNativeTarget "hfs.fs" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4DFD951C1537402A0039B6BA /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDD9FA3514A132BF0043D4A9 /* Build configuration list for PBXNativeTarget "CopyHFSMeta" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDD9FA3414A132BF0043D4A9 /* Release */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; diff --git a/hfs.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/hfs.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..b0c7fde --- /dev/null +++ b/hfs.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Workspace + version = "1.0"> + <FileRef + location = "self:hfs.xcodeproj"> + </FileRef> +</Workspace> diff --git a/hfs.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings b/hfs.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings new file mode 100644 index 0000000..08de0be --- /dev/null +++ b/hfs.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings @@ -0,0 +1,8 @@ +<?xml 
version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>IDEWorkspaceSharedSettings_AutocreateContextsIfNeeded</key> + <false/> +</dict> +</plist> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/All_MacOSX.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/All_MacOSX.xcscheme new file mode 100644 index 0000000..408a19d --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/All_MacOSX.xcscheme @@ -0,0 +1,62 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DD302571538DB2700001AA0" + BuildableName = "All_MacOSX" + BlueprintName = "All_MacOSX" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/All_iOS.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/All_iOS.xcscheme new file mode 100644 index 0000000..f044f4b --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/All_iOS.xcscheme @@ -0,0 +1,62 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DD3025A1538DB3A00001AA0" + BuildableName = "All_iOS" + BlueprintName = "All_iOS" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + </TestAction> 
+ <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/CopyHFSMeta.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/CopyHFSMeta.xcscheme new file mode 100644 index 0000000..0beac40 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/CopyHFSMeta.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "FDD9FA2B14A132BF0043D4A9" + BuildableName = "CopyHFSMeta" + BlueprintName = "CopyHFSMeta" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "FDD9FA2B14A132BF0043D4A9" + BuildableName = "CopyHFSMeta" + BlueprintName = "CopyHFSMeta" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "FDD9FA2B14A132BF0043D4A9" + BuildableName = "CopyHFSMeta" + BlueprintName = "CopyHFSMeta" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "FDD9FA2B14A132BF0043D4A9" + BuildableName = "CopyHFSMeta" + BlueprintName = "CopyHFSMeta" + ReferencedContainer = "container:hfs.xcodeproj"> + 
</BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/fsck_hfs.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/fsck_hfs.xcscheme new file mode 100644 index 0000000..fb61635 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/fsck_hfs.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD93F31535FF510039B6BA" + BuildableName = "fsck_hfs" + BlueprintName = "fsck_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD93F31535FF510039B6BA" + BuildableName = "fsck_hfs" + BlueprintName = "fsck_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD93F31535FF510039B6BA" + BuildableName = "fsck_hfs" + BlueprintName = "fsck_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD93F31535FF510039B6BA" + BuildableName = "fsck_hfs" + BlueprintName = "fsck_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/fsck_makestrings.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/fsck_makestrings.xcscheme new file mode 100644 index 0000000..206b5af --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/fsck_makestrings.xcscheme 
@@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94BC15373C2C0039B6BA" + BuildableName = "fsck_makestrings" + BlueprintName = "fsck_makestrings" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94BC15373C2C0039B6BA" + BuildableName = "fsck_makestrings" + BlueprintName = "fsck_makestrings" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94BC15373C2C0039B6BA" + BuildableName = "fsck_makestrings" + BlueprintName = "fsck_makestrings" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94BC15373C2C0039B6BA" + BuildableName = "fsck_makestrings" + BlueprintName = "fsck_makestrings" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/hfs.fs.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/hfs.fs.xcscheme new file mode 100644 index 0000000..7092565 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/hfs.fs.xcscheme @@ -0,0 +1,62 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD95111537402A0039B6BA" + BuildableName = "hfs.fs" + BlueprintName = 
"hfs.fs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/hfs_util.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/hfs_util.xcscheme new file mode 100644 index 0000000..4fe8abf --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/hfs_util.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "8DD76FA90486AB0100D96B5E" + BuildableName = "hfs.util" + BlueprintName = "hfs.util" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "8DD76FA90486AB0100D96B5E" + BuildableName = "hfs.util" + BlueprintName = "hfs.util" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "8DD76FA90486AB0100D96B5E" + BuildableName = "hfs.util" + BlueprintName = "hfs.util" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = 
"YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "8DD76FA90486AB0100D96B5E" + BuildableName = "hfs.util" + BlueprintName = "hfs.util" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/mount_hfs.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/mount_hfs.xcscheme new file mode 100644 index 0000000..d1b328f --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/mount_hfs.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4D0E899B1534FE65004CD678" + BuildableName = "mount_hfs" + BlueprintName = "mount_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4D0E899B1534FE65004CD678" + BuildableName = "mount_hfs" + BlueprintName = "mount_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4D0E899B1534FE65004CD678" + BuildableName = "mount_hfs" + BlueprintName = "mount_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4D0E899B1534FE65004CD678" + BuildableName = "mount_hfs" + BlueprintName = "mount_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + 
revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs.xcscheme new file mode 100644 index 0000000..191648d --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DE6C75A153504C100C11066" + BuildableName = "newfs_hfs" + BlueprintName = "newfs_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DE6C75A153504C100C11066" + BuildableName = "newfs_hfs" + BlueprintName = "newfs_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DE6C75A153504C100C11066" + BuildableName = "newfs_hfs" + BlueprintName = "newfs_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DE6C75A153504C100C11066" + BuildableName = "newfs_hfs" + BlueprintName = "newfs_hfs" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs_debug.xcscheme b/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs_debug.xcscheme new file mode 100644 index 0000000..c2d4e26 --- /dev/null +++ b/hfs.xcodeproj/xcshareddata/xcschemes/newfs_hfs_debug.xcscheme @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Scheme + LastUpgradeVersion = "0440" + version = "1.7"> + <BuildAction + parallelizeBuildables = "YES" + 
buildImplicitDependencies = "YES"> + <BuildActionEntries> + <BuildActionEntry + buildForTesting = "YES" + buildForRunning = "YES" + buildForProfiling = "YES" + buildForArchiving = "YES" + buildForAnalyzing = "YES"> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94A3153649070039B6BA" + BuildableName = "newfs_hfs_debug" + BlueprintName = "newfs_hfs_debug" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildActionEntry> + </BuildActionEntries> + </BuildAction> + <TestAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + shouldUseLaunchSchemeArgsEnv = "YES" + buildConfiguration = "Release"> + <Testables> + </Testables> + <MacroExpansion> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94A3153649070039B6BA" + BuildableName = "newfs_hfs_debug" + BlueprintName = "newfs_hfs_debug" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </MacroExpansion> + </TestAction> + <LaunchAction + selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" + selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + launchStyle = "0" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + ignoresPersistentStateOnLaunch = "NO" + debugDocumentVersioning = "YES" + allowLocationSimulation = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94A3153649070039B6BA" + BuildableName = "newfs_hfs_debug" + BlueprintName = "newfs_hfs_debug" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + <AdditionalOptions> + </AdditionalOptions> + </LaunchAction> + <ProfileAction + shouldUseLaunchSchemeArgsEnv = "YES" + savedToolIdentifier = "" + useCustomWorkingDirectory = "NO" + buildConfiguration = "Release" + debugDocumentVersioning = "YES"> + <BuildableProductRunnable> + <BuildableReference + BuildableIdentifier = "primary" + BlueprintIdentifier = "4DFD94A3153649070039B6BA" + BuildableName = "newfs_hfs_debug" + BlueprintName = "newfs_hfs_debug" + ReferencedContainer = "container:hfs.xcodeproj"> + </BuildableReference> + </BuildableProductRunnable> + </ProfileAction> + <AnalyzeAction + buildConfiguration = "Release"> + </AnalyzeAction> + <ArchiveAction + buildConfiguration = "Release" + revealArchiveInOrganizer = "YES"> + </ArchiveAction> + <InstallAction + buildConfiguration = "Release"> + </InstallAction> +</Scheme> diff --git a/hfs_util/PkgInfo b/hfs_util/PkgInfo deleted file mode 100644 index b2c4b86..0000000 --- a/hfs_util/PkgInfo +++ /dev/null @@ -1 +0,0 @@ -fs ???? \ No newline at end of file diff --git a/hfs_util/hfs.util.8 b/hfs_util/hfs.util.8 index 05efbcd..45fa380 100644 --- a/hfs_util/hfs.util.8 +++ b/hfs_util/hfs.util.8 @@ -138,7 +138,6 @@ Note that for the .Ar device references above, you must only supply the last component of the path to the device in question, such as disk0s2 rather than /dev/disk0s2. - .Sh SEE ALSO .Xr diskarbitrationd 8 .Sh HISTORY diff --git a/hfs_util/hfsutil_jnl.c b/hfs_util/hfsutil_jnl.c index c9fcae7..aab17de 100644 --- a/hfs_util/hfsutil_jnl.c +++ b/hfs_util/hfsutil_jnl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2009 Apple Inc. All rights reserved. + * Copyright (c) 1999-2012 Apple Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -340,287 +340,298 @@ get_embedded_offset(char *devname) static const char *journal_fname = ".journal"; static const char *jib_fname = ".journal_info_block"; -int -DoMakeJournaled(char *volname, int jsize) -{ - int fd, i, block_size, journal_size = 8*1024*1024; - char *buf; - int ret; - fstore_t fst; - int32_t jstart_block, jinfo_block; - int sysctl_info[8]; - JournalInfoBlock jib; - struct statfs sfs; - static char tmpname[MAXPATHLEN]; - off_t start_block, embedded_offset; +int +DoMakeJournaled(char *volname, int jsize) { + int fd, i, block_size, journal_size = 8*1024*1024; + char *buf; + int ret; + fstore_t fst; + int32_t jstart_block, jinfo_block; + int sysctl_info[8]; + JournalInfoBlock jib; + struct statfs sfs; + static char tmpname[MAXPATHLEN]; + off_t start_block, embedded_offset; + + if (statfs(volname, &sfs) != 0) { + fprintf(stderr, "Can't stat volume %s (%s).\n", volname, strerror(errno)); + return 10; + } - if (statfs(volname, &sfs) != 0) { - fprintf(stderr, "Can't stat volume %s (%s).\n", volname, strerror(errno)); - return 10; - } + // Make sure that we're HFS+. First we check the fstypename. + // If that's ok then we try to create a symlink (which won't + // work on plain hfs volumes but will work on hfs+ volumes). + // + if (strcmp(sfs.f_fstypename, "devfs") == 0) { + fprintf (stderr, "%s is a device node. Journal enable only works on a mounted HFS+ volume.\n", volname); + return 10; + } + snprintf(tmpname, sizeof(tmpname), "%s/is_vol_hfs_plus", volname); + if (strcmp(sfs.f_fstypename, "hfs") != 0 || + ((ret = symlink(tmpname, tmpname)) != 0 && errno == ENOTSUP)) { + fprintf(stderr, "%s is not an HFS+ volume. Journaling only works on HFS+ volumes.\n", + volname); + return 10; + } + unlink(tmpname); + + if (sfs.f_flags & MNT_JOURNALED) { + fprintf(stderr, "Volume %s is already journaled.\n", volname); + return 1; + } + + if (jsize != 0) { + journal_size = jsize; + } else { + int scale; + + // + // we want at least 8 megs of journal for each 100 gigs of + // disk space. We cap the size at 512 megs though. + // + scale = ((long long)sfs.f_bsize * (long long)((unsigned int)sfs.f_blocks)) / (100*1024*1024*1024ULL); + journal_size *= (scale + 1); + if (journal_size > 512 * 1024 * 1024) { + journal_size = 512 * 1024 * 1024; + } + } - // Make sure that we're HFS+. First we check the fstypename. - // If that's ok then we try to create a symlink (which won't - // work on plain hfs volumes but will work on hfs+ volumes). - // - if (strcmp(sfs.f_fstypename, "devfs") == 0) { - fprintf (stderr, "%s is a device node. Journal enable only works on a mounted HFS+ volume.\n", volname); + if (chdir(volname) != 0) { + fprintf(stderr, "Can't locate volume %s to make it journaled (%s).\n", + volname, strerror(errno)); return 10; - } - snprintf(tmpname, sizeof(tmpname), "%s/is_vol_hfs_plus", volname); - if (strcmp(sfs.f_fstypename, "hfs") != 0 || - ((ret = symlink(tmpname, tmpname)) != 0 && errno == ENOTSUP)) { - fprintf(stderr, "%s is not an HFS+ volume. 
Journaling only works on HFS+ volumes.\n", - volname); - return 10; - } - unlink(tmpname); + } - if (sfs.f_flags & MNT_JOURNALED) { - fprintf(stderr, "Volume %s is already journaled.\n", volname); - return 1; - } - if (jsize != 0) { - journal_size = jsize; - } else { - int scale; + embedded_offset = get_embedded_offset(volname); + if (embedded_offset < 0) { + fprintf(stderr, "Can't calculate the embedded offset (if any) for %s.\n", volname); + fprintf(stderr, "Journal creation failure.\n"); + return 15; + } + // printf("Embedded offset == 0x%llx\n", embedded_offset); - // - // we want at least 8 megs of journal for each 100 gigs of - // disk space. We cap the size at 512 megs though. - // - scale = ((long long)sfs.f_bsize * (long long)((unsigned int)sfs.f_blocks)) / (100*1024*1024*1024ULL); - journal_size *= (scale + 1); - if (journal_size > 512 * 1024 * 1024) { - journal_size = 512 * 1024 * 1024; + fd = open(journal_fname, O_CREAT|O_TRUNC|O_RDWR, 000); + if (fd < 0) { + fprintf(stderr, "Can't create journal file on volume %s (%s)\n", + volname, strerror(errno)); + return 5; } - } - if (chdir(volname) != 0) { - fprintf(stderr, "Can't locate volume %s to make it journaled (%s).\n", - volname, strerror(errno)); - return 10; - } + if (fcntl(fd, F_NOCACHE, 1)) { + fprintf(stderr, "Can't create journal file (NC) on volume %s (%s)\n", + volname, strerror(errno)); + return 5; + } - embedded_offset = get_embedded_offset(volname); - if (embedded_offset < 0) { - fprintf(stderr, "Can't calculate the embedded offset (if any) for %s.\n", volname); - fprintf(stderr, "Journal creation failure.\n"); - return 15; - } - // printf("Embedded offset == 0x%llx\n", embedded_offset); + // make sure that it has no r/w/x privs (only could happen if + // the file already existed since open() doesn't reset the mode + // bits). + // + fchmod(fd, 0); - fd = open(journal_fname, O_CREAT|O_TRUNC|O_RDWR, 000); - if (fd < 0) { - fprintf(stderr, "Can't create journal file on volume %s (%s)\n", - volname, strerror(errno)); - return 5; - } + block_size = sfs.f_bsize; + if ((journal_size % block_size) != 0) { + fprintf(stderr, "Journal size %dk is not a multiple of volume %s block size (%d).\n", + journal_size/1024, volname, block_size); + close(fd); + unlink(journal_fname); + return 5; + } - // make sure that it has no r/w/x privs (only could happen if - // the file already existed since open() doesn't reset the mode - // bits). - // - fchmod(fd, 0); +retry: + memset(&fst, 0, sizeof(fst)); + fst.fst_flags = F_ALLOCATECONTIG|F_ALLOCATEALL; + fst.fst_length = journal_size; + fst.fst_posmode = F_PEOFPOSMODE; + + ret = fcntl(fd, F_PREALLOCATE, &fst); + if (ret < 0) { + if (journal_size >= 2*1024*1024) { + fprintf(stderr, "Not enough contiguous space for a %d k journal. Retrying.\n", + journal_size/1024); + journal_size /= 2; + ftruncate(fd, 0); // make sure the file is zero bytes long. 
+ goto retry; + } else { + fprintf(stderr, "Disk too fragmented to enable journaling.\n"); + fprintf(stderr, "Please run a defragmenter on %s.\n", volname); + close(fd); + unlink(journal_fname); + return 10; + } + } - block_size = sfs.f_bsize; - if ((journal_size % block_size) != 0) { - fprintf(stderr, "Journal size %dk is not a multiple of volume %s block size (%d).\n", - journal_size/1024, volname, block_size); - close(fd); - unlink(journal_fname); - return 5; - } + printf("Allocated %lldK for journal file.\n", fst.fst_bytesalloc/1024LL); + buf = (char *)calloc(block_size, 1); + if (buf) { + for(i=0; i < journal_size/block_size; i++) { + ret = write(fd, buf, block_size); + if (ret != block_size) { + break; + } + } - retry: - memset(&fst, 0, sizeof(fst)); - fst.fst_flags = F_ALLOCATECONTIG|F_ALLOCATEALL; - fst.fst_length = journal_size; - fst.fst_posmode = F_PEOFPOSMODE; - - ret = fcntl(fd, F_PREALLOCATE, &fst); - if (ret < 0) { - if (journal_size >= 2*1024*1024) { - fprintf(stderr, "Not enough contiguous space for a %d k journal. Retrying.\n", - journal_size/1024); - journal_size /= 2; - ftruncate(fd, 0); // make sure the file is zero bytes long. - goto retry; + if (i*block_size != journal_size) { + fprintf(stderr, "Failed to write %dk to journal on volume %s (%s)\n", + journal_size/1024, volname, strerror(errno)); + } } else { - fprintf(stderr, "Disk too fragmented to enable journaling.\n"); - fprintf(stderr, "Please run a defragmenter on %s.\n", volname); - close(fd); - unlink(journal_fname); - return 10; + printf("Could not allocate memory to write to the journal on volume %s (%s)\n", + volname, strerror(errno)); } - } - printf("Allocated %lldK for journal file.\n", fst.fst_bytesalloc/1024LL); - buf = (char *)calloc(block_size, 1); - if (buf) { - for(i=0; i < journal_size/block_size; i++) { - ret = write(fd, buf, block_size); - if (ret != block_size) { - break; - } + fsync(fd); + close(fd); + hide_file(journal_fname); + + start_block = get_start_block(journal_fname, block_size); + if (start_block == (off_t)-1) { + fprintf(stderr, "Failed to get start block for %s (%s)\n", + journal_fname, strerror(errno)); + unlink(journal_fname); + return 20; } - - if (i*block_size != journal_size) { - fprintf(stderr, "Failed to write %dk to journal on volume %s (%s)\n", - journal_size/1024, volname, strerror(errno)); + jstart_block = (start_block / block_size) - (embedded_offset / block_size); + + memset(&jib, 'Z', sizeof(jib)); + jib.flags = kJIJournalInFSMask; + jib.offset = (off_t)((unsigned int)jstart_block) * (off_t)((unsigned int)block_size); + jib.size = (off_t)((unsigned int)journal_size); + + fd = open(jib_fname, O_CREAT|O_TRUNC|O_RDWR, 000); + if (fd < 0) { + fprintf(stderr, "Could not create journal info block file on volume %s (%s)\n", + volname, strerror(errno)); + unlink(journal_fname); + return 5; } - } else { - printf("Could not allocate memory to write to the journal on volume %s (%s)\n", - volname, strerror(errno)); - } - fsync(fd); - close(fd); - hide_file(journal_fname); + if (fcntl(fd, F_NOCACHE, 1)) { + fprintf(stderr, "Could not create journal info block (NC) file on volume %s (%s)\n", + volname, strerror(errno)); + return 5; + } - start_block = get_start_block(journal_fname, block_size); - if (start_block == (off_t)-1) { - fprintf(stderr, "Failed to get start block for %s (%s)\n", - journal_fname, strerror(errno)); - unlink(journal_fname); - return 20; - } - jstart_block = (start_block / block_size) - (embedded_offset / block_size); + // swap the data before we copy it + 
jib.flags = OSSwapBigToHostInt32(jib.flags); + jib.offset = OSSwapBigToHostInt64(jib.offset); + jib.size = OSSwapBigToHostInt64(jib.size); - memset(&jib, 'Z', sizeof(jib)); - jib.flags = kJIJournalInFSMask; - jib.offset = (off_t)((unsigned int)jstart_block) * (off_t)((unsigned int)block_size); - jib.size = (off_t)((unsigned int)journal_size); + memcpy(buf, &jib, sizeof(jib)); - fd = open(jib_fname, O_CREAT|O_TRUNC|O_RDWR, 000); - if (fd < 0) { - fprintf(stderr, "Could not create journal info block file on volume %s (%s)\n", - volname, strerror(errno)); - unlink(journal_fname); - return 5; - } - - // swap the data before we copy it - jib.flags = OSSwapBigToHostInt32(jib.flags); - jib.offset = OSSwapBigToHostInt64(jib.offset); - jib.size = OSSwapBigToHostInt64(jib.size); - - memcpy(buf, &jib, sizeof(jib)); + // now put it back the way it was + jib.size = OSSwapBigToHostInt64(jib.size); + jib.offset = OSSwapBigToHostInt64(jib.offset); + jib.flags = OSSwapBigToHostInt32(jib.flags); - // now put it back the way it was - jib.size = OSSwapBigToHostInt64(jib.size); - jib.offset = OSSwapBigToHostInt64(jib.offset); - jib.flags = OSSwapBigToHostInt32(jib.flags); + if (write(fd, buf, block_size) != block_size) { + fprintf(stderr, "Failed to write journal info block on volume %s (%s)!\n", + volname, strerror(errno)); + unlink(journal_fname); + return 10; + } - if (write(fd, buf, block_size) != block_size) { - fprintf(stderr, "Failed to write journal info block on volume %s (%s)!\n", - volname, strerror(errno)); - unlink(journal_fname); - return 10; - } + fsync(fd); + close(fd); + hide_file(jib_fname); + + start_block = get_start_block(jib_fname, block_size); + if (start_block == (off_t)-1) { + fprintf(stderr, "Failed to get start block for %s (%s)\n", + jib_fname, strerror(errno)); + unlink(journal_fname); + unlink(jib_fname); + return 20; + } + jinfo_block = (start_block / block_size) - (embedded_offset / block_size); - fsync(fd); - close(fd); - hide_file(jib_fname); - - start_block = get_start_block(jib_fname, block_size); - if (start_block == (off_t)-1) { - fprintf(stderr, "Failed to get start block for %s (%s)\n", - jib_fname, strerror(errno)); - unlink(journal_fname); - unlink(jib_fname); - return 20; - } - jinfo_block = (start_block / block_size) - (embedded_offset / block_size); - - - // - // Now make the volume journaled! - // - memset(sysctl_info, 0, sizeof(sysctl_info)); - sysctl_info[0] = CTL_VFS; - sysctl_info[1] = sfs.f_fsid.val[1]; - sysctl_info[2] = HFS_ENABLE_JOURNALING; - sysctl_info[3] = jinfo_block; - sysctl_info[4] = jstart_block; - sysctl_info[5] = journal_size; - - //printf("fs type: 0x%x\n", sysctl_info[1]); - //printf("jinfo block : 0x%x\n", jinfo_block); - //printf("jstart block: 0x%x\n", jstart_block); - //printf("journal size: 0x%x\n", journal_size); - - ret = sysctl((void *)sysctl_info, 6, NULL, NULL, NULL, 0); - if (ret != 0) { - fprintf(stderr, "Failed to make volume %s journaled (%s)\n", - volname, strerror(errno)); - unlink(journal_fname); - unlink(jib_fname); - return 20; - } - - return 0; + + // + // Now make the volume journaled! 
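(Note on the byte ordering a few lines up: the journal info block is stored big-endian on disk, and the OSSwapBigToHostInt* routines are their own inverse, so they serve equally well as host-to-big converters before the write; on a big-endian host both directions are no-ops. The same step can be expressed without mutating jib in place, as in this sketch; the helper name is hypothetical.)

#include <string.h>
#include <hfs/hfs_format.h>
#include <libkern/OSByteOrder.h>

/* Illustrative helper: copy jib into blockbuf with its fields in
 * on-disk (big-endian) byte order, leaving the original untouched. */
static void
jib_to_disk(const JournalInfoBlock *jib, char *blockbuf)
{
    JournalInfoBlock tmp = *jib;

    tmp.flags  = OSSwapHostToBigInt32(tmp.flags);
    tmp.offset = OSSwapHostToBigInt64(tmp.offset);
    tmp.size   = OSSwapHostToBigInt64(tmp.size);
    memcpy(blockbuf, &tmp, sizeof(tmp));
}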
+ // + memset(sysctl_info, 0, sizeof(sysctl_info)); + sysctl_info[0] = CTL_VFS; + sysctl_info[1] = sfs.f_fsid.val[1]; + sysctl_info[2] = HFS_ENABLE_JOURNALING; + sysctl_info[3] = jinfo_block; + sysctl_info[4] = jstart_block; + sysctl_info[5] = journal_size; + + //printf("fs type: 0x%x\n", sysctl_info[1]); + //printf("jinfo block : 0x%x\n", jinfo_block); + //printf("jstart block: 0x%x\n", jstart_block); + //printf("journal size: 0x%x\n", journal_size); + + ret = sysctl((void *)sysctl_info, 6, NULL, NULL, NULL, 0); + if (ret != 0) { + fprintf(stderr, "Failed to make volume %s journaled (%s)\n", + volname, strerror(errno)); + unlink(journal_fname); + unlink(jib_fname); + return 20; + } + + return 0; } int -DoUnJournal(char *volname) -{ - int result; - int sysctl_info[8]; - struct statfs sfs; - char jbuf[MAXPATHLEN]; - - if (statfs(volname, &sfs) != 0) { - fprintf(stderr, "Can't stat volume %s (%s).\n", volname, strerror(errno)); - return 10; - } +DoUnJournal(char *volname) { + int result; + int sysctl_info[8]; + struct statfs sfs; + char jbuf[MAXPATHLEN]; + + if (statfs(volname, &sfs) != 0) { + fprintf(stderr, "Can't stat volume %s (%s).\n", volname, strerror(errno)); + return 10; + } - if (strcmp(sfs.f_fstypename, "hfs") != 0) { - fprintf(stderr, "Volume %s (%s) is not a HFS volume.\n", volname, sfs.f_mntfromname); + if (strcmp(sfs.f_fstypename, "hfs") != 0) { + fprintf(stderr, "Volume %s (%s) is not a HFS volume.\n", volname, sfs.f_mntfromname); return 1; - } + } - if ((sfs.f_flags & MNT_JOURNALED) == 0) { - fprintf(stderr, "Volume %s (%s) is not journaled.\n", volname, sfs.f_mntfromname); - return 1; - } + if ((sfs.f_flags & MNT_JOURNALED) == 0) { + fprintf(stderr, "Volume %s (%s) is not journaled.\n", volname, sfs.f_mntfromname); + return 1; + } - if (chdir(volname) != 0) { - fprintf(stderr, "Can't locate volume %s to turn off journaling (%s).\n", - volname, strerror(errno)); - return 10; - } - - memset(sysctl_info, 0, sizeof(sysctl_info)); - sysctl_info[0] = CTL_VFS; - sysctl_info[1] = sfs.f_fsid.val[1]; - sysctl_info[2] = HFS_DISABLE_JOURNALING; - - result = sysctl((void *)sysctl_info, 3, NULL, NULL, NULL, 0); - if (result != 0) { - fprintf(stderr, "Failed to make volume %s UN-journaled (%s)\n", - volname, strerror(errno)); - return 20; - } + if (chdir(volname) != 0) { + fprintf(stderr, "Can't locate volume %s to turn off journaling (%s).\n", + volname, strerror(errno)); + return 10; + } - snprintf(jbuf, sizeof(jbuf), "%s/%s", volname, journal_fname); - if (unlink(jbuf) != 0) { - fprintf(stderr, "Failed to remove the journal %s (%s)\n", - jbuf, strerror(errno)); - } + memset(sysctl_info, 0, sizeof(sysctl_info)); + sysctl_info[0] = CTL_VFS; + sysctl_info[1] = sfs.f_fsid.val[1]; + sysctl_info[2] = HFS_DISABLE_JOURNALING; - snprintf(jbuf, sizeof(jbuf), "%s/%s", volname, jib_fname); - if (unlink(jbuf) != 0) { - fprintf(stderr, "Failed to remove the journal info block %s (%s)\n", - jbuf, strerror(errno)); - } + result = sysctl((void *)sysctl_info, 3, NULL, NULL, NULL, 0); + if (result != 0) { + fprintf(stderr, "Failed to make volume %s UN-journaled (%s)\n", + volname, strerror(errno)); + return 20; + } - printf("Journaling disabled on %s mounted at %s.\n", sfs.f_mntfromname, volname); - - return 0; + snprintf(jbuf, sizeof(jbuf), "%s/%s", volname, journal_fname); + if (unlink(jbuf) != 0) { + fprintf(stderr, "Failed to remove the journal %s (%s)\n", + jbuf, strerror(errno)); + } + + snprintf(jbuf, sizeof(jbuf), "%s/%s", volname, jib_fname); + if (unlink(jbuf) != 0) { + fprintf(stderr, "Failed 
to remove the journal info block %s (%s)\n", + jbuf, strerror(errno)); + } + + printf("Journaling disabled on %s mounted at %s.\n", sfs.f_mntfromname, volname); + + return 0; } diff --git a/hfs_util/version.plist b/hfs_util/version.plist deleted file mode 100644 index ce9ebcf..0000000 --- a/hfs_util/version.plist +++ /dev/null @@ -1,10 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd"> -<plist version="0.9"> -<dict> - <key>CFBundleIdentifier</key> - <string>com.apple.filesystems.hfs</string> - <key>CFBundleVersion</key> - <string>0.1</string> -</dict> -</plist> diff --git a/mount_hfs/hfs_endian.h b/mount_hfs/hfs_endian.h new file mode 100644 index 0000000..5200e26 --- /dev/null +++ b/mount_hfs/hfs_endian.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HFS_ENDIAN_H__ +#define __HFS_ENDIAN_H__ + +/* + * hfs_endian.h + * + * This file prototypes endian swapping routines for the HFS/HFS Plus + * volume format. + */ +#include <libkern/OSByteOrder.h> + +/*********************/ +/* BIG ENDIAN Macros */ +/*********************/ +#if BYTE_ORDER == BIG_ENDIAN + /* HFS is always big endian, make swaps into no-ops */ + #define SWAP_BE16(__a) (__a) + #define SWAP_BE32(__a) (__a) + #define SWAP_BE64(__a) (__a) + +/************************/ +/* LITTLE ENDIAN Macros */ +/************************/ +#elif BYTE_ORDER == LITTLE_ENDIAN + /* HFS is always big endian, make swaps actually swap */ + #define SWAP_BE16(__a) OSSwapBigToHostInt16 (__a) + #define SWAP_BE32(__a) OSSwapBigToHostInt32 (__a) + #define SWAP_BE64(__a) OSSwapBigToHostInt64 (__a) + +#else +#warning Unknown byte order +#error +#endif + +#endif /* __HFS_ENDIAN__ */ diff --git a/mount_hfs/mount_hfs.8 b/mount_hfs/mount_hfs.8 new file mode 100644 index 0000000..eaca8df --- /dev/null +++ b/mount_hfs/mount_hfs.8 @@ -0,0 +1,106 @@ +.\" Copyright (c) 2002 Apple Computer, Inc. All rights reserved. +.\" +.\" The contents of this file constitute Original Code as defined in and +.\" are subject to the Apple Public Source License Version 1.1 (the +.\" "License"). You may not use this file except in compliance with the +.\" License. Please obtain a copy of the License at +.\" http://www.apple.com/publicsource and read it before using this file. 
+.\" +.\" This Original Code and all software distributed under the License are +.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +.\" License for the specific language governing rights and limitations +.\" under the License. +.\" +.\" @(#)mount_hfs.8 +.Dd March 14, 2001 +.Dt MOUNT_HFS 8 +.Os "Mac OS X" +.Sh NAME +.Nm mount_hfs +.Nd mount an HFS/HFS+ file system +.Sh SYNOPSIS +.Nm mount_hfs +.Op Fl e Ar encoding +.Op Fl u Ar user +.Op Fl g Ar group +.Op Fl m Ar mask +.Op Fl o Ar options +.Op Fl j +.Op Fl w +.Op Fl x +.Ar special +.Ar directory +.Sh DESCRIPTION +The +.Nm mount_hfs +command attaches the HFS file system residing on the device +.Pa special +to the global file system namespace at the location indicated by +.Pa directory . +This command is normally executed by +.Xr mount 8 +at boot time. +.Pp +The options are as follows: +.Bl -tag -width indent +.It Fl e Ar encoding (standard HFS volumes only) +Specify the Macintosh encoding. The following encodings are supported: +.Pp +Arabic, ChineseSimp, ChineseTrad, Croatian, Cyrillic, Greek, Hebrew, +Icelandic, Japanese, Korean, Roman (default), Romanian, Thai, Turkish +.It Fl u Ar user +Set the owner of the files in the file system to +.Pa user . +The default owner is the owner of the directory on which +the file system is being mounted. +The +.Pa user +may be a user-name, or a numeric value. +.It Fl g Ar group +Set the group of the files in the file system to +.Pa group . +The default group is the group of the directory on which +the file system is being mounted. +The +.Pa group +may be a group-name, or a numeric value. +.It Fl m Ar mask +Specify the maximum file permissions for files in the file system. +(For example, a +.Pa mask +of 755 specifies that, by default, the owner should have read, write, +and execute permissions for files, but others should only have read +and execute permissions. See chmod(1) for more information about +octal file modes.) Only the nine low-order bits of +.Pa mask +are used. The default +.Pa mask +is taken from the directory on which the file system is being mounted. +.It Fl o +Options are specified with a +.Fl o +flag followed by a comma separated string of options. +See the +.Xr mount 8 +man page for possible options and their meanings. +.It Fl j +Ignore the journal for this mount. +.It Fl w +Mount the HFS wrapper volume. +.It Fl x +Disable execute permissions on a standard HFS file system. +.El +.Sh SEE ALSO +.Xr mount 2 , +.Xr unmount 2 , +.Xr fstab 5 , +.Xr mount 8 +.Sh BUGS +Some HFS file systems with highly fragmented catalog files may not mount. +.Sh HISTORY +The +.Nm mount_hfs +utility first appeared in Mac OS X Server 1.0. diff --git a/mount_hfs/mount_hfs.c b/mount_hfs/mount_hfs.c new file mode 100644 index 0000000..8f086e8 --- /dev/null +++ b/mount_hfs/mount_hfs.c @@ -0,0 +1,865 @@ +/* + * Copyright (c) 1999-2013 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include <sys/types.h> + +#include <sys/attr.h> +#include <sys/mount.h> +#include <sys/stat.h> +#include <sys/sysctl.h> +#include <sys/time.h> +#include <sys/uio.h> +#include <sys/vnode.h> +#include <sys/wait.h> +#include <sys/ioctl.h> +#include <sys/disk.h> + +#include <ctype.h> +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <grp.h> +#include <limits.h> +#include <pwd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <signal.h> + +#include <hfs/hfs_mount.h> +#include <hfs/hfs_format.h> + +#include <TargetConditionals.h> + +/* Sensible wrappers over the byte-swapping routines */ +#include "hfs_endian.h" +#if !TARGET_OS_EMBEDDED +#include "optical.h" +#endif + +#include <mntopts.h> + + +struct mntopt mopts[] = { + MOPT_STDOPTS, + MOPT_IGNORE_OWNERSHIP, + MOPT_PERMISSIONS, + MOPT_UPDATE, + { NULL } +}; + +#define HFS_MOUNT_TYPE "hfs" + +gid_t a_gid __P((char *)); +uid_t a_uid __P((char *)); +mode_t a_mask __P((char *)); +struct hfs_mnt_encoding * a_encoding __P((char *)); +int get_encoding_pref __P((const char *)); +int get_encoding_bias __P((void)); +unsigned int get_default_encoding(void); + +void usage __P((void)); + + +int is_hfs_std = 0; +int wrapper_requested = 0; + +typedef struct CreateDateAttrBuf { + u_int32_t size; + struct timespec creationTime; +} CreateDateAttrBuf; + +#define HFS_BLOCK_SIZE 512 + +/* + * This is the straight GMT conversion constant: + * 00:00:00 January 1, 1970 - 00:00:00 January 1, 1904 + * (3600 * 24 * ((365 * (1970 - 1904)) + (((1970 - 1904) / 4) + 1))) + */ +#define MAC_GMT_FACTOR 2082844800UL + +#define KEXT_LOAD_COMMAND "/sbin/kextload" + +#define ENCODING_MODULE_PATH "/System/Library/Filesystems/hfs.fs/Encodings/" + +#define MXENCDNAMELEN 16 /* Maximun length of encoding name string */ + +struct hfs_mnt_encoding { + char encoding_name[MXENCDNAMELEN]; /* encoding type name */ + u_int32_t encoding_id; /* encoding type number */ +}; + + +/* + * Lookup table for hfs encoding names + * Note: Names must be in alphabetical order + */ +struct hfs_mnt_encoding hfs_mnt_encodinglist[] = { + { "Arabic", 4 }, + { "Armenian", 24 }, + { "Bengali", 13 }, + { "Burmese", 19 }, + { "Celtic", 39 }, + { "CentralEurRoman", 29 }, + { "ChineseSimp", 25 }, + { "ChineseTrad", 2 }, + { "Croatian", 36 }, + { "Cyrillic", 7 }, + { "Devanagari", 9 }, + { "Ethiopic", 28 }, + { "Farsi", 140 }, + { "Gaelic", 40 }, + { "Georgian", 23 }, + { "Greek", 6 }, + { "Gujarati", 11 }, + { "Gurmukhi", 10 }, + { "Hebrew", 5 }, + { "Icelandic", 37 }, + { "Japanese", 1 }, + { "Kannada", 16 }, + { "Khmer", 20 }, + { "Korean", 3 }, + { "Laotian", 22 }, + { "Malayalam", 17 }, + { "Mongolian", 27 }, + { "Oriya", 12 }, + { "Roman", 0 }, /* default */ + { "Romanian", 38 }, + { "Sinhalese", 18 }, + { "Tamil", 14 }, + { "Telugu", 15 }, + { "Thai", 21 }, + { "Tibetan", 26 }, + { "Turkish", 35 }, + { "Ukrainian", 152 }, + { "Vietnamese", 30 }, +}; + + +/* + If path is a path to a block device, then return a path to the + corresponding 
raw device. Else return path unchanged. +*/ +const char *rawdevice(const char *path) +{ + const char *devdisk = "/dev/disk"; + static char raw[MAXPATHLEN]; + + if (!strncmp(path, devdisk, strlen(devdisk))) { + /* The +5 below is strlen("/dev/"), so path+5 points to "disk..." */ + int sn_len = snprintf(raw, sizeof(raw), "/dev/r%s", path+5); + if (sn_len < 0) { + /* error in building string. return original. */ + return path; + } + + if ((unsigned long) sn_len < sizeof(raw)) { + return raw; + } + } + return path; +} + + +/* + GetMasterBlock + + Return a pointer to the Master Directory Block or Volume Header Block + for the volume. In the case of an HFS volume with embedded HFS Plus + volume, this returns the HFS (wrapper) volume's Master Directory Block. + That is, the 512 bytes at offset 1024 bytes from the start of the given + device/partition. + + The master block is cached globally. If it has previously been read in, + the cached copy will be returned. If this routine is called multiple times, + it must be called with the same device string. + + Arguments: + device Path name to disk device (eg., "/dev/disk0s2") + + Returns: + A pointer to the MDB or VHB. This pointer may be in the middle of a + malloc'ed block. There may be more than 512 bytes of malloc'ed memory + at the returned address. + + Errors: + On error, this routine returns NULL. +*/ +void *GetMasterBlock(const char *device) +{ + static char *masterBlock = NULL; + char *buf = NULL; + int err; + int fd = -1; + uint32_t blockSize; + ssize_t amount; + off_t offset; + + /* + * If we already read the master block, then just return it. + */ + if (masterBlock != NULL) { + return masterBlock; + } + + device = rawdevice(device); + + fd = open(device, O_RDONLY | O_NDELAY, 0); + if (fd < 0) { + fprintf(stderr, "GetMasterBlock: Error %d opening %s\n", errno, device); + goto done; + } + + /* + * Get the block size so we can read an entire block. + */ + err = ioctl(fd, DKIOCGETBLOCKSIZE, &blockSize); + if (err == -1) { + fprintf(stderr, "GetMasterBlock: Error %d getting block size\n", errno); + goto done; + } + + /* + * Figure out the offset of the start of the block which contains + * byte offset 1024 (the start of the master block). This is 1024 + * rounded down to a multiple of blockSize. But since blockSize is + * always a power of two, this will be either 0 (if blockSize > 1024) + * or 1024 (if blockSize <= 1024). + */ + offset = blockSize > 1024 ? 0 : 1024; + + /* + * Allocate a buffer and read the block. + */ + buf = malloc(blockSize); + if (buf == NULL) { + fprintf(stderr, "GetMasterBlock: Could not malloc %u bytes\n", blockSize); + goto done; + } + amount = pread(fd, buf, blockSize, offset); + if (amount != blockSize) { + fprintf(stderr, "GetMasterBlock: Error %d from read; amount=%ld, wanted=%u\n", errno, amount, blockSize); + goto done; + } + + /* + * Point at the part of the buffer containing the master block. + * Then return that pointer. + * + * Note: if blockSize <= 1024, then offset = 1024, and the master + * block is at the start of the buffer. If blockSize > 1024, then + * offset = 0, and the master block is at offset 1024 from the start + * of the buffer. + */ + masterBlock = buf + 1024 - offset; + buf = NULL; /* Don't free memory that masterBlock points into. 
*/ + +done: + if (fd >= 0) + close(fd); + if (buf != NULL) + free(buf); + return masterBlock; +} + + +u_int32_t getVolumeCreateDate(const char *device) +{ + HFSMasterDirectoryBlock * mdbPtr; + u_int32_t volume_create_time = 0; + + mdbPtr = GetMasterBlock(device); + if (mdbPtr == NULL) goto exit; + + /* get the create date from the MDB (embedded case) or Volume Header */ + if ((mdbPtr->drSigWord == SWAP_BE16 (kHFSSigWord)) && + (mdbPtr->drEmbedSigWord == SWAP_BE16 (kHFSPlusSigWord))) { + /* Embedded volume*/ + volume_create_time = SWAP_BE32 (mdbPtr->drCrDate); + + } else if (mdbPtr->drSigWord == kHFSPlusSigWord ) { + HFSPlusVolumeHeader * volHdrPtr = (HFSPlusVolumeHeader *) mdbPtr; + + volume_create_time = SWAP_BE32 (volHdrPtr->createDate); + } else { + goto exit; /* cound not match signature */ + } + + if (volume_create_time > MAC_GMT_FACTOR) + volume_create_time -= MAC_GMT_FACTOR; + else + volume_create_time = 0; /* don't let date go negative! */ + +exit: + return volume_create_time; +} + +void syncCreateDate(const char *mntpt, u_int32_t localCreateTime) +{ + int result; + char path[256]; + struct attrlist attributes; + CreateDateAttrBuf attrReturnBuffer; + int64_t gmtCreateTime; + int32_t gmtOffset; + int32_t newCreateTime; + + snprintf(path, sizeof(path), "%s/", mntpt); + + attributes.bitmapcount = ATTR_BIT_MAP_COUNT; + attributes.reserved = 0; + attributes.commonattr = ATTR_CMN_CRTIME; + attributes.volattr = 0; + attributes.dirattr = 0; + attributes.fileattr = 0; + attributes.forkattr = 0; + + result = getattrlist(path, &attributes, &attrReturnBuffer, sizeof(attrReturnBuffer), 0 ); + if (result) return; + + gmtCreateTime = attrReturnBuffer.creationTime.tv_sec; + gmtOffset = gmtCreateTime - (int64_t) localCreateTime + 900; + if (gmtOffset > 0) { + gmtOffset = 1800 * (gmtOffset / 1800); + } else { + gmtOffset = -1800 * ((-gmtOffset + 1799) / 1800); + } + + newCreateTime = localCreateTime + gmtOffset; + + /* + * if the root directory's create date doesn't match + * and its within +/- 15 seconds, then update it + */ + if ((newCreateTime != attrReturnBuffer.creationTime.tv_sec) && + (( newCreateTime - attrReturnBuffer.creationTime.tv_sec) > -15) && + ((newCreateTime - attrReturnBuffer.creationTime.tv_sec) < 15)) { + + attrReturnBuffer.creationTime.tv_sec = (time_t) newCreateTime; + (void) setattrlist (path, + &attributes, + &attrReturnBuffer.creationTime, + sizeof(attrReturnBuffer.creationTime), + 0); + } +} + +/* + * load_encoding + * loads an hfs encoding converter module into the kernel + * + * Note: unloading of encoding converter modules is done in the kernel + */ +static int +load_encoding(struct hfs_mnt_encoding *encp) +{ + int pid; + int loaded; + union wait status; + struct stat sb; + char kmodfile[MAXPATHLEN]; + + /* MacRoman encoding (0) is built into the kernel */ + if (encp->encoding_id == 0) + return (0); + + snprintf(kmodfile, sizeof(kmodfile), "%sHFS_Mac%s.kext", ENCODING_MODULE_PATH, encp->encoding_name); + if (stat(kmodfile, &sb) == -1) { + fprintf(stdout, "unable to find: %s\n", kmodfile); + return (-1); + } + + loaded = 0; + pid = fork(); + if (pid == 0) { + (void) execl(KEXT_LOAD_COMMAND, KEXT_LOAD_COMMAND, kmodfile, NULL); + + exit(1); /* We can only get here if the exec failed */ + } else if (pid != -1) { + if ((waitpid(pid, (int *)&status, 0) == pid) && WIFEXITED(status)) { + /* we attempted a load */ + loaded = 1; + } + } + + if (!loaded) { + fprintf(stderr, "unable to load: %s\n", kmodfile); + return (-1); + } + return (0); +} + +int +main(argc, argv) + int argc; + 
char **argv; +{ + struct hfs_mount_args args; + int ch, mntflags; + char *dev, dir[MAXPATHLEN]; + int mountStatus; + struct timeval dummy_timeval; /* gettimeofday() crashes if the first argument is NULL */ + u_int32_t localCreateTime; + struct hfs_mnt_encoding *encp; + +#if TARGET_OS_EMBEDDED + mntflags = MNT_NOATIME; +#else + mntflags = 0; +#endif + encp = NULL; + (void)memset(&args, '\0', sizeof(struct hfs_mount_args)); + + /* + * For a mount update, the following args must be explictly + * passed in as options to change their value. On a new + * mount, default values will be computed for all args. + */ + args.flags = VNOVAL; + args.hfs_uid = (uid_t)VNOVAL; + args.hfs_gid = (gid_t)VNOVAL; + args.hfs_mask = (mode_t)VNOVAL; + args.hfs_encoding = (u_int32_t)VNOVAL; + + optind = optreset = 1; /* Reset for parse of new argv. */ + while ((ch = getopt(argc, argv, "xu:g:m:e:o:wt:jc")) != EOF) { + switch (ch) { + case 't': { + char *ptr; + unsigned long tbufsize = strtoul(optarg, &ptr, 0); + if (tbufsize >= UINT_MAX) { + tbufsize = UINT_MAX; + } + args.journal_tbuffer_size = (unsigned int) strtoul(optarg, &ptr, 0); + if ((args.journal_tbuffer_size == 0 || + ((uint32_t) args.journal_tbuffer_size) == UINT_MAX) && errno != 0) { + fprintf(stderr, "%s: Invalid tbuffer size %s\n", argv[0], optarg); + exit(5); + } else { + if (*ptr == 'k') + args.journal_tbuffer_size *= 1024; + else if (*ptr == 'm') + args.journal_tbuffer_size *= 1024*1024; + } + if (args.flags == VNOVAL){ + args.flags = 0; + } + args.flags |= HFSFSMNT_EXTENDED_ARGS; + break; + } + case 'j': + /* disable the journal */ + if(args.flags == VNOVAL){ + args.flags = 0; + } + args.flags |= HFSFSMNT_EXTENDED_ARGS; + args.journal_disable = 1; + break; + case 'c': + // XXXdbg JOURNAL_NO_GROUP_COMMIT == 0x0001 + args.journal_flags = 0x0001; + break; + case 'x': + if (args.flags == VNOVAL) + args.flags = 0; + args.flags |= HFSFSMNT_NOXONFILES; + break; + case 'u': + args.hfs_uid = a_uid(optarg); + break; + case 'g': + args.hfs_gid = a_gid(optarg); + break; + case 'm': + args.hfs_mask = a_mask(optarg); + break; + case 'e': + encp = a_encoding(optarg); + break; + case 'o': + { + int dummy; + getmntopts(optarg, mopts, &mntflags, &dummy); + } + break; + case 'w': + if (args.flags == VNOVAL) + args.flags = 0; + args.flags |= HFSFSMNT_WRAPPER; + wrapper_requested = 1; + break; + case '?': + usage(); + break; + default: +#if DEBUG + printf("mount_hfs: ERROR: unrecognized ch = '%c'\n", ch); +#endif + usage(); + }; /* switch */ + } + + if ((mntflags & MNT_IGNORE_OWNERSHIP) && !(mntflags & MNT_UPDATE)) { + /* + * The defaults to be supplied in lieu of the on-disk permissions + * (could be overridden by explicit -u, -g, or -m options): + */ + if (args.hfs_uid == (uid_t)VNOVAL) args.hfs_uid = UNKNOWNUID; + if (args.hfs_gid == (gid_t)VNOVAL) args.hfs_gid = UNKNOWNGID; +#if OVERRIDE_UNKNOWN_PERMISSIONS + if (args.hfs_mask == (mode_t)VNOVAL) args.hfs_mask = ACCESSPERMS; /* 0777 */ +#endif + } + argc -= optind; + argv += optind; + + if (argc != 2) { +#if DEBUG + printf("mount_hfs: ERROR: argc == %d != 2\n", argc); +#endif + usage(); + } + + dev = argv[0]; + + if (realpath(argv[1], dir) == NULL) + err(1, "realpath %s", dir); + + args.fspec = dev; + + /* HFS volumes need timezone info to convert local to GMT */ + (void) gettimeofday( &dummy_timeval, &args.hfs_timezone ); + + /* load requested encoding (if any) for hfs volume */ + if (encp != NULL) { + if (load_encoding(encp) != 0) + exit(1); /* load failure */ + args.hfs_encoding = encp->encoding_id; + } + + /* + * 
For a new mount (non-update case) fill in default values for all args + */ + if ((mntflags & MNT_UPDATE) == 0) { + + struct stat sb; + + if (args.flags == VNOVAL) + args.flags = 0; + + if ((args.hfs_encoding == (u_int32_t)VNOVAL) && (encp == NULL)) { + int encoding; + + /* Find a suitable encoding preference. */ + if ((encoding = get_encoding_pref(dev)) != -1) { + /* + * Note: the encoding kext was loaded by + * hfs.util during the file system probe. + */ + args.hfs_encoding = encoding; + } else { + args.hfs_encoding = 0; + } + } + /* when the mountpoint is root, use default values */ + if (strcmp(dir, "/") == 0) { + sb.st_mode = 0777; + sb.st_uid = 0; + sb.st_gid = 0; + + /* otherwise inherit from the mountpoint */ + } else if (stat(dir, &sb) == -1) + err(1, "stat %s", dir); + + if (args.hfs_uid == (uid_t)VNOVAL) + args.hfs_uid = sb.st_uid; + + if (args.hfs_gid == (gid_t)VNOVAL) + args.hfs_gid = sb.st_gid; + + if (args.hfs_mask == (mode_t)VNOVAL) + args.hfs_mask = sb.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO); + } +#if DEBUG + printf("mount_hfs: calling mount: \n" ); + printf("\tdevice = %s\n", dev); + printf("\tmount point = %s\n", dir); + printf("\tmount flags = 0x%08x\n", mntflags); + printf("\targ flags = 0x%x\n", args.flags); + printf("\tuid = %d\n", args.hfs_uid); + printf("\tgid = %d\n", args.hfs_gid); + printf("\tmode = %o\n", args.hfs_mask); + printf("\tencoding = %ld\n", args.hfs_encoding); + +#endif + +#if !TARGET_OS_EMBEDDED + /* + * We shouldn't really be calling up to other layers, but + * an exception was made in this case to fix the situation + * where HFS was writable on optical media. + */ + + if ((_optical_is_writable(dev) & _OPTICAL_WRITABLE_PACKET)) { + mntflags |= MNT_RDONLY; + } +#endif + + if (is_hfs_std) + mntflags |= MNT_RDONLY; + + if ((mntflags & MNT_RDONLY) == 0) { + /* + * get the volume's create date so we can synchronize + * it with the root directory create date + */ + localCreateTime = getVolumeCreateDate(dev); + } + else { + localCreateTime = 0; + } + + if ((mountStatus = mount(HFS_MOUNT_TYPE, dir, mntflags, &args)) < 0) { +#if DEBUG + printf("mount_hfs: error on mount(): error = %d.\n", mountStatus); +#endif + err(1, NULL); + }; + + /* + * synchronize the root directory's create date + * with the volume's create date + */ + if (localCreateTime) + syncCreateDate(dir, localCreateTime); + + exit(0); +} + + +gid_t +a_gid(s) + char *s; +{ + struct group *gr; + char *gname, *orig = s; + gid_t gid = 0; + + if (*s == '-') + s++; + for (gname = s; *s && isdigit(*s); ++s); + if (!*s) { + gid = atoi(gname); + } else { + gr = getgrnam(orig); + if (gr == NULL) + errx(1, "unknown group id: %s", orig); + gid = gr->gr_gid; + } + return (gid); +} + +uid_t +a_uid(s) + char *s; +{ + struct passwd *pw; + char *uname, *orig = s; + uid_t uid = 0; + + if (*s == '-') + s++; + for (uname = s; *s && isdigit(*s); ++s); + if (!*s) { + uid = atoi(uname); + } else { + pw = getpwnam(orig); + if (pw == NULL) + errx(1, "unknown user id: %s", orig); + uid = pw->pw_uid; + } + return (uid); +} + +mode_t +a_mask(s) + char *s; +{ + int done, rv; + char *ep; + + done = 0; + rv = -1; + if (*s >= '0' && *s <= '7') { + done = 1; + rv = strtol(optarg, &ep, 8); + } + if (!done || rv < 0 || *ep) + errx(1, "invalid file mode: %s", s); + return (rv); +} + +struct hfs_mnt_encoding * +a_encoding(s) + char *s; +{ + char *uname; + int i; + u_int32_t encoding; + struct hfs_mnt_encoding *p, *q, *enclist; + int elements = sizeof(hfs_mnt_encodinglist) / sizeof(struct hfs_mnt_encoding); + int compare; + + /* 
Use a binary search to find an encoding match */ + p = hfs_mnt_encodinglist; + q = p + (elements - 1); + while (p <= q) { + enclist = p + ((q - p) >> 1); /* divide by 2 */ + compare = strcmp(s, enclist->encoding_name); + if (compare < 0) + q = enclist - 1; + else if (compare > 0) + p = enclist + 1; + else + return (enclist); + } + + for (uname = s; *s && isdigit(*s); ++s); + + if (*s) goto unknown; + + encoding = atoi(uname); + for (i=0, enclist = hfs_mnt_encodinglist; i < elements; i++, enclist++) { + if (enclist->encoding_id == encoding) + return (enclist); + } + +unknown: + errx(1, "unknown encoding: %s", uname); + return (NULL); +} + + +/* + * Get file system's encoding preference. + */ +int +get_encoding_pref(const char *device) +{ + struct hfs_mnt_encoding *enclist; + HFSMasterDirectoryBlock * mdbp; + int encoding = -1; + int elements; + int i; + + mdbp = GetMasterBlock(device); + if (mdbp == NULL) + return 0; + + if (SWAP_BE16(mdbp->drSigWord) != kHFSSigWord || + (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord && (!wrapper_requested))) { + return (-1); + } + else { + is_hfs_std = 1; + } + encoding = GET_HFS_TEXT_ENCODING(SWAP_BE32(mdbp->drFndrInfo[4])); + + if (encoding == -1) { + encoding = get_encoding_bias(); + if (encoding == 0 || encoding == -1) + encoding = get_default_encoding(); + } + + /* Check if this is a supported encoding. */ + elements = sizeof(hfs_mnt_encodinglist) / sizeof(struct hfs_mnt_encoding); + for (i=0, enclist = hfs_mnt_encodinglist; i < elements; i++, enclist++) { + if (enclist->encoding_id == encoding) + return (encoding); + } + + return (0); +} + +/* + * Get kernel's encoding bias. + */ +int +get_encoding_bias() +{ + int mib[3]; + size_t buflen = sizeof(int); + struct vfsconf vfc; + int hint = 0; + + if (getvfsbyname("hfs", &vfc) < 0) + goto error; + + mib[0] = CTL_VFS; + mib[1] = vfc.vfc_typenum; + mib[2] = HFS_ENCODINGBIAS; + + if (sysctl(mib, 3, &hint, &buflen, NULL, 0) < 0) + goto error; + return (hint); +error: + return (-1); +} + +#define __kCFUserEncodingFileName ("/.CFUserTextEncoding") + +unsigned int +get_default_encoding() +{ + struct passwd *passwdp; + + if ((passwdp = getpwuid(0))) { /* root account */ + char buffer[MAXPATHLEN + 1]; + int fd; + + strlcpy(buffer, passwdp->pw_dir, sizeof(buffer)); + strlcat(buffer, __kCFUserEncodingFileName, sizeof(buffer)); + + if ((fd = open(buffer, O_RDONLY, 0)) > 0) { + ssize_t readSize; + + readSize = read(fd, buffer, MAXPATHLEN); + buffer[(readSize < 0 ? 0 : readSize)] = '\0'; + close(fd); + return strtol(buffer, NULL, 0); + } + } + return (0); /* Fallback to smRoman */ +} + + +void +usage() +{ + (void)fprintf(stderr, + "usage: mount_hfs [-xw] [-u user] [-g group] [-m mask] [-e encoding] [-t tbuffer-size] [-j] [-c] [-o options] special-device filesystem-node\n"); + (void)fprintf(stderr, " -j disables journaling; -c disables group-commit for journaling\n"); + + exit(1); +} diff --git a/mount_hfs/optical.c b/mount_hfs/optical.c new file mode 100644 index 0000000..e46dde7 --- /dev/null +++ b/mount_hfs/optical.c @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2007 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "optical.h" + +#include <paths.h> +#include <string.h> +#include <CoreFoundation/CoreFoundation.h> +#include <IOKit/IOKitLib.h> +#include <IOKit/storage/IOMedia.h> +#include <IOKit/storage/IOBDMedia.h> +#include <IOKit/storage/IOCDMedia.h> +#include <IOKit/storage/IODVDMedia.h> + +static io_service_t +__io_media_copy_whole_media(io_service_t media) +{ + io_service_t parent; + CFTypeRef property; + + IOObjectRetain(media); + + while (media) { + if (IOObjectConformsTo(media, kIOMediaClass)) { + property = IORegistryEntryCreateCFProperty(media, CFSTR(kIOMediaWholeKey), kCFAllocatorDefault, 0); + if (property) { + CFRelease(property); + if (property == kCFBooleanTrue) + break; + } + } + parent = IO_OBJECT_NULL; + IORegistryEntryGetParentEntry(media, kIOServicePlane, &parent); + IOObjectRelease(media); + media = parent; + } + + return media; +} + +static io_service_t +__io_media_create_from_bsd_name(const char *name) +{ + if (!strncmp(_PATH_DEV, name, strlen(_PATH_DEV))) + name += strlen(_PATH_DEV); + + return IOServiceGetMatchingService(kIOMasterPortDefault, IOBSDNameMatching(kIOMasterPortDefault, 0, name)); +} + +static int +__io_media_is_writable(io_service_t media) +{ + int writable = 0; + CFTypeRef property; + + property = IORegistryEntryCreateCFProperty(media, CFSTR(kIOMediaWritableKey), kCFAllocatorDefault, 0); + if (property) { + CFRelease(property); + if (property == kCFBooleanTrue) { + writable = _OPTICAL_WRITABLE_SECTOR; + + if (IOObjectConformsTo(media, kIOBDMediaClass)) { + property = IORegistryEntryCreateCFProperty(media, CFSTR(kIOBDMediaTypeKey), kCFAllocatorDefault, 0); + if (property) { + if (CFEqual(property, CFSTR(kIOBDMediaTypeR))) + writable = _OPTICAL_WRITABLE_PACKET | _OPTICAL_WRITABLE_ONCE; /* BD-R */ + else if (CFEqual(property, CFSTR(kIOBDMediaTypeRE))) + writable = _OPTICAL_WRITABLE_PACKET; /* BD-RE */ + CFRelease(property); + } + } else if (IOObjectConformsTo(media, kIOCDMediaClass)) { + property = IORegistryEntryCreateCFProperty(media, CFSTR(kIOCDMediaTypeKey), kCFAllocatorDefault, 0); + if (property) { + if (CFEqual(property, CFSTR(kIOCDMediaTypeR))) + writable = _OPTICAL_WRITABLE_PACKET | _OPTICAL_WRITABLE_ONCE; /* CD-R */ + else if (CFEqual(property, CFSTR(kIOCDMediaTypeRW))) + writable = _OPTICAL_WRITABLE_PACKET; /* CD-RW */ + + CFRelease(property); + } + } else if (IOObjectConformsTo(media, kIODVDMediaClass)) { + property = IORegistryEntryCreateCFProperty(media, CFSTR(kIODVDMediaTypeKey), kCFAllocatorDefault, 0); + if (property) { + if (CFEqual(property, CFSTR(kIODVDMediaTypeR))) + writable = _OPTICAL_WRITABLE_PACKET | _OPTICAL_WRITABLE_ONCE; /* DVD-R */ + else if (CFEqual(property, CFSTR(kIODVDMediaTypeRW))) + writable = _OPTICAL_WRITABLE_PACKET; /* DVD-RW */ + else if (CFEqual(property, CFSTR(kIODVDMediaTypePlusR))) + writable = _OPTICAL_WRITABLE_PACKET | _OPTICAL_WRITABLE_ONCE; /* DVD+R */ + else if (CFEqual(property, CFSTR(kIODVDMediaTypePlusRW))) + writable = _OPTICAL_WRITABLE_PACKET; /* DVD+RW */ + else if (CFEqual(property, 
CFSTR(kIODVDMediaTypeHDR))) + writable = _OPTICAL_WRITABLE_PACKET | _OPTICAL_WRITABLE_ONCE; /* HD DVD-R */ + + CFRelease(property); + } + } + } + } + + return writable; +} + +int +_optical_is_writable(const char *dev) +{ + int writable = 0; + io_service_t media; + io_service_t whole; + + media = __io_media_create_from_bsd_name(dev); + if (media) { + writable = __io_media_is_writable(media); + if (writable) { + whole = __io_media_copy_whole_media(media); + if (whole) { + writable = __io_media_is_writable(whole); + + IOObjectRelease(whole); + } + } + IOObjectRelease(media); + } + + return writable; +} diff --git a/mount_hfs/optical.h b/mount_hfs/optical.h new file mode 100644 index 0000000..73ef3a8 --- /dev/null +++ b/mount_hfs/optical.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2007 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define _OPTICAL_WRITABLE_SECTOR 0x0001 +#define _OPTICAL_WRITABLE_PACKET 0x0002 +#define _OPTICAL_WRITABLE_ONCE 0x0008 + +int _optical_is_writable(const char *dev); diff --git a/newfs_hfs/hfs_endian.c b/newfs_hfs/hfs_endian.c new file mode 100644 index 0000000..1370737 --- /dev/null +++ b/newfs_hfs/hfs_endian.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * hfs_endian.c + * + * This file implements endian swapping routines for the HFS/HFS Plus + * volume format. 
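 * (Concrete illustration: kHFSSigWord is 0x4244 in big-endian form on disk;
 * a little-endian host that loads those two bytes as a 16-bit integer sees
 * 0x4442, and SWAP_BE16() restores the expected 0x4244 before comparison.)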
+ */ + +#include <sys/types.h> +#include <sys/stat.h> + +#include <libkern/OSByteOrder.h> +#include <hfs/hfs_format.h> + +#include "hfs_endian.h" + +#undef ENDIAN_DEBUG +#if 0 +/* Private swapping routines */ +int hfs_swap_HFSPlusBTInternalNode (BlockDescriptor *src, HFSCatalogNodeID fileID, int unswap); +int hfs_swap_HFSBTInternalNode (BlockDescriptor *src, HFSCatalogNodeID fileID, int unswap); +#endif + +static void hfs_swap_HFSPlusForkData (HFSPlusForkData *src); + +/* + * hfs_swap_HFSMasterDirectoryBlock + * + * Specially modified to swap parts of the finder info + */ +void +hfs_swap_HFSMasterDirectoryBlock ( + void *buf +) +{ + HFSMasterDirectoryBlock *src = (HFSMasterDirectoryBlock *)buf; + + src->drSigWord = SWAP_BE16 (src->drSigWord); + src->drCrDate = SWAP_BE32 (src->drCrDate); + src->drLsMod = SWAP_BE32 (src->drLsMod); + src->drAtrb = SWAP_BE16 (src->drAtrb); + src->drNmFls = SWAP_BE16 (src->drNmFls); + src->drVBMSt = SWAP_BE16 (src->drVBMSt); + src->drAllocPtr = SWAP_BE16 (src->drAllocPtr); + src->drNmAlBlks = SWAP_BE16 (src->drNmAlBlks); + src->drAlBlkSiz = SWAP_BE32 (src->drAlBlkSiz); + src->drClpSiz = SWAP_BE32 (src->drClpSiz); + src->drAlBlSt = SWAP_BE16 (src->drAlBlSt); + src->drNxtCNID = SWAP_BE32 (src->drNxtCNID); + src->drFreeBks = SWAP_BE16 (src->drFreeBks); + + /* Don't swap drVN */ + + src->drVolBkUp = SWAP_BE32 (src->drVolBkUp); + src->drVSeqNum = SWAP_BE16 (src->drVSeqNum); + src->drWrCnt = SWAP_BE32 (src->drWrCnt); + src->drXTClpSiz = SWAP_BE32 (src->drXTClpSiz); + src->drCTClpSiz = SWAP_BE32 (src->drCTClpSiz); + src->drNmRtDirs = SWAP_BE16 (src->drNmRtDirs); + src->drFilCnt = SWAP_BE32 (src->drFilCnt); + src->drDirCnt = SWAP_BE32 (src->drDirCnt); + + /* Swap just the 'blessed folder' in drFndrInfo */ + src->drFndrInfo[0] = SWAP_BE32 (src->drFndrInfo[0]); + + src->drEmbedSigWord = SWAP_BE16 (src->drEmbedSigWord); + src->drEmbedExtent.startBlock = SWAP_BE16 (src->drEmbedExtent.startBlock); + src->drEmbedExtent.blockCount = SWAP_BE16 (src->drEmbedExtent.blockCount); + + src->drXTFlSize = SWAP_BE32 (src->drXTFlSize); + src->drXTExtRec[0].startBlock = SWAP_BE16 (src->drXTExtRec[0].startBlock); + src->drXTExtRec[0].blockCount = SWAP_BE16 (src->drXTExtRec[0].blockCount); + src->drXTExtRec[1].startBlock = SWAP_BE16 (src->drXTExtRec[1].startBlock); + src->drXTExtRec[1].blockCount = SWAP_BE16 (src->drXTExtRec[1].blockCount); + src->drXTExtRec[2].startBlock = SWAP_BE16 (src->drXTExtRec[2].startBlock); + src->drXTExtRec[2].blockCount = SWAP_BE16 (src->drXTExtRec[2].blockCount); + + src->drCTFlSize = SWAP_BE32 (src->drCTFlSize); + src->drCTExtRec[0].startBlock = SWAP_BE16 (src->drCTExtRec[0].startBlock); + src->drCTExtRec[0].blockCount = SWAP_BE16 (src->drCTExtRec[0].blockCount); + src->drCTExtRec[1].startBlock = SWAP_BE16 (src->drCTExtRec[1].startBlock); + src->drCTExtRec[1].blockCount = SWAP_BE16 (src->drCTExtRec[1].blockCount); + src->drCTExtRec[2].startBlock = SWAP_BE16 (src->drCTExtRec[2].startBlock); + src->drCTExtRec[2].blockCount = SWAP_BE16 (src->drCTExtRec[2].blockCount); +} + +/* + * hfs_swap_HFSPlusVolumeHeader + */ +void +hfs_swap_HFSPlusVolumeHeader ( + void *buf +) +{ + HFSPlusVolumeHeader *src = (HFSPlusVolumeHeader *)buf; + + src->signature = SWAP_BE16 (src->signature); + src->version = SWAP_BE16 (src->version); + src->attributes = SWAP_BE32 (src->attributes); + src->lastMountedVersion = SWAP_BE32 (src->lastMountedVersion); + + src->journalInfoBlock = SWAP_BE32 (src->journalInfoBlock); + + src->createDate = SWAP_BE32 (src->createDate); + src->modifyDate = 
SWAP_BE32 (src->modifyDate); + src->backupDate = SWAP_BE32 (src->backupDate); + src->checkedDate = SWAP_BE32 (src->checkedDate); + src->fileCount = SWAP_BE32 (src->fileCount); + src->folderCount = SWAP_BE32 (src->folderCount); + src->blockSize = SWAP_BE32 (src->blockSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + src->freeBlocks = SWAP_BE32 (src->freeBlocks); + src->nextAllocation = SWAP_BE32 (src->nextAllocation); + src->rsrcClumpSize = SWAP_BE32 (src->rsrcClumpSize); + src->dataClumpSize = SWAP_BE32 (src->dataClumpSize); + src->nextCatalogID = SWAP_BE32 (src->nextCatalogID); + src->writeCount = SWAP_BE32 (src->writeCount); + src->encodingsBitmap = SWAP_BE64 (src->encodingsBitmap); + + /* Don't swap finderInfo */ + + hfs_swap_HFSPlusForkData (&src->allocationFile); + hfs_swap_HFSPlusForkData (&src->extentsFile); + hfs_swap_HFSPlusForkData (&src->catalogFile); + hfs_swap_HFSPlusForkData (&src->attributesFile); + hfs_swap_HFSPlusForkData (&src->startupFile); +} + +/* + * hfs_swap_HFSPlusForkData + * + * There's still a few spots where we still need to swap the fork data. + */ +void +hfs_swap_HFSPlusForkData ( + HFSPlusForkData *src +) +{ + int i; + + src->logicalSize = SWAP_BE64 (src->logicalSize); + + src->clumpSize = SWAP_BE32 (src->clumpSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + src->extents[i].startBlock = SWAP_BE32 (src->extents[i].startBlock); + src->extents[i].blockCount = SWAP_BE32 (src->extents[i].blockCount); + } +} diff --git a/newfs_hfs/hfs_endian.h b/newfs_hfs/hfs_endian.h new file mode 100644 index 0000000..9c3962b --- /dev/null +++ b/newfs_hfs/hfs_endian.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HFS_ENDIAN_H__ +#define __HFS_ENDIAN_H__ + +/* + * hfs_endian.h + * + * This file prototypes endian swapping routines for the HFS/HFS Plus + * volume format. 
+ */ +#include <hfs/hfs_format.h> +#include <libkern/OSByteOrder.h> + +/*********************/ +/* BIG ENDIAN Macros */ +/*********************/ +#if BYTE_ORDER == BIG_ENDIAN + + /* HFS is always big endian, make swaps into no-ops */ + #define SWAP_BE16(__a) (__a) + #define SWAP_BE32(__a) (__a) + #define SWAP_BE64(__a) (__a) + + /* HFS is always big endian, no swapping needed */ + #define SWAP_HFSMDB(__a) + #define SWAP_HFSPLUSVH(__a) + +/************************/ +/* LITTLE ENDIAN Macros */ +/************************/ +#elif BYTE_ORDER == LITTLE_ENDIAN + + /* HFS is always big endian, make swaps actually swap */ + #define SWAP_BE16(__a) OSSwapBigToHostInt16 (__a) + #define SWAP_BE32(__a) OSSwapBigToHostInt32 (__a) + #define SWAP_BE64(__a) OSSwapBigToHostInt64 (__a) + + #define SWAP_HFSMDB(__a) hfs_swap_HFSMasterDirectoryBlock ((__a)) + #define SWAP_HFSPLUSVH(__a) hfs_swap_HFSPlusVolumeHeader ((__a)); + +#else +#warning Unknown byte order +#error +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void hfs_swap_HFSMasterDirectoryBlock (void *buf); +void hfs_swap_HFSPlusVolumeHeader (void *buf); + +#ifdef __cplusplus +} +#endif + +#endif /* __HFS_FORMAT__ */ diff --git a/newfs_hfs/makehfs.c b/newfs_hfs/makehfs.c new file mode 100644 index 0000000..27b922a --- /dev/null +++ b/newfs_hfs/makehfs.c @@ -0,0 +1,2151 @@ +/* + * Copyright (c) 1999-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: makehfs.c + + Contains: Initialization code for HFS and HFS Plus volumes. + + Copyright: � 1984-1999 by Apple Computer, Inc., all rights reserved. + +*/ + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/time.h> +#include <err.h> +#include <sys/errno.h> +#include <sys/stat.h> +#include <sys/sysctl.h> +#include <sys/vmmeter.h> + +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <paths.h> +#include <pwd.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <wipefs.h> + +/* + * CommonCrypto is meant to be a more stable API than OpenSSL. + * Defining COMMON_DIGEST_FOR_OPENSSL gives API-compatibility + * with OpenSSL, so we don't have to change the code. 
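 * (With that macro defined, OpenSSL-style calls such as SHA1_Init(),
 * SHA1_Update() and SHA1_Final() resolve to their CC_SHA1_* counterparts
 * from CommonCrypto, so existing digest code compiles unchanged.)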
+ */ +#define COMMON_DIGEST_FOR_OPENSSL +#include <CommonCrypto/CommonDigest.h> + +#include <libkern/OSByteOrder.h> + +#include <CoreFoundation/CFString.h> +#include <CoreFoundation/CFStringEncodingExt.h> +#include <IOKit/IOKitLib.h> +#include <IOKit/storage/IOMedia.h> + +#include <TargetConditionals.h> + +extern Boolean _CFStringGetFileSystemRepresentation(CFStringRef string, UInt8 *buffer, CFIndex maxBufLen); + + +#include <hfs/hfs_format.h> +#include <hfs/hfs_mount.h> +#include "hfs_endian.h" + +#include "newfs_hfs.h" + +#ifndef NEWFS_HFS_DEBUG +# ifdef DEBUG_BUILD +# define NEWFS_HFS_DEBUG 1 +# else +# define NEWFS_HFS_DEBUG 0 +# endif +#endif + +#define HFS_BOOT_DATA "/usr/share/misc/hfsbootdata" + +#define HFS_JOURNAL_FILE ".journal" +#define HFS_JOURNAL_INFO ".journal_info_block" + +#define kJournalFileType 0x6a726e6c /* 'jrnl' */ + + +typedef HFSMasterDirectoryBlock HFS_MDB; + +struct filefork { + UInt16 startBlock; + UInt16 blockCount; + UInt32 logicalSize; + UInt32 physicalSize; +}; + +struct ExtentRecord { + HFSPlusExtentKey key; + HFSPlusExtentRecord record; +} __attribute__((aligned(2), packed)); +static size_t numOverflowExtents = 0; +static struct ExtentRecord *overflowExtents = NULL; + +struct filefork gDTDBFork, gSystemFork, gReadMeFork; + +static void WriteVH __P((const DriveInfo *driveInfo, HFSPlusVolumeHeader *hp)); +static void InitVH __P((hfsparams_t *defaults, UInt64 sectors, + HFSPlusVolumeHeader *header)); + +static int AllocateExtent(UInt8 *buffer, UInt32 startBlock, UInt32 blockCount); +static int MarkExtentUsed(const DriveInfo *, HFSPlusVolumeHeader *, UInt32, UInt32); + +static void WriteExtentsFile __P((const DriveInfo *dip, UInt64 startingSector, + const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes)); + +static void WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes); + +static void WriteCatalogFile __P((const DriveInfo *dip, UInt64 startingSector, + const hfsparams_t *dp, HFSPlusVolumeHeader *header, void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes)); +static int WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSPlusVolumeHeader *header, + void *buffer); +static void InitCatalogRoot_HFSPlus __P((const hfsparams_t *dp, const HFSPlusVolumeHeader *header, void * buffer)); + +static void WriteMapNodes __P((const DriveInfo *driveInfo, UInt64 diskStart, + UInt32 firstMapNode, UInt32 mapNodes, UInt16 btNodeSize, void *buffer)); +static void WriteBuffer __P((const DriveInfo *driveInfo, UInt64 startingSector, + UInt64 byteCount, const void *buffer)); +static UInt32 Largest __P((UInt32 a, UInt32 b, UInt32 c, UInt32 d )); + +static UInt32 GetDefaultEncoding(); + +static UInt32 UTCToLocal __P((UInt32 utcTime)); + +static int ConvertUTF8toUnicode __P((const UInt8* source, size_t bufsize, + UniChar* unibuf, UInt16 *charcount)); + +static int getencodinghint(unsigned char *name); + +#define VOLUMEUUIDVALUESIZE 2 +typedef union VolumeUUID { + UInt32 value[VOLUMEUUIDVALUESIZE]; + struct { + UInt32 high; + UInt32 low; + } v; +} VolumeUUID; +void GenerateVolumeUUID(VolumeUUID *newVolumeID); + +void SETOFFSET (void *buffer, UInt16 btNodeSize, SInt16 recOffset, SInt16 vecOffset); +#define SETOFFSET(buf,ndsiz,offset,rec) \ + (*(SInt16 *)((UInt8 *)(buf) + (ndsiz) + (-2 * (rec))) = (SWAP_BE16 (offset))) + +#define BYTESTOBLKS(bytes,blks) 
DivideAndRoundUp((bytes),(blks)) + +#define ROUNDUP(x, u) (((x) % (u) == 0) ? (x) : ((x)/(u) + 1) * (u)) + +#if TARGET_OS_EMBEDDED +#define ENCODING_TO_BIT(e) \ + ((e) < 48 ? (e) : 0) +#else +#define ENCODING_TO_BIT(e) \ + ((e) < 48 ? (e) : \ + ((e) == kCFStringEncodingMacUkrainian ? 48 : \ + ((e) == kCFStringEncodingMacFarsi ? 49 : 0))) +#endif + + +#ifdef DEBUG_BUILD +struct cp_root_xattr { + u_int16_t vers; + u_int16_t reserved1; + u_int64_t reserved2; + u_int8_t reserved3[16]; +} __attribute__((aligned(2), packed)); +#endif + +/* + * Create a series of (sequential!) extents for the + * requested file. It tries to create the requested + * number, but may be stymied by the file size, and + * the number of minimum blocks. + */ +static void +createExtents(HFSPlusForkData *file, + UInt32 fileID, + UInt32 startBlock, + size_t numExtents, + int minBlocks) +{ + if (NEWFS_HFS_DEBUG == 0) { + /* + * The common case, for non-debug. + */ + file->extents[0].startBlock = startBlock; + file->extents[0].blockCount = file->totalBlocks; + } else { + UInt32 blocksLeft, blocksTotal = 0, blockStep; + int i; + int firstAdjust = 0; + + if (numExtents == 1) { + // The common case, no need to do any math + file->extents[0].startBlock = startBlock; + file->extents[0].blockCount = file->totalBlocks; + return; + } + if (file->totalBlocks < numExtents) + numExtents = file->totalBlocks; + + blocksLeft = file->totalBlocks; + + /* + * The intent here is to split the number of blocks into the + * requested number of extents. So first we determine how + * many blocks should go in each extent -- that's blockStep. + * If we have been giving minBlocks, we need to make sure it's + * a multiple of that. (In general, the values are going to be + * 1 or 2 for minBlocks.) + * + * If there are more requested extents than blocks, the division + * works out to zero... so we limit blockStep to minBlocks. + * + */ + blockStep = blocksLeft / numExtents; + + /* + * To allow invalid extent lengths, set minBlocks to 1, and + * comment out the next two if statements. + */ + if ((blockStep % minBlocks) != 0) + blockStep = (blockStep / minBlocks) * minBlocks; + if (blockStep == 0) + blockStep = minBlocks; + + /* + * Now, after that, we may still not have the right number, since + * the math may not work out properly. So we can work around that + * by making the first extent have all the spares. + */ + if ((blockStep * numExtents) < blocksLeft) { + // Need to adjust the first one. + firstAdjust = blocksLeft - (blockStep * numExtents); + if ((firstAdjust % minBlocks) != 0) + firstAdjust = ROUNDUP(firstAdjust, minBlocks); + } + + /* + * Now, at this point, start handing out blocks to each extent. + * First to the 8 extents in the fork descriptor. + */ + for (i = 0; i < 8 && blocksLeft > 0; i++) { + int n = MIN(blockStep + firstAdjust, blocksLeft); + file->extents[i].startBlock = startBlock + blocksTotal; + file->extents[i].blockCount = n; + blocksLeft -= n; + blocksTotal += n; + firstAdjust = 0; + } + /* + * Then, if there are any left, to the overflow extents. 
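+ *
+ * Worked example (editor's illustration, hypothetical numbers):
+ * totalBlocks = 100, numExtents = 8, minBlocks = 4 gives
+ * blockStep = 100 / 8 = 12, already a multiple of minBlocks;
+ * 12 * 8 = 96 < 100, so firstAdjust = 4 and the first extent gets
+ * 12 + 4 = 16 blocks while the remaining seven get 12 each,
+ * for 16 + 7 * 12 = 100 blocks in total and no overflow records.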
+ */ + while (blocksLeft > 0) { + struct ExtentRecord tmp; + UInt32 bcount = 0; + memset(&tmp, 0, sizeof(tmp)); + tmp.key.keyLength = SWAP_BE16(sizeof(HFSPlusExtentKey) - sizeof(uint16_t)); + tmp.key.forkType = 0; + tmp.key.fileID = SWAP_BE32(fileID); + tmp.key.startBlock = SWAP_BE32(blocksTotal); + for (i = 0; i < 8 && blocksLeft > 0; i++) { + int n = MIN(blockStep, blocksLeft); + tmp.record[i].startBlock = SWAP_BE32(blocksTotal + bcount + startBlock); + tmp.record[i].blockCount = SWAP_BE32(n); + bcount += n; + blocksLeft -= n; + } + blocksTotal += bcount; + overflowExtents = realloc(overflowExtents, (numOverflowExtents+1) * sizeof(*overflowExtents)); + overflowExtents[numOverflowExtents++] = tmp; + } + } + return; +} + +/* + * wipefs() in -lutil knows about multiple filesystem formats. + * This replaces the code: + * WriteBuffer(driveInfo, 0, diskBlocksUsed * kBytesPerSector, NULL); + * WriteBuffer(driveInfo, driveInfo->totalSectors - 8, 4 * 1024, NULL); + * which was used to erase the beginning and end of the filesystem. + * + */ +static int +dowipefs(int fd) +{ + int err; + wipefs_ctx handle; + + err = wipefs_alloc(fd, 0/*sectorSize*/, &handle); + if (err == 0) { + err = wipefs_wipe(handle); + } + wipefs_free(&handle); + return err; +} + + +/* + * make_hfsplus + * + * This routine writes an initial HFS Plus volume structure onto a volume. + * It is assumed that the disk has already been formatted and verified. + * + */ +int +make_hfsplus(const DriveInfo *driveInfo, hfsparams_t *defaults) +{ + UInt16 btNodeSize; + UInt32 sectorsPerBlock; + UInt32 mapNodes; + UInt32 sectorsPerNode; + UInt32 temp; + UInt32 bytesUsed; + UInt32 endOfAttributes; + UInt32 startOfAllocation; + UInt64 bytesToZero; + void *nodeBuffer = NULL; + HFSPlusVolumeHeader *header = NULL; + UInt64 sector; + + /* Use wipefs() API to clear old metadata from the device. + * This should be done before we start writing anything on the + * device as wipefs will internally call ioctl(DKIOCDISCARD) on the + * entire device. + */ + (void) dowipefs(driveInfo->fd); + + /* --- Create an HFS Plus header: */ + + header = (HFSPlusVolumeHeader*)malloc((size_t)kBytesPerSector); + if (header == NULL) + err(1, NULL); + + defaults->encodingHint = getencodinghint(defaults->volumeName); + + /* VH Initialized in native byte order */ + InitVH(defaults, driveInfo->totalSectors, header); + + sectorsPerBlock = header->blockSize / kBytesPerSector; + + + /*--- ZERO OUT BEGINNING OF DISK: */ + /* + * Clear out the space to be occupied by the bitmap and B-Trees. + * The first chunk is the boot sectors, volume header, allocation bitmap, + * journal, Extents B-tree, and Attributes B-tree (if any). + * The second chunk is the Catalog B-tree. + */ + + /* Zero out first 1M (to be safe) for volume header */ + WriteBuffer(driveInfo, 0, 1024*1024, NULL); + + if (NEWFS_HFS_DEBUG) { + /* + * Mark each file extent as used individually, rather than doing it all at once. + * Also zero out the entire file. 
+ */ +# define MFU(f) \ + do { \ + WriteBuffer(driveInfo, \ + header->f.extents[0].startBlock * sectorsPerBlock, \ + header->f.totalBlocks * header->blockSize, \ + NULL); \ + if (MarkExtentUsed(driveInfo, header, header->f.extents[0].startBlock, header->f.totalBlocks) == -1) { \ + errx(1, #f " extent overlap <%u, %u>", header->f.extents[0].startBlock, header->f.totalBlocks); \ + } \ + } while (0) + MFU(allocationFile); + MFU(attributesFile); + MFU(extentsFile); +# undef MFU + } else { + /* Zero out from start of allocation file to end of attribute file; + * will include allocation bitmap, journal, extents btree, and + * attribute btree + */ + sector = header->allocationFile.extents[0].startBlock * sectorsPerBlock; + endOfAttributes = header->attributesFile.extents[0].startBlock + header->attributesFile.totalBlocks; + startOfAllocation = header->allocationFile.extents[0].startBlock; + bytesToZero = (UInt64) (endOfAttributes - startOfAllocation + 1) * header->blockSize; + WriteBuffer(driveInfo, sector, bytesToZero, NULL); + + bytesToZero = (UInt64) header->catalogFile.totalBlocks * header->blockSize; + sector = header->catalogFile.extents[0].startBlock * sectorsPerBlock; + WriteBuffer(driveInfo, sector, bytesToZero, NULL); + } + /* + * Allocate a buffer for the rest of our IO. + * Note that in some cases we may need to initialize an EA, so we + * need to use the attribute B-Tree node size in this calculation. + */ + + temp = Largest( defaults->catalogNodeSize * 2, + (defaults->attributesNodeSize * 2), + header->blockSize, + (header->catalogFile.extents[0].startBlock + header->catalogFile.totalBlocks + 7) / 8 ); + /* + * If size is not a mutiple of 512, round up to nearest sector + */ + if ( (temp & 0x01FF) != 0 ) + temp = (temp + kBytesPerSector) & 0xFFFFFE00; + + nodeBuffer = valloc((size_t)temp); + if (nodeBuffer == NULL) + err(1, NULL); + + + + /*--- WRITE ALLOCATION BITMAP BITS TO DISK: */ + + /* + * XXX - this doesn't work well with using arbitrary extents. + * + * To do this, we need to find the appropriate area in the file, and + * pass that in to AllocateExtent, which is just a bitmap manipulation + * routine. Then we need to write it out at the right place. Note that + * we may have to read it in first, as well, which may mean zeroing out + * the entirety of the allocation file first. + * + * Possible solution: + * New function to mark extent as used. + * Function should figure out which block(s) for an extent. + * Read it in. Mark the bits used. Return. + * For now, it can assume the allocation extents are contiguous, but + * should be extensible to not do that. 
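+ *
+ * (Editor's note: MarkExtentUsed(), defined later in this file, is that
+ * function; it reads the affected physical sectors of the allocation
+ * file, sets the bits via AllocateExtent(), and writes them back,
+ * currently assuming the allocation file is contiguous.)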
+ */ + sector = header->allocationFile.extents[0].startBlock * sectorsPerBlock; + bzero(nodeBuffer, temp); + /* Mark volume header as allocated */ + if (header->blockSize == 512) { + if (MarkExtentUsed(driveInfo, header, 0, 4) == -1) { + errx(1, "Overlapped extent at <0, 4> (%d)", __LINE__); + } + } else if (header->blockSize == 1024) { + if (MarkExtentUsed(driveInfo, header, 0, 2) == -1) { + errx(1, "Overlapped extent at <0, 2> (%d)", __LINE__); + } + } else { + if (MarkExtentUsed(driveInfo, header, 0, 1) == -1) { + errx(1, "Overlapped extent at <0, 1> (%d)", __LINE__); + } + } + if (NEWFS_HFS_DEBUG == 0) { + /* Mark area from bitmap to end of attributes as allocated */ + if (MarkExtentUsed(driveInfo, header, startOfAllocation, (endOfAttributes - startOfAllocation)) == -1) { + errx(1, "Overlapped extent at <%u, %u> (%d)\n", startOfAllocation, endOfAttributes - startOfAllocation, __LINE__); + } + } + + /* Mark catalog btree blocks as allocated */ + if (NEWFS_HFS_DEBUG) { + /* Erase the catalog file first */ + WriteBuffer(driveInfo, + header->catalogFile.extents[0].startBlock * sectorsPerBlock, + header->catalogFile.totalBlocks * header->blockSize, + NULL); + } + if (MarkExtentUsed(driveInfo, header, + header->catalogFile.extents[0].startBlock, + header->catalogFile.totalBlocks) == -1) { + errx(1, "Overlapped catalog extent at <%u, %u>\n", header->catalogFile.extents[0].startBlock, header->catalogFile.totalBlocks); + } + + /* + * Write alternate Volume Header bitmap bit to allocations file at + * 2nd to last sector on HFS+ volume + */ + if (MarkExtentUsed(driveInfo, header, header->totalBlocks - 1, 1) == -1) { + errx(1, "Overlapped extent for header at <%u, %u>\n", header->totalBlocks - 1, 1); + } + + /* + * If the blockSize is 512 bytes, then the last 1kbyte has to be marked + * used via two bits. 
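+ *
+ * (Editor's note: block totalBlocks-1 was already marked above; with
+ * 512-byte allocation blocks the alternate volume header itself lands
+ * in block totalBlocks-2, so the call below sets that second bit too.)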
+ */ + if ( header->blockSize == 512 ) { + if (MarkExtentUsed(driveInfo, header, header->totalBlocks - 2, 1) == -1) { + errx(1, "Overlapped extent for AVH at <%u, %u>\n", header->totalBlocks - 2, 1); + } + + } + + /*--- WRITE FILE EXTENTS B-TREE TO DISK: */ + + btNodeSize = defaults->extentsNodeSize; + sectorsPerNode = btNodeSize/kBytesPerSector; + + sector = header->extentsFile.extents[0].startBlock * sectorsPerBlock; + WriteExtentsFile(driveInfo, sector, defaults, NULL, nodeBuffer, &bytesUsed, &mapNodes); + + if (mapNodes > 0) { + WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector), + bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer); + } + + + + /*--- WRITE FILE ATTRIBUTES B-TREE TO DISK: */ + if (defaults->attributesClumpSize) { + + btNodeSize = defaults->attributesNodeSize; + sectorsPerNode = btNodeSize/kBytesPerSector; + + sector = header->attributesFile.extents[0].startBlock * sectorsPerBlock; + WriteAttributesFile(driveInfo, sector, defaults, NULL, nodeBuffer, &bytesUsed, &mapNodes); + if (mapNodes > 0) { + WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector), + bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer); + } + } + + /*--- WRITE CATALOG B-TREE TO DISK: */ + + btNodeSize = defaults->catalogNodeSize; + sectorsPerNode = btNodeSize/kBytesPerSector; + + sector = header->catalogFile.extents[0].startBlock * sectorsPerBlock; + WriteCatalogFile(driveInfo, sector, defaults, header, nodeBuffer, &bytesUsed, &mapNodes); + + if (mapNodes > 0) { + WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector), + bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer); + } + + /*--- JOURNALING SETUP */ + if (defaults->journaledHFS) { + sector = header->journalInfoBlock * sectorsPerBlock; + if (NEWFS_HFS_DEBUG) { + /* + * For debug build, the journal may be located somewhere other + * than right after the journalInfoBlock. + */ + if (MarkExtentUsed(driveInfo, header, header->journalInfoBlock, 1) == -1) { + errx(1, "Extent overlap for journalInfoBlock <%u, 1>", header->journalInfoBlock); + } + + if (!defaults->journalDevice) { + UInt32 jStart = defaults->journalBlock ? defaults->journalBlock : (header->journalInfoBlock + 1); + UInt32 jCount = (UInt32)(defaults->journalSize / header->blockSize); + if (MarkExtentUsed(driveInfo, header, jStart, jCount) == -1) { + errx(1, "Extent overlap for journal <%u, %u>", jStart, jCount); + } + } + } + if (WriteJournalInfo(driveInfo, sector, defaults, header, nodeBuffer) != 0) { + err(EINVAL, "Failed to create the journal"); + } + } + + /*--- WRITE VOLUME HEADER TO DISK: */ + + /* write header last in case we fail along the way */ + + /* Writes both copies of the volume header */ + WriteVH (driveInfo, header); + /* VH is now big-endian */ + + free(nodeBuffer); + free(header); + + return (0); +} + +/* + * WriteVH + * + * Writes the Volume Header (VH) to disk. + * + * The VH is byte-swapped if necessary to big endian. Since this + * is always the last operation, there's no point in unswapping it. + */ +static void +WriteVH (const DriveInfo *driveInfo, HFSPlusVolumeHeader *hp) +{ + SWAP_HFSPLUSVH (hp); + + WriteBuffer(driveInfo, 2, kBytesPerSector, hp); + WriteBuffer(driveInfo, driveInfo->totalSectors - 2, kBytesPerSector, hp); +} + + +/* + * InitVH + * + * Initialize a Volume Header record. 
+ */ +static void +InitVH(hfsparams_t *defaults, UInt64 sectors, HFSPlusVolumeHeader *hp) +{ + UInt32 blockSize; + UInt32 blockCount; + UInt32 blocksUsed; + UInt32 bitmapBlocks; + UInt16 burnedBlocksBeforeVH = 0; + UInt16 burnedBlocksAfterAltVH = 0; + UInt32 nextBlock; + UInt32 allocateBlock; + VolumeUUID newVolumeUUID; + VolumeUUID* finderInfoUUIDPtr; + UInt64 hotFileBandSize; + UInt64 volsize; + +/* + * 2 MB is the minimum size for the new behavior with + * space after the attr b-tree, and hotfile stuff. + */ +#define MINVOLSIZE_WITHSPACE 2097152 + + bzero(hp, kBytesPerSector); + + blockSize = defaults->blockSize; + blockCount = sectors / (blockSize >> kLog2SectorSize); + + /* + * HFSPlusVolumeHeader is located at sector 2, so we may need + * to invalidate blocks before HFSPlusVolumeHeader. + */ + if ( blockSize == 512 ) { + burnedBlocksBeforeVH = 2; /* 2 before VH */ + burnedBlocksAfterAltVH = 1; /* 1 after altVH */ + } else if ( blockSize == 1024 ) { + burnedBlocksBeforeVH = 1; + } + nextBlock = burnedBlocksBeforeVH + 1; /* +1 for VH itself */ + if (defaults->fsStartBlock) { + if (NEWFS_HFS_DEBUG) + printf ("Laying down metadata starting at allocation block=%u (totalBlocks=%u)\n", (unsigned int)defaults->fsStartBlock, (unsigned int)blockCount); + nextBlock += defaults->fsStartBlock; /* lay down file system after this allocation block */ + } + + bitmapBlocks = defaults->allocationClumpSize / blockSize; + + /* note: add 2 for the Alternate VH, and VH */ + blocksUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks; + + if (defaults->flags & kMakeCaseSensitive) { + hp->signature = kHFSXSigWord; + hp->version = kHFSXVersion; + } else { + hp->signature = kHFSPlusSigWord; + hp->version = kHFSPlusVersion; + } + hp->attributes = kHFSVolumeUnmountedMask | kHFSUnusedNodeFixMask; + if (defaults->flags & kMakeContentProtect) { + hp->attributes |= kHFSContentProtectionMask; + } + hp->lastMountedVersion = kHFSPlusMountVersion; + + /* NOTE: create date is in local time, not GMT! 
*/ + hp->createDate = UTCToLocal(defaults->createDate); + hp->modifyDate = defaults->createDate; + hp->backupDate = 0; + hp->checkedDate = defaults->createDate; + +// hp->fileCount = 0; +// hp->folderCount = 0; + + hp->blockSize = blockSize; + hp->totalBlocks = blockCount; + hp->freeBlocks = blockCount; /* will be adjusted at the end */ + + volsize = (UInt64) blockCount * (UInt64) blockSize; + + hp->rsrcClumpSize = defaults->rsrcClumpSize; + hp->dataClumpSize = defaults->dataClumpSize; + hp->nextCatalogID = defaults->nextFreeFileID; + hp->encodingsBitmap = 1 | (1 << ENCODING_TO_BIT(defaults->encodingHint)); + + /* set up allocation bitmap file */ + hp->allocationFile.clumpSize = defaults->allocationClumpSize; + hp->allocationFile.logicalSize = defaults->allocationClumpSize; + hp->allocationFile.totalBlocks = bitmapBlocks; + + if (NEWFS_HFS_DEBUG && defaults->allocationStartBlock) + allocateBlock = defaults->allocationStartBlock; + else { + allocateBlock = nextBlock; + nextBlock += bitmapBlocks; + } + + createExtents(&hp->allocationFile, kHFSAllocationFileID, allocateBlock, defaults->allocationExtsCount, 1); + + // This works because the files are contiguous for now + if (NEWFS_HFS_DEBUG) + printf ("allocationFile: (%10u, %10u)\n", hp->allocationFile.extents[0].startBlock, hp->allocationFile.totalBlocks); + + /* set up journal files */ + if (defaults->journaledHFS) { + UInt32 journalBlock; + hp->fileCount = 2; + hp->attributes |= kHFSVolumeJournaledMask; + hp->nextCatalogID += 2; + + /* + * Allocate 1 block for the journalInfoBlock. The + * journal file size is passed in hfsparams_t. + */ + if (NEWFS_HFS_DEBUG && defaults->journalInfoBlock) + hp->journalInfoBlock = defaults->journalInfoBlock; + else + hp->journalInfoBlock = nextBlock++; + if (NEWFS_HFS_DEBUG && defaults->journalBlock) + journalBlock = defaults->journalBlock; + else { + journalBlock = hp->journalInfoBlock + 1; + nextBlock += ((defaults->journalSize+blockSize-1) / blockSize); + } + + if (NEWFS_HFS_DEBUG) { + printf ("journalInfo : (%10u, %10u)\n", (u_int32_t)hp->journalInfoBlock, 1); + printf ("journal : (%10u, %10u)\n", (u_int32_t)journalBlock, (u_int32_t)((defaults->journalSize + (blockSize-1)) / blockSize)); + } + /* XXX What if journal is on a different device? */ + blocksUsed += 1 + ((defaults->journalSize+blockSize-1) / blockSize); + } else { + hp->journalInfoBlock = 0; + } + + /* set up extents b-tree file */ + hp->extentsFile.clumpSize = defaults->extentsClumpSize; + hp->extentsFile.logicalSize = defaults->extentsClumpSize; + hp->extentsFile.totalBlocks = defaults->extentsClumpSize / blockSize; + if (NEWFS_HFS_DEBUG && defaults->extentsStartBlock) + allocateBlock = defaults->extentsStartBlock; + else { + allocateBlock = nextBlock; + nextBlock += hp->extentsFile.totalBlocks; + } + createExtents(&hp->extentsFile, kHFSExtentsFileID, allocateBlock, defaults->extentsExtsCount, (defaults->journaledHFS && defaults->extentsNodeSize > hp->blockSize) ? 
defaults->extentsNodeSize / hp->blockSize : 1); + + blocksUsed += hp->extentsFile.totalBlocks; + + if (NEWFS_HFS_DEBUG) + printf ("extentsFile : (%10u, %10u)\n", hp->extentsFile.extents[0].startBlock, hp->extentsFile.totalBlocks); + + /* set up attributes b-tree file */ + if (defaults->attributesClumpSize) { + hp->attributesFile.clumpSize = defaults->attributesClumpSize; + hp->attributesFile.logicalSize = defaults->attributesClumpSize; + hp->attributesFile.totalBlocks = defaults->attributesClumpSize / blockSize; + if (NEWFS_HFS_DEBUG && defaults->attributesStartBlock) + allocateBlock = defaults->attributesStartBlock; + else { + allocateBlock = nextBlock; + nextBlock += hp->attributesFile.totalBlocks; + } + createExtents(&hp->attributesFile, kHFSAttributesFileID, allocateBlock, defaults->attributesExtsCount, (defaults->journaledHFS && defaults->attributesNodeSize > hp->blockSize) ? defaults->attributesNodeSize / hp->blockSize : 1); + blocksUsed += hp->attributesFile.totalBlocks; + + if (NEWFS_HFS_DEBUG) { + printf ("attributesFile: (%10u, %10u)\n", hp->attributesFile.extents[0].startBlock, hp->attributesFile.totalBlocks); + } + /* + * Leave some room for the Attributes B-tree to grow, if the volsize >= 2MB + */ + if (volsize >= MINVOLSIZE_WITHSPACE && defaults->attributesStartBlock == 0) { + nextBlock += 10 * (hp->attributesFile.clumpSize / blockSize); + } + } + + /* set up catalog b-tree file */ + hp->catalogFile.clumpSize = defaults->catalogClumpSize; + hp->catalogFile.logicalSize = defaults->catalogClumpSize; + hp->catalogFile.totalBlocks = defaults->catalogClumpSize / blockSize; + if (NEWFS_HFS_DEBUG && defaults->catalogStartBlock) + allocateBlock = defaults->catalogStartBlock; + else { + allocateBlock = nextBlock; + nextBlock += hp->catalogFile.totalBlocks; + } + createExtents(&hp->catalogFile, kHFSCatalogFileID, allocateBlock, defaults->catalogExtsCount, (defaults->journaledHFS && defaults->catalogNodeSize > hp->blockSize) ? defaults->catalogNodeSize / hp->blockSize : 1); + blocksUsed += hp->catalogFile.totalBlocks; + + if (NEWFS_HFS_DEBUG) + printf ("catalogFile : (%10u, %10u)\n\n", hp->catalogFile.extents[0].startBlock, hp->catalogFile.totalBlocks); + + if ((numOverflowExtents * sizeof(struct ExtentRecord)) > + (defaults->extentsNodeSize - sizeof(BTNodeDescriptor) - (sizeof(uint16_t) * numOverflowExtents))) { + errx(1, "Too many overflow extent records to fit into a single extent node"); + } + + /* + * Add some room for the catalog file to grow... + */ + nextBlock += 10 * (hp->catalogFile.clumpSize / hp->blockSize); + + /* + * Add some room for the hot file band. This uses the same 5MB per GB + * as the kernel. The kernel only uses hotfiles if the volume is larger + * than 10GBytes, so do the same here. + */ +#define METADATAZONE_MINIMUM_VOLSIZE (10ULL * 1024ULL * 1024ULL * 1024ULL) +#define HOTBAND_MINIMUM_SIZE (10*1024*1024) +#define HOTBAND_MAXIMUM_SIZE (512*1024*1024) + if (volsize >= METADATAZONE_MINIMUM_VOLSIZE) { + hotFileBandSize = (UInt64) blockCount * blockSize / 1024 * 5; + if (hotFileBandSize > HOTBAND_MAXIMUM_SIZE) + hotFileBandSize = HOTBAND_MAXIMUM_SIZE; + else if (hotFileBandSize < HOTBAND_MINIMUM_SIZE) + hotFileBandSize = HOTBAND_MINIMUM_SIZE; + nextBlock += hotFileBandSize / blockSize; + } + if (NEWFS_HFS_DEBUG && defaults->nextAllocBlock) + hp->nextAllocation = defaults->nextAllocBlock; + else + hp->nextAllocation = nextBlock; + + /* Adjust free blocks to reflect everything we have allocated. 
*/ + hp->freeBlocks -= blocksUsed; + + /* Generate and write UUID for the HFS+ disk */ + GenerateVolumeUUID(&newVolumeUUID); + finderInfoUUIDPtr = (VolumeUUID *)(&hp->finderInfo[24]); + finderInfoUUIDPtr->v.high = OSSwapHostToBigInt32(newVolumeUUID.v.high); + finderInfoUUIDPtr->v.low = OSSwapHostToBigInt32(newVolumeUUID.v.low); +} + +/* + * AllocateExtent + * + * Mark the given extent as in-use in the given bitmap buffer. + */ +static int AllocateExtent(UInt8 *buffer, UInt32 startBlock, UInt32 blockCount) +{ + UInt8 *p; + + /* Point to start of extent in bitmap buffer */ + p = buffer + (startBlock / 8); + + /* + * Important to remember: block 0 is (1 << 7); + * block 7 is (1 << 0). + */ + /* Partial byte at start of extent */ + if (startBlock & 7) + { + UInt8 mask = 0xff; + unsigned int lShift = 0; + unsigned int startBit = startBlock & 7; + + /* + * Is startBlock + blockCount entirely in + * p[0]? + */ + if (blockCount < (8 - startBit)) { + lShift = 8 - (startBit + blockCount); + } + mask = (0xff >> startBit) & (0xff << lShift); + if (NEWFS_HFS_DEBUG && (*p & mask)) { + fprintf(stderr, "%s(%d): expected 0, got %x\n", __FUNCTION__, __LINE__, *p & mask); + return -1; + } + *(p++) |= mask; + /* + * We have either set <lShift> or <startBlock & 7> bits. + */ + blockCount -= 8 - (lShift + startBit); +// blockCount -= lShift ? blockCount : (8 - startBit); +// blockCount -= __builtin_popcount(mask); + } + + /* Fill in whole bytes */ + if (blockCount >= 8) + { + if (NEWFS_HFS_DEBUG) { + /* + * Put this in ifdef because it'll slow things down. + * For non-debug case, we shouldn't have to worry about + * an overlap, anyway. + */ + size_t indx; + for (indx = 0; indx < blockCount / 8; indx++) { + if (p[indx] != 0) { + fprintf(stderr, "%s(%d): Expected 0 at %zu, got 0x%x\n", __FUNCTION__, __LINE__, indx, p[indx]); + return -1; + } + p[indx] = 0xff; + } + } else { + memset(p, 0xFF, blockCount / 8); + } + p += blockCount / 8; + blockCount &= 7; + } + + /* Partial byte at end of extent */ + if (blockCount) + { + UInt8 mask = 0xff << (8 - blockCount); + if (NEWFS_HFS_DEBUG && (*p & mask)) { + fprintf(stderr, "%s(%d): Expected 0, got %x\n", __FUNCTION__, __LINE__, *p & mask); + return -1; + } + *(p++) |= mask; + } + return 0; +} + +/* + * Mark an extent as being used. + * This involves finding out where the allocations file is, + * where in the allocations file the extent starts, and how + * long it runs. + * + * One downside to this implementation is that this does + * more I/O than the old mechanism, a cost to the flexibility. + * May have to consider doing caching of some sort. + */ + +static int +MarkExtentUsed(const DriveInfo *driveInfo, + HFSPlusVolumeHeader *header, + UInt32 startBlock, + UInt32 blockCount) +{ + size_t bufSize = driveInfo->physSectorSize; + uint8_t buf[bufSize]; + uint32_t blocksLeft = blockCount; + uint32_t curBlock = startBlock; + static const int kBitsPerByte = 8; + + /* + * We loop through physSectorSize blocks. + * This allows us to set as many bits as we need. + */ + while (blocksLeft > 0) { + off_t secNum; + uint32_t numBlocks; // The number of blocks to mark as used in this pass. 
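+ /*
+ * Editor's sketch of the arithmetic below, with hypothetical numbers:
+ * with physSectorSize = 4096, each pass covers 4096 * 8 = 32768 bitmap
+ * bits, so curBlock = 40000 gives secNum = 40000 / 32768 = 1 and
+ * blockOffset = 40000 % 32768 = 7232; that sector is then read from
+ * (allocation file start in bytes) + secNum * bufSize, modified, and
+ * written back.
+ */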
+ uint32_t blockOffset; // This is the block number of the current range, which starts at curBlock + + memset(buf, 0, sizeof(buf)); + secNum = curBlock / (bufSize * kBitsPerByte); + blockOffset = curBlock % (bufSize * kBitsPerByte); + numBlocks = MIN((bufSize * kBitsPerByte) - blockOffset, blocksLeft); + + /* + * Okay, now we've got the block number to read, + * the offset into the block, and the number of blocks + * to set. + * + * First we read in the buffer. To do that, we need to + * know where to read. + */ + ssize_t nbytes; + ssize_t nwritten; + off_t offset; + + /* + * XXX + * This needs to be changed if/when we support non-contiguous multiple + * extents. At that point, it'll probably have to be a function to search + * for the requested offset. (How many times must MapFileC be written?) + * For now, though, the offset is the physical sector offset from the + * start of the allocations file. + */ + offset = (header->allocationFile.extents[0].startBlock * header->blockSize) + + (secNum * bufSize); + + nbytes = pread(driveInfo->fd, buf, bufSize, offset); + + if (nbytes < (ssize_t)bufSize) { + if (nbytes == -1) + err(1, "%s::pread(%d, %p, %zu, %lld)", __FUNCTION__, driveInfo->fd, buf, bufSize, offset); + return -1; + } + + if (AllocateExtent(buf, blockOffset, numBlocks) == -1) { + warnx("In-use allocation block in <%u, %u>", blockOffset, numBlocks); + return -1; + } + nwritten = pwrite(driveInfo->fd, buf, bufSize, offset); + /* + * Normally I'd check for nwritten to be less than bufSize, but since bufSize is + * the physical sector size, we shouldn't be able to get less. So that most likely + * means a return value of 0 or -1, neither of which I could do anything about. + */ + if (nwritten != (ssize_t)bufSize) + return -1; + + // And go get the next set, if needed + blocksLeft -= numBlocks; + curBlock += numBlocks; + } + + return 0; +} +/* + * WriteExtentsFile + * + * Initializes and writes out the extents b-tree file. + * + * Byte swapping is performed in place. The buffer should not be + * accessed through direct casting once it leaves this function. + */ +static void +WriteExtentsFile(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSExtentDescriptor *bbextp __unused , void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes) +{ + BTNodeDescriptor *ndp; + BTHeaderRec *bthp; + UInt8 *bmp; + UInt32 nodeBitsInHeader; + UInt32 fileSize; + UInt32 nodeSize; + UInt32 temp; + SInt16 offset; + + *mapNodes = 0; + fileSize = dp->extentsClumpSize; + nodeSize = dp->extentsNodeSize; + + bzero(buffer, nodeSize); + + + /* FILL IN THE NODE DESCRIPTOR: */ + ndp = (BTNodeDescriptor *)buffer; + ndp->kind = kBTHeaderNode; + ndp->numRecords = SWAP_BE16 (3); + offset = sizeof(BTNodeDescriptor); + + SETOFFSET(buffer, nodeSize, offset, 1); + + + /* FILL IN THE HEADER RECORD: */ + bthp = (BTHeaderRec *)((UInt8 *)buffer + offset); + if (numOverflowExtents) { + bthp->treeDepth = SWAP_BE16(1); + bthp->rootNode = SWAP_BE32(1); + bthp->firstLeafNode = SWAP_BE32(1); + bthp->lastLeafNode = SWAP_BE32(1); + bthp->leafRecords = SWAP_BE32(numOverflowExtents); + } else { + bthp->treeDepth = 0; + bthp->rootNode = 0; + bthp->firstLeafNode = 0; + bthp->lastLeafNode = 0; + bthp->leafRecords = 0; + } + + bthp->nodeSize = SWAP_BE16 (nodeSize); + bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize); + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - (numOverflowExtents ? 
2 : 1)); /* header */ + bthp->clumpSize = SWAP_BE32 (fileSize); + + bthp->attributes |= SWAP_BE32 (kBTBigKeysMask); + bthp->maxKeyLength = SWAP_BE16 (kHFSPlusExtentKeyMaximumLength); + offset += sizeof(BTHeaderRec); + + SETOFFSET(buffer, nodeSize, offset, 2); + + offset += kBTreeHeaderUserBytes; + + SETOFFSET(buffer, nodeSize, offset, 3); + + + /* FIGURE OUT HOW MANY MAP NODES (IF ANY): */ + nodeBitsInHeader = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - (4 * sizeof(SInt16)) ); + + if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) { + UInt32 nodeBitsInMapNode; + + ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1); + nodeBitsInMapNode = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - (2 * sizeof(SInt16)) + - 2 ); + *mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader + + (nodeBitsInMapNode - 1)) / nodeBitsInMapNode; + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes); + } + + + /* + * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE. + * Note - worst case (32MB alloc blk) will have only 18 nodes in use. + */ + bmp = ((UInt8 *)buffer + offset); + temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes); + + /* Working a byte at a time is endian safe */ + while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; } + *bmp = ~(0xFF >> temp); + offset += nodeBitsInHeader/8; + + SETOFFSET(buffer, nodeSize, offset, 4); + + if (NEWFS_HFS_DEBUG && numOverflowExtents) { + void *node2 = (uint8_t*)buffer + nodeSize; + size_t i; + int (^keyCompare)(const void *l, const void *r) = ^(const void *l, const void *r) { + const struct ExtentRecord *left = (const struct ExtentRecord*)l; + const struct ExtentRecord *right = (const struct ExtentRecord*)r; + if (SWAP_BE32(left->key.fileID) != SWAP_BE32(right->key.fileID)) { + return (SWAP_BE32(left->key.fileID) > SWAP_BE32(right->key.fileID)) ? 1 : -1; + } + // forkType will always be 0 for us + if (SWAP_BE32(left->key.startBlock) != SWAP_BE32(right->key.startBlock)) { + return (SWAP_BE32(left->key.startBlock) > SWAP_BE32(right->key.startBlock)) ? 1 : -1; + } + return 0; + }; + + if (numOverflowExtents > 1) { + qsort_b(overflowExtents, numOverflowExtents, sizeof(*overflowExtents), keyCompare); + } + bzero(node2, nodeSize); + ndp = (BTNodeDescriptor*)node2; + ndp->kind = kBTLeafNode; + ndp->numRecords = SWAP_BE16(numOverflowExtents); + ndp->height = 1; + + offset = sizeof(BTNodeDescriptor); + for (i = 0; i < numOverflowExtents; i++) { + SETOFFSET(node2, nodeSize, offset, 1 + i); + memcpy(node2 + offset, &overflowExtents[i], sizeof(*overflowExtents)); + offset += sizeof(*overflowExtents); + } + SETOFFSET(node2, nodeSize, offset, numOverflowExtents + 1); + } + + *bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize; + + WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer); + +} + +/* + * WriteAttributesFile + * + * Initializes and writes out the attributes b-tree file. + * + * Byte swapping is performed in place. The buffer should not be + * accessed through direct casting once it leaves this function. 
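+ *
+ * (Editor's note, hypothetical numbers: as in WriteExtentsFile() above,
+ * the header node's map record can describe
+ * 8 * (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec)
+ * - kBTreeHeaderUserBytes - 4 * sizeof(SInt16)) nodes; assuming the
+ * usual 14-byte node descriptor and 106-byte header record, a 4 KiB
+ * node maps 8 * (4096 - 14 - 106 - 128 - 8) = 30720 nodes, so extra
+ * map nodes are only needed for trees larger than that.)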
+ */ +static void +WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSExtentDescriptor *bbextp __unused, void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes) +{ + BTNodeDescriptor *ndp; + BTHeaderRec *bthp; + UInt8 *bmp; + UInt32 nodeBitsInHeader; + UInt32 fileSize; + UInt32 nodeSize; + UInt32 temp; + SInt16 offset; + int set_cp_level = 0; + + *mapNodes = 0; + fileSize = dp->attributesClumpSize; + nodeSize = dp->attributesNodeSize; + +#ifdef DEBUG_BUILD + /* + * If user specified content protection and a protection level, + * then verify the protection level is sane. + */ + if ((dp->flags & kMakeContentProtect) && (dp->protectlevel != 0)) { + if ((dp->protectlevel >= 2 ) && (dp->protectlevel <= 4)) { + set_cp_level = 1; + } + } +#endif + + + bzero(buffer, nodeSize); + + + /* FILL IN THE NODE DESCRIPTOR: */ + ndp = (BTNodeDescriptor *)buffer; + ndp->kind = kBTHeaderNode; + ndp->numRecords = SWAP_BE16 (3); + offset = sizeof(BTNodeDescriptor); + + SETOFFSET(buffer, nodeSize, offset, 1); + + + /* FILL IN THE HEADER RECORD: */ + bthp = (BTHeaderRec *)((UInt8 *)buffer + offset); + if (set_cp_level) { + bthp->treeDepth = SWAP_BE16(1); + bthp->rootNode = SWAP_BE32(1); + bthp->firstLeafNode = SWAP_BE32(1); + bthp->lastLeafNode = SWAP_BE32(1); + bthp->leafRecords = SWAP_BE32(1); + } + else { + bthp->treeDepth = 0; + bthp->rootNode = 0; + bthp->firstLeafNode = 0; + bthp->lastLeafNode = 0; + bthp->leafRecords = 0; + } + + bthp->nodeSize = SWAP_BE16 (nodeSize); + bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize); + if (set_cp_level) { + /* Add 1 node for the first record */ + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 2); + } + else { + /* Take the header into account */ + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 1); + } + bthp->clumpSize = SWAP_BE32 (fileSize); + + bthp->attributes |= SWAP_BE32 (kBTBigKeysMask | kBTVariableIndexKeysMask); + bthp->maxKeyLength = SWAP_BE16 (kHFSPlusAttrKeyMaximumLength); + + offset += sizeof(BTHeaderRec); + + SETOFFSET(buffer, nodeSize, offset, 2); + + offset += kBTreeHeaderUserBytes; + + SETOFFSET(buffer, nodeSize, offset, 3); + + + /* FIGURE OUT HOW MANY MAP NODES (IF ANY): */ + nodeBitsInHeader = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - (4 * sizeof(SInt16)) ); + if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) { + UInt32 nodeBitsInMapNode; + + ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1); + nodeBitsInMapNode = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - (2 * sizeof(SInt16)) + - 2 ); + *mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader + + (nodeBitsInMapNode - 1)) / nodeBitsInMapNode; + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes); + } + + + /* + * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE. + * Note - worst case (32MB alloc blk) will have only 18 nodes in use. 
+ */ + bmp = ((UInt8 *)buffer + offset); + temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes); + + /* Working a byte at a time is endian safe */ + while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; } + *bmp = ~(0xFF >> temp); + offset += nodeBitsInHeader/8; + + SETOFFSET(buffer, nodeSize, offset, 4); + +#ifdef DEBUG_BUILD + if (set_cp_level) { + /* Stuff in the EA on the root folder */ + void *node2 = (uint8_t*)buffer + nodeSize; + + struct cp_root_xattr ea; + + uint8_t canonicalName[256]; + CFStringRef cfstr; + + HFSPlusAttrData *attrData; + HFSPlusAttrKey *attrKey; + bzero(node2, nodeSize); + ndp = (BTNodeDescriptor*)node2; + + ndp->kind = kBTLeafNode; + ndp->numRecords = SWAP_BE16(1); + ndp->height = 1; + + offset = sizeof(BTNodeDescriptor); + SETOFFSET(node2, nodeSize, offset, 1); + + attrKey = (HFSPlusAttrKey*)((uint8_t*)node2 + offset); + attrKey->fileID = SWAP_BE32(1); + attrKey->startBlock = 0; + attrKey->keyLength = SWAP_BE16(sizeof(*attrKey) - sizeof(attrKey->keyLength)); + + cfstr = CFStringCreateWithCString(kCFAllocatorDefault, "com.apple.system.cprotect", kCFStringEncodingUTF8); + if (_CFStringGetFileSystemRepresentation(cfstr, canonicalName, sizeof(canonicalName)) && + ConvertUTF8toUnicode(canonicalName, + sizeof(attrKey->attrName), + attrKey->attrName, &attrKey->attrNameLen) == 0) { + attrKey->attrNameLen = SWAP_BE16(attrKey->attrNameLen); + offset += sizeof(*attrKey); + + /* If the offset is odd, move up to the next even value */ + if (offset & 1) { + offset++; + } + + attrData = (HFSPlusAttrData*)((uint8_t*)node2 + offset); + bzero(&ea, sizeof(ea)); + ea.vers = OSSwapHostToLittleInt16(dp->protectlevel); //(leave in LittleEndian) + attrData->recordType = SWAP_BE32(kHFSPlusAttrInlineData); + attrData->attrSize = SWAP_BE32(sizeof(ea)); + memcpy(attrData->attrData, &ea, sizeof(ea)); + offset += sizeof (HFSPlusAttrData) + sizeof(ea) - sizeof(attrData->attrData); + } + SETOFFSET (node2, nodeSize, offset, 2); + CFRelease(cfstr); + } +#endif + + *bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize; + WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer); +} + +#if !TARGET_OS_EMBEDDED +static int +get_dev_uuid(const char *disk_name, char *dev_uuid_str, int dev_uuid_len) +{ + io_service_t service; + CFStringRef uuid_str; + int ret = EINVAL; + + if (strncmp(disk_name, _PATH_DEV, strlen(_PATH_DEV)) == 0) { + disk_name += strlen(_PATH_DEV); + } + + dev_uuid_str[0] = '\0'; + + service = IOServiceGetMatchingService(kIOMasterPortDefault, IOBSDNameMatching(kIOMasterPortDefault, 0, disk_name)); + if (service != IO_OBJECT_NULL) { + uuid_str = IORegistryEntryCreateCFProperty(service, CFSTR(kIOMediaUUIDKey), kCFAllocatorDefault, 0); + if (uuid_str) { + if (CFStringGetFileSystemRepresentation(uuid_str, dev_uuid_str, dev_uuid_len) != 0) { + ret = 0; + } + CFRelease(uuid_str); + } + IOObjectRelease(service); + } + + return ret; +} + +static int +clear_journal_dev(const char *dev_name) +{ + int fd; + + fd = open(dev_name, O_RDWR); + if (fd < 0) { + printf("Failed to open the journal device %s (%s)\n", dev_name, strerror(errno)); + return -1; + } + + dowipefs(fd); + + close(fd); + return 0; +} +#endif /* !TARGET_OS_EMBEDDED */ + + +static int +WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSPlusVolumeHeader *header, + void *buffer) +{ + JournalInfoBlock *jibp = buffer; + UInt32 journalBlock; + + memset(buffer, 0xdb, driveInfo->physSectorSize); + memset(jibp, 0, sizeof(JournalInfoBlock)); 
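+ /*
+ * Editor's note: the whole physical sector is filled with the 0xdb
+ * pattern and only the JournalInfoBlock structure at its start is
+ * zeroed; the flags/offset/size fields are byte-swapped to big endian
+ * just for the WriteBuffer() call below and then swapped back, so the
+ * structure remains in host order for the caller.
+ */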
+ +#if !TARGET_OS_EMBEDDED + if (dp->journalDevice) { + char uuid_str[64]; + + if (get_dev_uuid(dp->journalDevice, uuid_str, sizeof(uuid_str)) == 0) { + strlcpy((char *)&jibp->reserved[0], uuid_str, sizeof(jibp->reserved)); + + // we also need to blast out some zeros to the journal device + // in case it had a file system on it previously. that way + // it's "initialized" in the sense that the previous contents + // won't get mounted accidently. if this fails we'll bail out. + if (clear_journal_dev(dp->journalDevice) != 0) { + return -1; + } + } else { + printf("FAILED to get the device uuid for device %s\n", dp->journalDevice); + strlcpy((char *)&jibp->reserved[0], "NO-DEV-UUID", sizeof(jibp->reserved)); + return -1; + } + } else { +#endif + jibp->flags = kJIJournalInFSMask; +#if !TARGET_OS_EMBEDDED + } +#endif + jibp->flags |= kJIJournalNeedInitMask; + if (NEWFS_HFS_DEBUG && dp->journalBlock) + journalBlock = dp->journalBlock; + else + journalBlock = header->journalInfoBlock + 1; + jibp->offset = ((UInt64) journalBlock) * header->blockSize; + jibp->size = dp->journalSize; + + jibp->flags = SWAP_BE32(jibp->flags); + jibp->offset = SWAP_BE64(jibp->offset); + jibp->size = SWAP_BE64(jibp->size); + + WriteBuffer(driveInfo, startingSector, driveInfo->physSectorSize, buffer); + + jibp->flags = SWAP_BE32(jibp->flags); + jibp->offset = SWAP_BE64(jibp->offset); + jibp->size = SWAP_BE64(jibp->size); + + return 0; +} + + +/* + * WriteCatalogFile + * + * This routine initializes a Catalog B-Tree. + * + * Note: Since large volumes can have bigger b-trees they + * might need to have map nodes setup. + */ +static void +WriteCatalogFile(const DriveInfo *driveInfo, UInt64 startingSector, + const hfsparams_t *dp, HFSPlusVolumeHeader *header, void *buffer, + UInt32 *bytesUsed, UInt32 *mapNodes) +{ + BTNodeDescriptor *ndp; + BTHeaderRec *bthp; + UInt8 *bmp; + UInt32 nodeBitsInHeader; + UInt32 fileSize; + UInt32 nodeSize; + UInt32 temp; + SInt16 offset; + + *mapNodes = 0; + fileSize = dp->catalogClumpSize; + nodeSize = dp->catalogNodeSize; + + bzero(buffer, nodeSize); + + + /* FILL IN THE NODE DESCRIPTOR: */ + ndp = (BTNodeDescriptor *)buffer; + ndp->kind = kBTHeaderNode; + ndp->numRecords = SWAP_BE16 (3); + offset = sizeof(BTNodeDescriptor); + + SETOFFSET(buffer, nodeSize, offset, 1); + + + /* FILL IN THE HEADER RECORD: */ + bthp = (BTHeaderRec *)((UInt8 *)buffer + offset); + bthp->treeDepth = SWAP_BE16 (1); + bthp->rootNode = SWAP_BE32 (1); + bthp->firstLeafNode = SWAP_BE32 (1); + bthp->lastLeafNode = SWAP_BE32 (1); + bthp->leafRecords = SWAP_BE32 (dp->journaledHFS ? 
6 : 2); + bthp->nodeSize = SWAP_BE16 (nodeSize); + bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize); + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 2); /* header and root */ + bthp->clumpSize = SWAP_BE32 (fileSize); + + + bthp->attributes |= SWAP_BE32 (kBTVariableIndexKeysMask + kBTBigKeysMask); + bthp->maxKeyLength = SWAP_BE16 (kHFSPlusCatalogKeyMaximumLength); + if (dp->flags & kMakeCaseSensitive) + bthp->keyCompareType = kHFSBinaryCompare; + else + bthp->keyCompareType = kHFSCaseFolding; + + offset += sizeof(BTHeaderRec); + + SETOFFSET(buffer, nodeSize, offset, 2); + + offset += kBTreeHeaderUserBytes; + + SETOFFSET(buffer, nodeSize, offset, 3); + + /* FIGURE OUT HOW MANY MAP NODES (IF ANY): */ + nodeBitsInHeader = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - (4 * sizeof(SInt16)) ); + + if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) { + UInt32 nodeBitsInMapNode; + + ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1); + nodeBitsInMapNode = 8 * (nodeSize + - sizeof(BTNodeDescriptor) + - (2 * sizeof(SInt16)) + - 2 ); + *mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader + + (nodeBitsInMapNode - 1)) / nodeBitsInMapNode; + bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes); + } + + /* + * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE. + * Note - worst case (32MB alloc blk) will have only 18 nodes in use. + */ + bmp = ((UInt8 *)buffer + offset); + temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes); + + /* Working a byte at a time is endian safe */ + while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; } + *bmp = ~(0xFF >> temp); + offset += nodeBitsInHeader/8; + + SETOFFSET(buffer, nodeSize, offset, 4); + + InitCatalogRoot_HFSPlus(dp, header, buffer + nodeSize); + + *bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize; + + WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer); +} + + +static void +InitCatalogRoot_HFSPlus(const hfsparams_t *dp, const HFSPlusVolumeHeader *header, void * buffer) +{ + BTNodeDescriptor *ndp; + HFSPlusCatalogKey *ckp; + HFSPlusCatalogKey *tkp; + HFSPlusCatalogFolder *cdp; + HFSPlusCatalogFile *cfp; + HFSPlusCatalogThread *ctp; + UInt16 nodeSize; + SInt16 offset; + size_t unicodeBytes; + UInt8 canonicalName[256]; + CFStringRef cfstr; + Boolean cfOK; + int index = 0; + + nodeSize = dp->catalogNodeSize; + bzero(buffer, nodeSize); + + /* + * All nodes have a node descriptor... + */ + ndp = (BTNodeDescriptor *)buffer; + ndp->kind = kBTLeafNode; + ndp->height = 1; + ndp->numRecords = SWAP_BE16 (dp->journaledHFS ? 6 : 2); + offset = sizeof(BTNodeDescriptor); + SETOFFSET(buffer, nodeSize, offset, ++index); + + /* + * First record is always the root directory... + */ + ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + + /* Use CFString functions to get a HFSPlus Canonical name */ + cfstr = CFStringCreateWithCString(kCFAllocatorDefault, (char *)dp->volumeName, kCFStringEncodingUTF8); + cfOK = _CFStringGetFileSystemRepresentation(cfstr, canonicalName, sizeof(canonicalName)); + + if (!cfOK || ConvertUTF8toUnicode(canonicalName, sizeof(ckp->nodeName.unicode), + ckp->nodeName.unicode, &ckp->nodeName.length)) { + + /* On conversion errors "untitled" is used as a fallback. 
*/ + (void) ConvertUTF8toUnicode((UInt8 *)kDefaultVolumeNameStr, + sizeof(ckp->nodeName.unicode), + ckp->nodeName.unicode, + &ckp->nodeName.length); + warnx("invalid HFS+ name: \"%s\", using \"%s\" instead", + dp->volumeName, kDefaultVolumeNameStr); + } + CFRelease(cfstr); + ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length); + + unicodeBytes = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length); + + ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + unicodeBytes); + ckp->parentID = SWAP_BE32 (kHFSRootParentID); + offset += SWAP_BE16 (ckp->keyLength) + 2; + + cdp = (HFSPlusCatalogFolder *)((UInt8 *)buffer + offset); + cdp->recordType = SWAP_BE16 (kHFSPlusFolderRecord); + /* folder count is only supported on HFSX volumes */ + if (dp->flags & kMakeCaseSensitive) { + cdp->flags = SWAP_BE16 (kHFSHasFolderCountMask); + } + cdp->valence = SWAP_BE32 (dp->journaledHFS ? 2 : 0); + cdp->folderID = SWAP_BE32 (kHFSRootFolderID); + cdp->createDate = SWAP_BE32 (dp->createDate); + cdp->contentModDate = SWAP_BE32 (dp->createDate); + cdp->textEncoding = SWAP_BE32 (dp->encodingHint); + if (dp->flags & kUseAccessPerms) { + cdp->bsdInfo.ownerID = SWAP_BE32 (dp->owner); + cdp->bsdInfo.groupID = SWAP_BE32 (dp->group); + cdp->bsdInfo.fileMode = SWAP_BE16 (dp->mask | S_IFDIR); + } + offset += sizeof(HFSPlusCatalogFolder); + SETOFFSET(buffer, nodeSize, offset, ++index); + + /* + * Second record is always the root directory thread... + */ + tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength); + tkp->parentID = SWAP_BE32 (kHFSRootFolderID); + // tkp->nodeName.length = 0; + + offset += SWAP_BE16 (tkp->keyLength) + 2; + + ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset); + ctp->recordType = SWAP_BE16 (kHFSPlusFolderThreadRecord); + ctp->parentID = SWAP_BE32 (kHFSRootParentID); + bcopy(&ckp->nodeName, &ctp->nodeName, sizeof(UInt16) + unicodeBytes); + offset += (sizeof(HFSPlusCatalogThread) + - (sizeof(ctp->nodeName.unicode) - unicodeBytes) ); + + SETOFFSET(buffer, nodeSize, offset, ++index); + + /* + * Add records for ".journal" and ".journal_info_block" files: + */ + if (dp->journaledHFS) { + struct HFSUniStr255 *nodename1, *nodename2; + size_t uBytes1, uBytes2; + UInt32 journalBlock; + + /* File record #1 */ + ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + (void) ConvertUTF8toUnicode((UInt8 *)HFS_JOURNAL_FILE, sizeof(ckp->nodeName.unicode), + ckp->nodeName.unicode, &ckp->nodeName.length); + ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length); + uBytes1 = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length); + ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + uBytes1); + ckp->parentID = SWAP_BE32 (kHFSRootFolderID); + offset += SWAP_BE16 (ckp->keyLength) + 2; + + cfp = (HFSPlusCatalogFile *)((UInt8 *)buffer + offset); + cfp->recordType = SWAP_BE16 (kHFSPlusFileRecord); + cfp->flags = SWAP_BE16 (kHFSThreadExistsMask); + cfp->fileID = SWAP_BE32 (dp->nextFreeFileID); + cfp->createDate = SWAP_BE32 (dp->createDate + 1); + cfp->contentModDate = SWAP_BE32 (dp->createDate + 1); + cfp->textEncoding = 0; + + cfp->bsdInfo.fileMode = SWAP_BE16 (S_IFREG); + cfp->bsdInfo.ownerFlags = (uint8_t) SWAP_BE16 (((uint16_t)UF_NODUMP)); + cfp->bsdInfo.special.linkCount = SWAP_BE32(1); + cfp->userInfo.fdType = SWAP_BE32 (kJournalFileType); + cfp->userInfo.fdCreator = SWAP_BE32 (kHFSPlusCreator); + cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked); + cfp->dataFork.logicalSize = SWAP_BE64 (dp->journalSize); + 
cfp->dataFork.totalBlocks = SWAP_BE32 ((dp->journalSize+dp->blockSize-1) / dp->blockSize); + + if (NEWFS_HFS_DEBUG && dp->journalBlock) + journalBlock = dp->journalBlock; + else + journalBlock = header->journalInfoBlock + 1; + cfp->dataFork.extents[0].startBlock = SWAP_BE32 (journalBlock); + cfp->dataFork.extents[0].blockCount = cfp->dataFork.totalBlocks; + + offset += sizeof(HFSPlusCatalogFile); + SETOFFSET(buffer, nodeSize, offset, ++index); + nodename1 = &ckp->nodeName; + + /* File record #2 */ + ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + (void) ConvertUTF8toUnicode((UInt8 *)HFS_JOURNAL_INFO, sizeof(ckp->nodeName.unicode), + ckp->nodeName.unicode, &ckp->nodeName.length); + ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length); + uBytes2 = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length); + ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + uBytes2); + ckp->parentID = SWAP_BE32 (kHFSRootFolderID); + offset += SWAP_BE16 (ckp->keyLength) + 2; + + cfp = (HFSPlusCatalogFile *)((UInt8 *)buffer + offset); + cfp->recordType = SWAP_BE16 (kHFSPlusFileRecord); + cfp->flags = SWAP_BE16 (kHFSThreadExistsMask); + cfp->fileID = SWAP_BE32 (dp->nextFreeFileID + 1); + cfp->createDate = SWAP_BE32 (dp->createDate); + cfp->contentModDate = SWAP_BE32 (dp->createDate); + cfp->textEncoding = 0; + + cfp->bsdInfo.fileMode = SWAP_BE16 (S_IFREG); + cfp->bsdInfo.ownerFlags = (uint8_t) SWAP_BE16 (((uint16_t)UF_NODUMP)); + cfp->bsdInfo.special.linkCount = SWAP_BE32(1); + cfp->userInfo.fdType = SWAP_BE32 (kJournalFileType); + cfp->userInfo.fdCreator = SWAP_BE32 (kHFSPlusCreator); + cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked); + cfp->dataFork.logicalSize = SWAP_BE64(dp->blockSize);; + cfp->dataFork.totalBlocks = SWAP_BE32(1); + + cfp->dataFork.extents[0].startBlock = SWAP_BE32 (header->journalInfoBlock); + cfp->dataFork.extents[0].blockCount = cfp->dataFork.totalBlocks; + + offset += sizeof(HFSPlusCatalogFile); + SETOFFSET(buffer, nodeSize, offset, ++index); + nodename2 = &ckp->nodeName; + + /* Thread record for file #1 */ + tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength); + tkp->parentID = SWAP_BE32 (dp->nextFreeFileID); + tkp->nodeName.length = 0; + offset += SWAP_BE16 (tkp->keyLength) + 2; + + ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset); + ctp->recordType = SWAP_BE16 (kHFSPlusFileThreadRecord); + ctp->parentID = SWAP_BE32 (kHFSRootFolderID); + bcopy(nodename1, &ctp->nodeName, sizeof(UInt16) + uBytes1); + offset += (sizeof(HFSPlusCatalogThread) + - (sizeof(ctp->nodeName.unicode) - uBytes1) ); + SETOFFSET(buffer, nodeSize, offset, ++index); + + /* Thread record for file #2 */ + tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset); + tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength); + tkp->parentID = SWAP_BE32 (dp->nextFreeFileID + 1); + tkp->nodeName.length = 0; + offset += SWAP_BE16 (tkp->keyLength) + 2; + + ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset); + ctp->recordType = SWAP_BE16 (kHFSPlusFileThreadRecord); + ctp->parentID = SWAP_BE32 (kHFSRootFolderID); + bcopy(nodename2, &ctp->nodeName, sizeof(UInt16) + uBytes2); + offset += (sizeof(HFSPlusCatalogThread) + - (sizeof(ctp->nodeName.unicode) - uBytes2) ); + SETOFFSET(buffer, nodeSize, offset, ++index); + } +} + +/* + * WriteMapNodes + * + * Initializes a B-tree map node and writes it out to disk. 
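+ *
+ * (Editor's note: each map node carries a single map record of
+ * btNodeSize - sizeof(BTNodeDescriptor) - 2 * sizeof(SInt16) - 2 bytes,
+ * and successive map nodes are chained through their fLink fields, the
+ * last one's fLink being 0.)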
+ */ +static void +WriteMapNodes(const DriveInfo *driveInfo, UInt64 diskStart, UInt32 firstMapNode, + UInt32 mapNodes, UInt16 btNodeSize, void *buffer) +{ + UInt32 sectorsPerNode; + UInt32 mapRecordBytes; + UInt16 i; + BTNodeDescriptor *nd = (BTNodeDescriptor *)buffer; + + bzero(buffer, btNodeSize); + + nd->kind = kBTMapNode; + nd->numRecords = SWAP_BE16 (1); + + /* note: must belong word aligned (hence the extra -2) */ + mapRecordBytes = btNodeSize - sizeof(BTNodeDescriptor) - 2*sizeof(SInt16) - 2; + + SETOFFSET(buffer, btNodeSize, sizeof(BTNodeDescriptor), 1); + SETOFFSET(buffer, btNodeSize, sizeof(BTNodeDescriptor) + mapRecordBytes, 2); + + sectorsPerNode = btNodeSize/kBytesPerSector; + + /* + * Note - worst case (32MB alloc blk) will have + * only 18 map nodes. So don't bother optimizing + * this section to do multiblock writes! + */ + for (i = 0; i < mapNodes; i++) { + if ((i + 1) < mapNodes) + nd->fLink = SWAP_BE32 (++firstMapNode); /* point to next map node */ + else + nd->fLink = 0; /* this is the last map node */ + + WriteBuffer(driveInfo, diskStart, btNodeSize, buffer); + + diskStart += sectorsPerNode; + } +} + +/* + * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + * NOTE: IF buffer IS NULL, THIS FUNCTION WILL WRITE ZERO'S. + * + * startingSector is in terms of 512-byte sectors. + * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + */ +static void +WriteBuffer(const DriveInfo *driveInfo, UInt64 startingSector, UInt64 byteCount, + const void *buffer) +{ + off_t sector; + off_t physSector = 0; + off_t byteOffsetInPhysSector; + UInt32 numBytesToIO; + UInt32 numPhysSectorsToIO; + UInt32 tempbufSizeInPhysSectors; + UInt32 tempbufSize; + UInt32 fd = driveInfo->fd; + UInt32 physSectorSize = driveInfo->physSectorSize; + void *tempbuf = NULL; + int sectorSizeRatio = driveInfo->physSectorSize / kBytesPerSector; + int status = 0; /* 0: no error; 1: alloc; 2: read; 3: write */ + + if (0 == byteCount) { + goto exit; + } + + /*@@@@@@@@@@ buffer allocation @@@@@@@@@@*/ + /* try a buffer size for optimal IO, __UP TO 4MB__. if that + fails, then try with the minimum allowed buffer size, which + is equal to physSectorSize */ + tempbufSizeInPhysSectors = MIN ( (byteCount - 1 + physSectorSize) / physSectorSize, + driveInfo->physSectorsPerIO ); + /* limit at 4MB */ + tempbufSizeInPhysSectors = MIN ( tempbufSizeInPhysSectors, (4 * 1024 * 1024) / physSectorSize ); + tempbufSize = tempbufSizeInPhysSectors * physSectorSize; + + if ((tempbuf = valloc(tempbufSize)) == NULL) { + /* try allocation of smallest allowed size: one + physical sector. + NOTE: the previous valloc tempbufSize might have + already been one physical sector. we don't want to + check if that was the case, so just try again. 
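+
+ (Editor's sketch of the alignment math in the I/O loop below, with
+ hypothetical numbers and sectorOffset assumed 0: physSectorSize = 4096
+ gives sectorSizeRatio = 4096 / 512 = 8, so writing the volume header at
+ 512-byte sector 2 yields physSector = 2 / 8 = 0 and
+ byteOffsetInPhysSector = (2 % 8) * 512 = 1024, which is why the
+ surrounding physical sector is read, patched, and rewritten rather
+ than written directly.)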
+ */ + tempbufSizeInPhysSectors = 1; + tempbufSize = physSectorSize; + if ((tempbuf = valloc(tempbufSize)) == NULL) { + status = 1; + goto exit; + } + } + + /*@@@@@@@@@@ io @@@@@@@@@@*/ + sector = driveInfo->sectorOffset + startingSector; + physSector = sector / sectorSizeRatio; + byteOffsetInPhysSector = (sector % sectorSizeRatio) * kBytesPerSector; + + while (byteCount > 0) { + numPhysSectorsToIO = MIN ( (byteCount - 1 + physSectorSize) / physSectorSize, + tempbufSizeInPhysSectors ); + numBytesToIO = MIN(byteCount, (unsigned)((numPhysSectorsToIO * physSectorSize) - byteOffsetInPhysSector)); + + /* if IO does not align with physical sector boundaries */ + if ((0 != byteOffsetInPhysSector) || ((numBytesToIO % physSectorSize) != 0)) { + if (pread(fd, tempbuf, numPhysSectorsToIO * physSectorSize, physSector * physSectorSize) < 0) { + status = 2; + goto exit; + } + } + + if (NULL != buffer) { + memcpy(tempbuf + byteOffsetInPhysSector, buffer, numBytesToIO); + } + else { + bzero(tempbuf + byteOffsetInPhysSector, numBytesToIO); + } + + if (pwrite(fd, tempbuf, numPhysSectorsToIO * physSectorSize, physSector * physSectorSize) < 0) { + warn("%s: pwrite(%d, %p, %zu, %lld)", __FUNCTION__, fd, tempbuf, (size_t)(numPhysSectorsToIO * physSectorSize), (long long)(physSector * physSectorSize)); + status = 3; + goto exit; + } + + byteOffsetInPhysSector = 0; + byteCount -= numBytesToIO; + physSector += numPhysSectorsToIO; + if (NULL != buffer) { + buffer += numBytesToIO; + } + } + +exit: + if (tempbuf) { + free(tempbuf); + tempbuf = NULL; + } + + if (1 == status) { + err(1, NULL); + } + else if (2 == status) { + err(1, "read (sector %llu)", physSector); + } + else if (3 == status) { + err(1, "write (sector %llu)", physSector); + } + + return; +} + + +static UInt32 Largest( UInt32 a, UInt32 b, UInt32 c, UInt32 d ) +{ + /* a := max(a,b) */ + if (a < b) + a = b; + /* c := max(c,d) */ + if (c < d) + c = d; + + /* return max(a,c) */ + if (a > c) + return a; + else + return c; +} + +/* + * UTCToLocal - convert from Mac OS GMT time to Mac OS local time + */ +static UInt32 UTCToLocal(UInt32 utcTime) +{ + UInt32 localTime = utcTime; + struct timezone timeZone; + struct timeval timeVal; + + if (localTime != 0) { + + /* HFS volumes need timezone info to convert local to GMT */ + (void)gettimeofday( &timeVal, &timeZone ); + + + localTime -= (timeZone.tz_minuteswest * 60); + if (timeZone.tz_dsttime) + localTime += 3600; + } + + return (localTime); +} + +#define __kCFUserEncodingFileName ("/.CFUserTextEncoding") + +static UInt32 +GetDefaultEncoding() +{ + struct passwd *passwdp; + + if ((passwdp = getpwuid(0))) { // root account + char buffer[MAXPATHLEN + 1]; + int fd; + + strlcpy(buffer, passwdp->pw_dir, sizeof(buffer)); + strlcat(buffer, __kCFUserEncodingFileName, sizeof(buffer)); + + if ((fd = open(buffer, O_RDONLY, 0)) > 0) { + ssize_t readSize; + + readSize = read(fd, buffer, MAXPATHLEN); + buffer[(readSize < 0 ? 
0 : readSize)] = '\0'; + close(fd); + return strtol(buffer, NULL, 0); + } + } + return 0; +} + + +static int +ConvertUTF8toUnicode(const UInt8* source, size_t bufsize, UniChar* unibuf, + UInt16 *charcount) +{ + UInt8 byte; + UniChar* target; + UniChar* targetEnd; + + *charcount = 0; + target = unibuf; + targetEnd = (UniChar *)((UInt8 *)unibuf + bufsize); + + while ((byte = *source++)) { + + /* check for single-byte ascii */ + if (byte < 128) { + if (byte == ':') /* ':' is mapped to '/' */ + byte = '/'; + + *target++ = SWAP_BE16 (byte); + } else { + UniChar ch; + UInt8 seq = (byte >> 4); + + switch (seq) { + case 0xc: /* double-byte sequence (1100 and 1101) */ + case 0xd: + ch = (byte & 0x1F) << 6; /* get 5 bits */ + if (((byte = *source++) >> 6) != 2) + return (EINVAL); + break; + + case 0xe: /* triple-byte sequence (1110) */ + ch = (byte & 0x0F) << 6; /* get 4 bits */ + if (((byte = *source++) >> 6) != 2) + return (EINVAL); + ch += (byte & 0x3F); ch <<= 6; /* get 6 bits */ + if (((byte = *source++) >> 6) != 2) + return (EINVAL); + break; + + default: + return (EINVAL); /* malformed sequence */ + } + + ch += (byte & 0x3F); /* get last 6 bits */ + + if (target >= targetEnd) + return (ENOBUFS); + + *target++ = SWAP_BE16 (ch); + } + } + + *charcount = target - unibuf; + + return (0); +} + +/* + * Derive the encoding hint for the given name. + */ +static int +getencodinghint(unsigned char *name) +{ + int mib[3]; + size_t buflen = sizeof(int); + struct vfsconf vfc; + int hint = 0; + + if (getvfsbyname("hfs", &vfc) < 0) + goto error; + + mib[0] = CTL_VFS; + mib[1] = vfc.vfc_typenum; + mib[2] = HFS_ENCODINGHINT; + + if (sysctl(mib, 3, &hint, &buflen, name, strlen((char *)name) + 1) < 0) + goto error; + return (hint); +error: + hint = GetDefaultEncoding(); + return (hint); +} + + +/* Generate Volume UUID - similar to code existing in hfs_util */ +void GenerateVolumeUUID(VolumeUUID *newVolumeID) { + SHA_CTX context; + char randomInputBuffer[26]; + unsigned char digest[20]; + time_t now; + clock_t uptime; + int mib[2]; + int sysdata; + char sysctlstring[128]; + size_t datalen; + double sysloadavg[3]; + struct vmtotal sysvmtotal; + + do { + /* Initialize the SHA-1 context for processing: */ + SHA1_Init(&context); + + /* Now process successive bits of "random" input to seed the process: */ + + /* The current system's uptime: */ + uptime = clock(); + SHA1_Update(&context, &uptime, sizeof(uptime)); + + /* The kernel's boot time: */ + mib[0] = CTL_KERN; + mib[1] = KERN_BOOTTIME; + datalen = sizeof(sysdata); + sysctl(mib, 2, &sysdata, &datalen, NULL, 0); + SHA1_Update(&context, &sysdata, datalen); + + /* The system's host id: */ + mib[0] = CTL_KERN; + mib[1] = KERN_HOSTID; + datalen = sizeof(sysdata); + sysctl(mib, 2, &sysdata, &datalen, NULL, 0); + SHA1_Update(&context, &sysdata, datalen); + + /* The system's host name: */ + mib[0] = CTL_KERN; + mib[1] = KERN_HOSTNAME; + datalen = sizeof(sysctlstring); + sysctl(mib, 2, sysctlstring, &datalen, NULL, 0); + SHA1_Update(&context, sysctlstring, datalen); + + /* The running kernel's OS release string: */ + mib[0] = CTL_KERN; + mib[1] = KERN_OSRELEASE; + datalen = sizeof(sysctlstring); + sysctl(mib, 2, sysctlstring, &datalen, NULL, 0); + SHA1_Update(&context, sysctlstring, datalen); + + /* The running kernel's version string: */ + mib[0] = CTL_KERN; + mib[1] = KERN_VERSION; + datalen = sizeof(sysctlstring); + sysctl(mib, 2, sysctlstring, &datalen, NULL, 0); + SHA1_Update(&context, sysctlstring, datalen); + + /* The system's load average: */ + datalen = 
sizeof(sysloadavg); + getloadavg(sysloadavg, 3); + SHA1_Update(&context, &sysloadavg, datalen); + + /* The system's VM statistics: */ + mib[0] = CTL_VM; + mib[1] = VM_METER; + datalen = sizeof(sysvmtotal); + sysctl(mib, 2, &sysvmtotal, &datalen, NULL, 0); + SHA1_Update(&context, &sysvmtotal, datalen); + + /* The current GMT (26 ASCII characters): */ + time(&now); + strncpy(randomInputBuffer, asctime(gmtime(&now)), 26); /* "Mon Mar 27 13:46:26 2000" */ + SHA1_Update(&context, randomInputBuffer, 26); + + /* Pad the accumulated input and extract the final digest hash: */ + SHA1_Final(digest, &context); + + memcpy(newVolumeID, digest, sizeof(*newVolumeID)); + } while ((newVolumeID->v.high == 0) || (newVolumeID->v.low == 0)); +} + + diff --git a/newfs_hfs/newfs_hfs.8 b/newfs_hfs/newfs_hfs.8 new file mode 100644 index 0000000..8d4ec83 --- /dev/null +++ b/newfs_hfs/newfs_hfs.8 @@ -0,0 +1,177 @@ +.\" Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. +.\" +.\" The contents of this file constitute Original Code as defined in and +.\" are subject to the Apple Public Source License Version 1.1 (the +.\" "License"). You may not use this file except in compliance with the +.\" License. Please obtain a copy of the License at +.\" http://www.apple.com/publicsource and read it before using this file. +.\" +.\" This Original Code and all software distributed under the License are +.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +.\" License for the specific language governing rights and limitations +.\" under the License. +.\" +.\" @(#)newfs_hfs.8 +.Dd June 19, 2008 +.Dt NEWFS_HFS 8 +.Os "Mac OS X" +.Sh NAME +.Nm newfs_hfs +.Nd construct a new HFS Plus file system +.Sh SYNOPSIS +.Nm newfs_hfs +.Op Fl N +.Op Fl U Ar uid +.Op Fl G Ar gid +.Op Fl M Ar mask +.Op Fl P +.Op Fl s +.Op Fl b Ar block-size +.Op Fl c Ar clump-size-list +.Op Fl i Ar first-cnid +.Op Fl J Ar [journal-size] +.Op Fl n Ar node-size-list +.Op Fl v Ar volume-name +.Ar special +.Nm newfs_hfs +.Fl N Ar partition-size +.Op Fl U Ar uid +.Op Fl G Ar gid +.Op Fl M Ar mask +.Op Fl P +.Op Fl h | w +.Op Fl s +.Op Fl b Ar block-size +.Op Fl c Ar clump-size-list +.Op Fl i Ar first-cnid +.Op Fl J Ar [journal-size] +.Op Fl n Ar node-size-list +.Op Fl v Ar volume-name +.Sh DESCRIPTION +.Nm Newfs_hfs +builds an HFS Plus file system on the specified special device. +Before running +.Nm newfs_hfs +the disk should be partitioned using the +.Em Disk Utility +application or +.Xr pdisk 8 . +.Pp +The file system default parameters are calculated based on +the size of the disk partition. Typically the defaults are +reasonable, however +.Nm newfs_hfs +has several options to allow the defaults to be selectively overridden. +The options are as follows: +.Bl -tag -width Fl +.It Fl N Ar [partition-size] +Causes the file system parameters to be printed out +without really creating the file system. +If the argument following the +.Fl N +starts with a decimal digit, it is assumed to be a partition size. +The paritition size may be given in decimal, octal (with leading `0'), +or hexadecimal (with leading `0x'). +The partition size argument can be specified in 512-byte blocks (with a `b' suffix), +petabytes (`p' suffix), terabytes (`t' suffix), gigabytes +(`g' suffix), megabytes (`m' suffix), or kilobytes +(`k' suffix). 
All suffixes indicate binary, not decimal, +multipliers (e.g., `1k' is 1024 bytes). If no suffix is +specified, the value is assumed to be in bytes; if an illegal +suffix is specified, it results in a size of 0 bytes. +.Pp +If the partition size is given, then no special device argument shall be provided. +If no partition size is given, then the size of the given special device +is used instead, and the special device will not be written to. +.It Fl U Ar uid +Set the owner of the file system's root directory to +.Pa uid . +.It Fl G Ar gid +Set the group of the file system's root directory to +.Pa gid . +.It Fl M Ar mask +Specify the octal access permissions mask for the file system's root +directory. +.It Fl P +Set kHFSContentProtectionBit in the volume's attributes, which will cause the +volume to be mounted with the "protect" option if the kernel supports it. +.It Fl s +Creates a case-sensitive HFS Plus filesystem. By +default a case-insensitive filesystem is created. +Case-sensitive HFS Plus file systems require a Mac OS X +version of 10.3 (Darwin 7.0) or later. +.It Fl b Ar block-size +The allocation block size of the file system. +The default value is 4096. +.It Fl c Ar clump-size-list +This specifies the +.Em clump +and/or +.Em initial +sizes, in allocation +blocks, for the various metadata files. +.Em Clump +sizes are specified with the +.Fl c +option followed by a comma +separated list of the form arg=blocks. +.Pp +Example: -c c=5000,e=500 +.Bl -tag -width Fl +.It Em a=blocks +Set the attribute file clump size. +.It Em b=blocks +Set the allocation bitmap file clump size. +.It Em c=blocks +Set the catalog file clump size. +.It Em d=blocks +Set the data fork clump size. +.It Em e=blocks +Set the extent overflow file clump size. +.It Em r=blocks +Set the resource fork clump size. +.El +.It Fl i Ar first-cnid +This specifies the initial catalog node ID for user files +and directories. The default value is 16. +.It Fl J Ar [journal-size] +Creates a journaled HFS+ volume. +The default journal size varies, based on the size of the volume. Appending an 'M' to the +journal size implies megabytes (i.e. 64M is 64 megabytes). +The maximum journal size is 1024 megabytes. +.It Fl n Ar node-size-list +This specifies the b-tree +.Em node +sizes, in bytes, +for the various b-tree files. +.Em Node +sizes are specified with the +.Fl n +option followed by a comma separated list of +the form arg=bytes. The +.Em node +size must be a power of two and no larger than +32768 bytes. +.Pp +Example: -n c=8192,e=4096 +.Bl -tag -width Fl +.It Em a=bytes +Set the attribute b-tree node size. +.It Em c=bytes +Set the catalog b-tree node size. +.It Em e=bytes +Set the extent overflow b-tree node size. +.El +.It Fl v Ar volume-name +Volume name (file system name) in ascii or UTF-8 format. +.El +.Sh SEE ALSO +.Xr mount 8 , +.Xr pdisk 8 +.Sh HISTORY +The +.Nm +command appeared in Mac OS X Server 1.0 . As of Mac OS X 10.6, this utility no longer generates HFS standard file systems. diff --git a/newfs_hfs/newfs_hfs.c b/newfs_hfs/newfs_hfs.c new file mode 100644 index 0000000..682ed8d --- /dev/null +++ b/newfs_hfs/newfs_hfs.c @@ -0,0 +1,1458 @@ +/* + * Copyright (c) 1999-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <grp.h> +#include <paths.h> +#include <pwd.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> +#include <syslog.h> +#include <unistd.h> + +#include <sys/ioctl.h> +#include <sys/mount.h> +#include <sys/param.h> +#include <sys/stat.h> + +#include <IOKit/storage/IOMediaBSDClient.h> + +#include <hfs/hfs_format.h> +#include "newfs_hfs.h" + +#if __STDC__ +#include <stdarg.h> +#else +#include <varargs.h> +#endif + +#define NOVAL (-1) +#define UMASK (0755) +#define ACCESSMASK (0777) + +/* + * The maximum HFS volume size is calculated thusly: + * + * The maximum allocation block size (which must be a power of 2 value), + * is 2GB, or 2^31 bytes + * + * The maximum number of allocation blocks is 2^32 -1. + * + * Multiplying that out yields 2GB * ( 4GB - 1 ) == 2GB*4GB - 2GB. + * More explicitly, 8 exabytes - 2 gigabytes, + * or 0x7FFFFFFF80000000 bytes. That gives us our value below. + */ + +#define MAXHFSVOLSIZE (0x7FFFFFFF80000000ULL) + +#define ROUNDUP(x,y) (((x)+(y)-1)/(y)*(y)) + +static void getnodeopts __P((char* optlist)); +static void getclumpopts __P((char* optlist)); +#ifdef DEBUG_BUILD +static void getstartopts __P((char *optlist)); +static void getextsopts __P((char* optlist)); +#endif +static gid_t a_gid __P((char *)); +static uid_t a_uid __P((char *)); +static mode_t a_mask __P((char *)); +static int hfs_newfs __P((char *device)); +static void validate_hfsplus_block_size __P((UInt64 sectorCount, UInt32 sectorSize)); +static void hfsplus_params __P((const DriveInfo* dip, hfsparams_t *defaults)); +static UInt32 clumpsizecalc __P((UInt32 clumpblocks)); +static UInt32 CalcHFSPlusBTreeClumpSize __P((UInt32 blockSize, UInt32 nodeSize, UInt64 sectors, int fileID)); +static void usage __P((void)); +static int get_high_bit (u_int64_t bitstring); +static int bad_disk_size (u_int64_t numsectors, u_int64_t sectorsize); + + + +char *progname; +char gVolumeName[kHFSPlusMaxFileNameChars + 1] = {kDefaultVolumeNameStr}; +char rawdevice[MAXPATHLEN]; +char blkdevice[MAXPATHLEN]; +uint32_t gBlockSize = 0; +UInt32 gNextCNID = kHFSFirstUserCatalogNodeID; + +time_t createtime; + +int gNoCreate = FALSE; +int gUserCatNodeSize = FALSE; +int gCaseSensitive = FALSE; +int gUserAttrSize = FALSE; +int gContentProtect = FALSE; + +static UInt32 attrExtCount = 1, blkallocExtCount = 1, catExtCount = 1, extExtCount = 1; +static UInt32 attrExtStart = 0, blkallocExtStart = 0, catExtStart = 0, extExtStart = 0; +static UInt32 jibStart = 0, jnlStart = 0, allocStart = 0; + +#ifdef DEBUG_BUILD +uint16_t gProtectLevel = 0; +#endif + +#define JOURNAL_DEFAULT_SIZE (8*1024*1024) +int gJournaled = FALSE; +char *gJournalDevice = NULL; +UInt64 gJournalSize = 0; + +uid_t gUserID = (uid_t)NOVAL; +gid_t gGroupID = (gid_t)NOVAL; +mode_t gModeMask = (mode_t)NOVAL; + +/* Starting allocation block number for the 
file system, + * all btrees, including journal will be laid down at this + * alloation block offset. + */ +UInt32 gFSStartBlock = 0; + +UInt64 gPartitionSize = 0; + +UInt32 catnodesiz = 8192; +UInt32 extnodesiz = 4096; +UInt32 atrnodesiz = 8192; + +UInt32 catclumpblks = 0; +UInt32 extclumpblks = 0; +UInt32 atrclumpblks = 0; +UInt32 bmclumpblks = 0; +UInt32 rsrclumpblks = 0; +UInt32 datclumpblks = 0; +uint32_t hfsgrowblks = 0; /* maximum growable size of wrapper */ + + +UInt64 +get_num(char *str) +{ + UInt64 num; + char *ptr; + + num = strtoull(str, &ptr, 0); + + if (*ptr) { + char scale = tolower(*ptr); + + switch(scale) { + case 'b': + num *= 512ULL; + break; + case 'p': + num *= 1024ULL; + /* fall through */ + case 't': + num *= 1024ULL; + /* fall through */ + case 'g': + num *= 1024ULL; + /* fall through */ + case 'm': + num *= 1024ULL; + /* fall through */ + case 'k': + num *= 1024ULL; + break; + + default: + num = 0ULL; + break; + } + } + return num; +} + + +int +main(argc, argv) + int argc; + char **argv; +{ + extern char *optarg; + extern int optind; + int ch; + char *cp, *special; + struct statfs *mp; + int n; + + if ((progname = strrchr(*argv, '/'))) + ++progname; + else + progname = *argv; + +// No semicolon at end of line deliberately! + + static const char *options = "G:J:D:M:N:PU:hsb:c:i:n:v:" +#ifdef DEBUG_BUILD + "p:a:E:" +#endif + ; + + while ((ch = getopt(argc, argv, options)) != -1) + switch (ch) { + case 'G': + gGroupID = a_gid(optarg); + break; + + case 'J': + gJournaled = TRUE; + if (isdigit(optarg[0])) { + gJournalSize = get_num(optarg); + if (gJournalSize < 512*1024) { + printf("%s: journal size %lldk too small. Reset to %dk.\n", + progname, gJournalSize/1024, JOURNAL_DEFAULT_SIZE/1024); + gJournalSize = JOURNAL_DEFAULT_SIZE; + } + } else { + /* back up because there was no size argument */ + optind--; + } + break; + + case 'D': + gJournalDevice = (char *)optarg; + break; + + case 'N': + gNoCreate = TRUE; + if (isdigit(optarg[0])) { + gPartitionSize = get_num(optarg); + } else { + /* back up because there was no size argument */ + optind--; + } + break; + + case 'P': + gContentProtect = TRUE; + break; + +#ifdef DEBUG_BUILD + case 'p': + if (isdigit (optarg[0])) { + uint64_t level = get_num (optarg); + gProtectLevel = (uint16_t) level; + } + else { + /* back up because no level was provided */ + optind--; + } + break; +#endif + + case 'M': + gModeMask = a_mask(optarg); + break; + + case 'U': + gUserID = a_uid(optarg); + break; + +#ifdef DEBUG_BUILD + case 'a': + getstartopts(optarg); + break; +#endif + +#ifdef DEBUG_BUILD + case 'E': + getextsopts(optarg); + break; +#endif + case 'b': + { + UInt64 tempBlockSize; + + tempBlockSize = get_num(optarg); + if (tempBlockSize < HFSMINBSIZE) + fatal("%s: bad allocation block size (too small)", optarg); + if (tempBlockSize > HFSMAXBSIZE) + fatal("%s: bad allocation block size (too large)", optarg); + gBlockSize = tempBlockSize; + break; + } + + case 'c': + getclumpopts(optarg); + break; + + case 'i': + gNextCNID = atoi(optarg); + /* + * make sure its at least kHFSFirstUserCatalogNodeID + */ + if (gNextCNID < kHFSFirstUserCatalogNodeID) + fatal("%s: starting catalog node id too small (must be > 15)", optarg); + break; + + case 'n': + getnodeopts(optarg); + break; + + case 's': + gCaseSensitive = TRUE; + break; + + case 'v': + n = strlen(optarg); + if ((size_t)n > (sizeof(gVolumeName) - 1)) + fatal("\"%s\" is too long (%d byte maximum)", + optarg, sizeof(gVolumeName) - 1); + if (n == 0) + fatal("name required with -v option"); + 
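+   /* name length and non-emptiness were validated above; strlcpy() NUL-terminates within sizeof(gVolumeName) */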
strlcpy(gVolumeName, optarg, sizeof(gVolumeName)); + break; + + case '?': + default: + usage(); + } + + argc -= optind; + argv += optind; + +#ifdef DEBUG_BUILD + if ((gProtectLevel) && !(gContentProtect)) { + fatal ("content protection must be specified to set a protection level"); + } +#endif + + if (gPartitionSize != 0) { + /* + * If we are given -N, a size, and a device, that's a usage error. + */ + if (argc != 0) + usage(); + + rawdevice[0] = blkdevice[0] = 0; + } else { + if (argc != 1) + usage(); + + special = argv[0]; + cp = strrchr(special, '/'); + if (cp != 0) + special = cp + 1; + if (*special == 'r') + special++; + (void) snprintf(rawdevice, sizeof(rawdevice), "%sr%s", _PATH_DEV, special); + (void) snprintf(blkdevice, sizeof(blkdevice), "%s%s", _PATH_DEV, special); + } + + if (gPartitionSize == 0) { + /* + * Check if target device is aready mounted + */ + n = getmntinfo(&mp, MNT_NOWAIT); + if (n == 0) + fatal("%s: getmntinfo: %s", blkdevice, strerror(errno)); + + while (--n >= 0) { + if (strcmp(blkdevice, mp->f_mntfromname) == 0) + fatal("%s is mounted on %s", blkdevice, mp->f_mntonname); + ++mp; + } + } + + if (hfs_newfs(rawdevice) < 0) { + err(1, "cannot create filesystem on %s", rawdevice); + } + + exit(0); +} + + +static void getnodeopts(char* optlist) +{ + char *strp = optlist; + char *ndarg; + char *p; + UInt32 ndsize; + + while((ndarg = strsep(&strp, ",")) != NULL && *ndarg != '\0') { + + p = strchr(ndarg, '='); + if (p == NULL) + usage(); + + ndsize = atoi(p+1); + + switch (*ndarg) { + case 'c': + if (ndsize < 4096 || ndsize > 32768 || (ndsize & (ndsize-1)) != 0) + fatal("%s: invalid catalog b-tree node size", ndarg); + catnodesiz = ndsize; + gUserCatNodeSize = TRUE; + break; + + case 'e': + if (ndsize < 1024 || ndsize > 32768 || (ndsize & (ndsize-1)) != 0) + fatal("%s: invalid extents b-tree node size", ndarg); + extnodesiz = ndsize; + break; + + case 'a': + if (ndsize < 4096 || ndsize > 32768 || (ndsize & (ndsize-1)) != 0) + fatal("%s: invalid atrribute b-tree node size", ndarg); + atrnodesiz = ndsize; + break; + + default: + usage(); + } + } +} + + +static void getclumpopts(char* optlist) +{ + char *strp = optlist; + char *ndarg; + char *p; + UInt32 clpblocks; + + while((ndarg = strsep(&strp, ",")) != NULL && *ndarg != '\0') { + + p = strchr(ndarg, '='); + if (p == NULL) + usage(); + + clpblocks = atoi(p+1); + + switch (*ndarg) { + case 'a': + atrclumpblks = clpblocks; + gUserAttrSize = TRUE; + break; + case 'b': + bmclumpblks = clpblocks; + break; + case 'c': + catclumpblks = clpblocks; + break; + case 'd': + datclumpblks = clpblocks; + break; + case 'e': + extclumpblks = clpblocks; + break; + case 'r': + rsrclumpblks = clpblocks; + break; + + default: + usage(); + } + } +} + +#ifdef DEBUG_BUILD +static void getextsopts(char* optlist) +{ + char *strp = optlist; + char *ndarg; + char *p; + UInt32 numexts; + + while((ndarg = strsep(&strp, ",")) != NULL && *ndarg != '\0') { + + p = strchr(ndarg, '='); + if (p == NULL) + usage(); + + numexts = atoi(p+1); + + switch (*ndarg) { + case 'a': + attrExtCount = numexts; + break; + case 'b': + blkallocExtCount = numexts; + break; + case 'c': + catExtCount = numexts; + break; + case 'e': + extExtCount = numexts; + break; + default: + usage(); + } + } +} + +static void getstartopts(char* optlist) +{ + char *strp; + char *ndarg; + char *p; + unsigned long startat = 0; + + startat = strtoul(optlist, &strp, 0); + if (startat == ULONG_MAX && errno != 0) { + err(1, "invalid allocation start block string %s", optlist); + } + if (startat 
> UINT_MAX) { + errx(1, "Allocation block %lu larger than max", startat); + } + if (strp && *strp == ',') + strp++; + + gFSStartBlock = startat; + + while((ndarg = strsep(&strp, ",")) != NULL && *ndarg != '\0') { + + startat = strtoul(optlist, NULL, 0); + p = strchr(ndarg, '='); + if (p == NULL) + usage(); + + startat = atoi(p+1); + + switch (*ndarg) { + case 'a': + attrExtStart = startat; + break; + case 'b': + blkallocExtStart = startat; + break; + case 'c': + catExtStart = startat; + break; + case 'e': + extExtStart = startat; + break; + case 'j': + jibStart = startat; + break; + case 'J': + jnlStart = startat; + break; + case 'N': + allocStart = startat; + break; + default: + usage(); + } + } +} +#endif + +gid_t +static a_gid(char *s) +{ + struct group *gr; + char *gname; + gid_t gid = 0; + + if ((gr = getgrnam(s)) != NULL) + gid = gr->gr_gid; + else { + for (gname = s; *s && isdigit(*s); ++s); + if (!*s) + gid = atoi(gname); + else + errx(1, "unknown group id: %s", gname); + } + return (gid); +} + +static uid_t +a_uid(char *s) +{ + struct passwd *pw; + char *uname; + uid_t uid = 0; + + if ((pw = getpwnam(s)) != NULL) + uid = pw->pw_uid; + else { + for (uname = s; *s && isdigit(*s); ++s); + if (!*s) + uid = atoi(uname); + else + errx(1, "unknown user id: %s", uname); + } + return (uid); +} + +static mode_t +a_mask(char *s) +{ + int done, rv; + char *ep; + + done = 0; + rv = -1; + if (*s >= '0' && *s <= '7') { + done = 1; + rv = strtol(s, &ep, 8); + } + if (!done || rv < 0 || *ep) + errx(1, "invalid access mask: %s", s); + return (rv); +} + +/* + * Check to see if the volume is too big. + * + * Returns: + * 0 if it is appropriately sized. + * 1 if HFS+ cannot be formatted onto the disk. + */ + +static int bad_disk_size (u_int64_t numsectors, u_int64_t sectorsize) { + + u_int32_t maxSectorBits = 0; + u_int32_t maxSectorSizeBits = 0; + u_int32_t maxBits = 0; + u_int64_t bytes; + + /* + * The essential problem here is that we cannot simply multiply the sector size by the + * number of sectors because the product could overflow a 64 bit integer. We do a cursory + * check and then a longer check once we know the product will not overflow. + */ + + maxSectorBits = get_high_bit (numsectors); + maxSectorSizeBits = get_high_bit (sectorsize); + + /* + * We get the number of bits to represent the number of sectors and the sector size. + * Adding the two numbers gives us the number of bits required to represent the product. + * If the product is > 63 then it must be too big. + */ + + maxBits = maxSectorBits + maxSectorSizeBits; + if (maxBits > 63) { + return 1; + } + + /* Well, now we know that the two values won't overflow. Time to multiply */ + bytes = numsectors * sectorsize; + + if (bytes > MAXHFSVOLSIZE) { + /* Too big! */ + return 1; + } + + /* Otherwise, it looks good */ + return 0; + +} + +/* + * The allocation block size must be defined as a power of 2 value, with a floor of + * 512 bytes. However, we never default to anything less than 4096 bytes, so that + * gives us 20 block size values from 4kb -> 2GB block size. + * + * See inline comments for how this table is used to determine the minimum fs size that + * will use a specified allocation block size. + * + * The growth boundary is used to figure out if we need a bigger block size than the + * 4 KB default. We get the index of the highest bit set in the FS size, then subtract the + * growth boundary to index into the block allocation size array. 
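+ * For example, a 3 TB filesystem has its highest set bit at 2^41, so get_high_bit() returns 42;
+ * subtracting the growth boundary of 41 leaves index 1, which selects the 8 KB entry in the table below.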
+ * + * Note that 8K appears twice in table since we want to use it for the range 2 TB < 8 TB FS size. + * This means that when the 2TB bit or the 4TB bit is the high bit set, we prefer the 8K block size. + */ +#define NUM_ALLOC_BLOCKSIZES 21 +#define GROWTH_BOUNDARY 41 + +u_int64_t alloc_blocksize[NUM_ALLOC_BLOCKSIZES] = { + /* Block Size*/ /* Min Dflt FS Size */ /* Max FS Size */ + 4096, /* 0 bytes */ /* 16 TB */ + 8192, /* 2 TB */ /* 32 TB */ /* Note that 8K appears twice in table ! */ + 8192, /* 4 TB */ /* 32 TB */ /* Note that 8K appears twice in table ! */ + 16384, /* 8 TB */ /* 64 TB */ + 32768, /* 16 TB */ /* 128 TB */ + 65536, /* 32 TB */ /* 256 TB */ + 131072, /* 64 TB */ /* 512 TB */ + 262144, /* 128 TB */ /* 1 PB */ + 524288, /* 256 TB */ /* 2 PB */ + 1048576, /* 512 TB */ /* 4 PB */ + 2097152, /* 1 PB */ /* 8 PB */ + 4194304, /* 2 PB */ /* 16 PB */ + 8388608, /* 4 PB */ /* 32 PB */ + 16777216, /* 8 PB */ /* 64 PB */ + 33554432, /* 16 PB */ /* 128 PB */ + 67108864, /* 32 PB */ /* 256 PB */ + 134217728, /* 64 PB */ /* 512 PB */ + 268435456, /* 128 PB */ /* 1 EB */ + 536870912, /* 256 PB */ /* 2 EB */ + 1073741824, /* 512 PB */ /* 4 EB */ + 2147483648ULL /* 1 EB */ /* 8 EB */ +}; + +static int get_high_bit (u_int64_t bitstring) { + u_int64_t bits = bitstring; + int counter = 0; + while (bits) { + bits = (bits >> 1); + counter++; + } + return counter; +} + + +/* + * Validate the HFS Plus allocation block size in gBlockSize. If none was + * specified, then calculate a suitable default. + * + * Modifies the global variable gBlockSize. + */ +static void validate_hfsplus_block_size(UInt64 sectorCount, UInt32 sectorSize) +{ + if (gBlockSize == 0) { + + /* Start by calculating the fs size */ + u_int64_t fs_size = sectorCount * sectorSize; + + /* + * Determine the default based on a sliding scale. The maximum number of + * allocation blocks is always 4294967295 == (32 bits worth). At 1 bit per + * allocation block, that yields 512 MB of bitmap no matter what size we use + * for the allocation block. + * + * The general default policy is to allow the filesystem to grow up to 8x the + * current maximum size. So for a 1.5TB filesystem, an 8x multiplier would be + * 12TB. That means we can use the default size of 4096 bytes. The boundary begins + * at 2TB, since at that point, we can no longer use the default 4096 block size to + * extend the filesystem by 8x. For a 16KB block size, the max is 64 TB, but the 8x + * multiplier begins at 8 TB. Thereafter, we increase for every power of 2 that + * the current filesystem size grows. + */ + + gBlockSize = DFL_BLKSIZE; /* Prefer the default of 4K */ + + int bit_index = get_high_bit (fs_size); + bit_index -= GROWTH_BOUNDARY; + + /* + * After subtracting the GROWTH_BOUNDARY to index into the array, we'll + * use the values in the static array if we have a non-negative index. + * That means that if the filesystem is >= 1 TB, then we'll use the index + * value. At 2TB, we grow to the 8K block size. 
+ */ + if ((bit_index >= 0) && (bit_index < 22)) { + gBlockSize = alloc_blocksize[bit_index]; + } + + if (bit_index >= 22) { + fatal("Error: Disk Device is too big (%llu sectors, %d bytes per sector", sectorCount, sectorSize); + } + } + else { + /* Make sure a user-specified block size is reasonable */ + if ((gBlockSize & (gBlockSize-1)) != 0) { + fatal("%s: bad HFS Plus allocation block size (must be a power of two)", optarg); + } + + if ((sectorCount / (gBlockSize / sectorSize)) > 0xFFFFFFFF) { + fatal("%s: block size is too small for %lld sectors", optarg, gBlockSize, sectorCount); + } + + if (gBlockSize < HFSOPTIMALBLKSIZE) { + warnx("Warning: %u is a non-optimal block size (4096 would be a better choice)", (unsigned int)gBlockSize); + } + } + + if (gFSStartBlock) { + u_int64_t fs_size = sectorCount * sectorSize; + u_int32_t totalBlocks = fs_size/gBlockSize; + + if (gFSStartBlock >= totalBlocks) { + warnx("Warning: %u is invalid file system start allocation block number, must be less than total allocation blocks (%u)", (unsigned int)gFSStartBlock, (unsigned int)totalBlocks); + warnx("Warning: Resetting file system start block to zero"); + gFSStartBlock = 0; + } + } +} + + + +static int +hfs_newfs(char *device) +{ + struct stat stbuf; + DriveInfo dip = { 0 }; + int fso = -1; + int retval = 0; + hfsparams_t defaults = {0}; + UInt64 maxPhysPerIO = 0; + + if (gPartitionSize) { + dip.sectorSize = kBytesPerSector; + dip.physTotalSectors = dip.totalSectors = gPartitionSize / kBytesPerSector; + dip.physSectorSize = kBytesPerSector; /* 512-byte sectors */ + dip.fd = 0; + } else { + if (gNoCreate) { + fso = open( device, O_RDONLY | O_NDELAY, 0 ); + } else { + fso = open( device, O_RDWR | O_NDELAY, 0 ); + } + if (fso == -1) { + return -1; + } + + dip.fd = fso; + fcntl(fso, F_NOCACHE, 1); + + if (fso < 0) + fatal("%s: %s", device, strerror(errno)); + + if (fstat( fso, &stbuf) < 0) + fatal("%s: %s", device, strerror(errno)); + + if (ioctl(fso, DKIOCGETBLOCKSIZE, &dip.physSectorSize) < 0) + fatal("%s: %s", device, strerror(errno)); + + if ((dip.physSectorSize % kBytesPerSector) != 0) + fatal("%d is an unsupported sector size\n", dip.physSectorSize); + + if (ioctl(fso, DKIOCGETBLOCKCOUNT, &dip.physTotalSectors) < 0) + fatal("%s: %s", device, strerror(errno)); + + } + + dip.physSectorsPerIO = (1024 * 1024) / dip.physSectorSize; /* use 1M as default */ + + if (fso != -1 && ioctl(fso, DKIOCGETMAXBLOCKCOUNTREAD, &maxPhysPerIO) < 0) + fatal("%s: %s", device, strerror(errno)); + + if (maxPhysPerIO) + dip.physSectorsPerIO = MIN(dip.physSectorsPerIO, maxPhysPerIO); + + if (fso != -1 && ioctl(fso, DKIOCGETMAXBLOCKCOUNTWRITE, &maxPhysPerIO) < 0) + fatal("%s: %s", device, strerror(errno)); + + if (maxPhysPerIO) + dip.physSectorsPerIO = MIN(dip.physSectorsPerIO, maxPhysPerIO); + + if (fso != -1 && ioctl(fso, DKIOCGETMAXBYTECOUNTREAD, &maxPhysPerIO) < 0) + fatal("%s: %s", device, strerror(errno)); + + if (maxPhysPerIO) + dip.physSectorsPerIO = MIN(dip.physSectorsPerIO, maxPhysPerIO / dip.physSectorSize); + + if (fso != -1 && ioctl(fso, DKIOCGETMAXBYTECOUNTWRITE, &maxPhysPerIO) < 0) + fatal("%s: %s", device, strerror(errno)); + + if (maxPhysPerIO) + dip.physSectorsPerIO = MIN(dip.physSectorsPerIO, maxPhysPerIO / dip.physSectorSize); + + dip.sectorSize = kBytesPerSector; + dip.totalSectors = dip.physTotalSectors * dip.physSectorSize / dip.sectorSize; + + dip.sectorOffset = 0; + time(&createtime); + + /* Check to see if the disk is too big */ + u_int64_t secsize = (u_int64_t) dip.sectorSize; + if 
(bad_disk_size(dip.totalSectors, secsize)) { + fatal("%s: partition is too big (maximum is %llu KB)", device, MAXHFSVOLSIZE/1024); + } + + /* + * If we're going to make an HFS Plus disk (with or without a wrapper), validate the + * HFS Plus allocation block size. This will also calculate a default allocation + * block size if none (or zero) was specified. + */ + validate_hfsplus_block_size(dip.totalSectors, dip.sectorSize); + + /* Make an HFS Plus disk */ + + if ((dip.totalSectors * dip.sectorSize ) < kMinHFSPlusVolumeSize) + fatal("%s: partition is too small (minimum is %d KB)", device, kMinHFSPlusVolumeSize/1024); + + hfsplus_params(&dip, &defaults); + if (gNoCreate == 0) { + retval = make_hfsplus(&dip, &defaults); + if (retval == 0) { + printf("Initialized %s as a ", device); + if (dip.totalSectors > 2048ULL*1024*1024) + printf("%ld TB", + (long)((dip.totalSectors + (1024ULL*1024*1024))/(2048ULL*1024*1024))); + else if (dip.totalSectors > 2048*1024) + printf("%ld GB", + (long)((dip.totalSectors + (1024*1024))/(2048*1024))); + else if (dip.totalSectors > 2048) + printf("%ld MB", + (long)((dip.totalSectors + 1024)/2048)); + else + printf("%ld KB", + (long)((dip.totalSectors + 1)/2)); + + if (gCaseSensitive) { + printf(" case-sensitive"); + } + else { + printf(" case-insensitive"); + } + if (gJournaled) + printf(" HFS Plus volume with a %uk journal\n", + (u_int32_t)defaults.journalSize/1024); + else + printf(" HFS Plus volume\n"); + } + } + + if (retval) + fatal("%s: %s", device, strerror(errno)); + + if ( fso > 0 ) { + close(fso); + } + + return retval; +} + +/* + typedef struct block_info { + off_t bnum; //64 bit + union { + _blk_info bi; //64 bit + struct buf *bp; //64 bit on K64, 32 bit on K32 + } u; + }__attribute__((__packed__)) block_info; + + total size == 16 bytes + */ + +#define BLOCK_INFO_SIZE 16 + +static void hfsplus_params (const DriveInfo* dip, hfsparams_t *defaults) +{ + UInt64 sectorCount = dip->totalSectors; + UInt32 sectorSize = dip->sectorSize; + uint32_t totalBlocks; + UInt32 minClumpSize; + UInt32 clumpSize; + UInt32 oddBitmapBytes; + + defaults->flags = 0; + defaults->blockSize = gBlockSize; + defaults->fsStartBlock = gFSStartBlock; + defaults->nextFreeFileID = gNextCNID; + defaults->createDate = createtime + MAC_GMT_FACTOR; /* Mac OS GMT time */ + defaults->hfsAlignment = 0; + defaults->journaledHFS = gJournaled; + defaults->journalDevice = gJournalDevice; + + /* + * 8429818 + * Always set kUseAccessPerms now; this also + * means we have to always have an owner, group, + * and mask. + */ + defaults->owner = (gUserID == (uid_t)NOVAL) ? geteuid() : gUserID; + defaults->group = (gGroupID == (gid_t)NOVAL) ? getegid() : gGroupID; + defaults->mask = (gModeMask == (mode_t)NOVAL) ? UMASK : (gModeMask & ACCESSMASK); + defaults->flags |= kUseAccessPerms; + + /* + * We want at least 8 megs of journal for each 100 gigs of + * disk space. We cap the size at 512 megs (64x default), unless + * the allocation block size is larger, in which case we use one + * allocation block. + * + * Only scale if it's the default, otherwise just take what + * the user specified, with the caveat below. + */ + if (gJournaled) { + uint32_t min_size = 0; + + /* + * Check to ensure the journal size is not too small relative to the + * sector size of the device. 
This is the check in the kernel: if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || + tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) + * We assume that there will be a block header and that there will be a + * non-negative max_blocks value. + * + * The 2nd check is the problematic one. We cannot have a journal that's too + * small relative to the sector size. max_blocks == (blhdr_size / 16). However, + * this only matters where the current block header size is smaller than the current + * sector size. So, assume that the blhdr_size == sector size for now. We look + * at the condition above to get the rest of the equation -- (journal size / sector size). + * Then, it's simple algebra to figure out what the new minimum journal size + * should be: + * + * (sector_size / 16) > (journal_size / sector_size) + * (sector_size / 16) = (journal_size / sector_size) + * (sector_size / 16) * sector_size = (journal_size / sector_size) * sector_size + * (sector_size / 16) * sector_size = journal_size + * + * This becomes our new _floor_ for the journal_size. + */ + + if (dip->physSectorSize != 0) { + min_size = dip->physSectorSize * (dip->physSectorSize / BLOCK_INFO_SIZE); + } + + if (gJournalSize != 0) { + + /* Was the supplied journal size at least the minimum computed above? */ + if (gJournalSize < min_size) { + printf("%s: journal size %lldk too small. Reset to %dk.\n", + progname, gJournalSize/1024, JOURNAL_DEFAULT_SIZE/1024); + gJournalSize = 0; + + } + /* defaults->journalSize will get reset below if it is 0 */ + defaults->journalSize = gJournalSize; + } + + if ((gJournalSize == 0) || (defaults->journalSize == 0)) { + UInt32 jscale; + uint32_t target_size; + /* Figure out how many 100's of GBs this filesystem represents */ + jscale = (sectorCount * sectorSize) / ((UInt64)100 * 1024 * 1024 * 1024); + if (jscale > 64) { + jscale = 64; + } + + target_size = JOURNAL_DEFAULT_SIZE * (jscale + 1); + /* Is the target size at least the min_size computed above? */ + if (target_size < min_size) { + target_size = min_size; + } + + defaults->journalSize = target_size; + } + + +#ifndef DEBUG_BUILD + // volumes that are 128 megs or less in size have such + // a small bitmap (one 4k-block) and inherently such + // a small btree that we can get by with a much smaller + // journal. even in a worst case scenario of a catalog + // filled with very long Korean file names we should + // never touch more than 256k of meta-data for a single + // transaction. therefore we'll make the journal 512k, + // or as small as possible, given the sector size, + // which is safe and doesn't waste much space. + // However, be careful not to let the journal size drop BELOW + // 512k, since the min_size computations can create an artificially + // tiny journal (16k or so) with 512-byte sectors. 
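+ // For example, with 512-byte physical sectors min_size works out to 512 * (512/16) = 16k,
+ // so the 512k default is used; with 4 KB physical sectors it is 4096 * (4096/16) = 1M,
+ // and the journal for a small volume is raised to 1 MB.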
+ // + if (sectorCount * sectorSize < 128*1024*1024) { + /* This is a small (<128MB) FS */ + uint32_t small_default = (512 * 1024); + + if (small_default <= min_size) { + /* + * If 512k is too small given the sector size, + * then use the larger sector size + */ + defaults->journalSize = min_size; + } + else { + /* 512k was bigger than the min size; we can use it */ + defaults->journalSize = small_default; + } + } +#endif + + if (defaults->journalSize > 512 * 1024 * 1024) { + defaults->journalSize = 512 * 1024 * 1024; + } + + if (defaults->journalSize < defaults->blockSize) { + defaults->journalSize = defaults->blockSize; + } + } + + strncpy((char *)defaults->volumeName, gVolumeName, sizeof(defaults->volumeName) - 1); + defaults->volumeName[sizeof(defaults->volumeName) - 1] = '\0'; + + if (rsrclumpblks == 0) { + if (gBlockSize > DFL_BLKSIZE) + defaults->rsrcClumpSize = ROUNDUP(kHFSPlusRsrcClumpFactor * DFL_BLKSIZE, gBlockSize); + else + defaults->rsrcClumpSize = kHFSPlusRsrcClumpFactor * gBlockSize; + } else + defaults->rsrcClumpSize = clumpsizecalc(rsrclumpblks); + + if (datclumpblks == 0) { + if (gBlockSize > DFL_BLKSIZE) + defaults->dataClumpSize = ROUNDUP(kHFSPlusRsrcClumpFactor * DFL_BLKSIZE, gBlockSize); + else + defaults->dataClumpSize = kHFSPlusRsrcClumpFactor * gBlockSize; + } else + defaults->dataClumpSize = clumpsizecalc(datclumpblks); + + /* + * The default b-tree node size is 8K. However, if the + * volume is small (< 1 GB) we use 4K instead. + */ + if (!gUserCatNodeSize) { + if ((gBlockSize < HFSOPTIMALBLKSIZE) || + ((UInt64)(sectorCount * sectorSize) < (UInt64)0x40000000)) + catnodesiz = 4096; + } + + if (catclumpblks == 0) { + clumpSize = CalcHFSPlusBTreeClumpSize(gBlockSize, catnodesiz, sectorCount, kHFSCatalogFileID); + } + else { + clumpSize = clumpsizecalc(catclumpblks); + + if (clumpSize % catnodesiz != 0) + fatal("c=%ld: clump size is not a multiple of node size\n", clumpSize/gBlockSize); + } + defaults->catalogClumpSize = clumpSize; + defaults->catalogNodeSize = catnodesiz; + defaults->catalogExtsCount = catExtCount; + defaults->catalogStartBlock = catExtStart; + + if (gBlockSize < 4096 && gBlockSize < catnodesiz) + warnx("Warning: block size %u is less than catalog b-tree node size %u", (unsigned int)gBlockSize, (unsigned int)catnodesiz); + + if (extclumpblks == 0) { + clumpSize = CalcHFSPlusBTreeClumpSize(gBlockSize, extnodesiz, sectorCount, kHFSExtentsFileID); + } + else { + clumpSize = clumpsizecalc(extclumpblks); + if (clumpSize % extnodesiz != 0) + fatal("e=%ld: clump size is not a multiple of node size\n", clumpSize/gBlockSize); + } + defaults->extentsClumpSize = clumpSize; + defaults->extentsNodeSize = extnodesiz; + defaults->extentsExtsCount = extExtCount; + defaults->extentsStartBlock = extExtStart; + + if (gBlockSize < extnodesiz) + warnx("Warning: block size %u is less than extents b-tree node size %u", (unsigned int)gBlockSize, (unsigned int)extnodesiz); + if (defaults->extentsExtsCount > 8) { + warnx("Warning: extents overflow extent requested count %u exceeds maximum 8, capping at 8\n", defaults->extentsExtsCount); + defaults->extentsExtsCount = 8; + } + if (atrclumpblks == 0) { + if (gUserAttrSize) { + clumpSize = 0; + } + else { + clumpSize = CalcHFSPlusBTreeClumpSize(gBlockSize, atrnodesiz, sectorCount, kHFSAttributesFileID); + } + } + else { + clumpSize = clumpsizecalc(atrclumpblks); + if (clumpSize % atrnodesiz != 0) + fatal("a=%ld: clump size is not a multiple of node size\n", clumpSize/gBlockSize); + } + defaults->attributesClumpSize = 
clumpSize; + defaults->attributesNodeSize = atrnodesiz; + defaults->attributesExtsCount = attrExtCount; + defaults->attributesStartBlock = attrExtStart; + + /* + * Calculate the number of blocks needed for bitmap (rounded up to a multiple of the block size). + */ + + /* + * Figure out how many bytes we need for the given totalBlocks + * Note: this minimum value may be too large when it counts the + * space used by the wrapper + */ + totalBlocks = sectorCount / (gBlockSize / sectorSize); + + minClumpSize = totalBlocks >> 3; /* convert bits to bytes by dividing by 8 */ + if (totalBlocks & 7) + ++minClumpSize; /* round up to whole bytes */ + + /* Round up to a multiple of blockSize */ + if ((oddBitmapBytes = minClumpSize % gBlockSize)) + minClumpSize = minClumpSize - oddBitmapBytes + gBlockSize; + + if (bmclumpblks == 0) { + clumpSize = minClumpSize; + } + else { + clumpSize = clumpsizecalc(bmclumpblks); + + if (clumpSize < minClumpSize) + fatal("b=%ld: bitmap clump size is too small\n", clumpSize/gBlockSize); + } + defaults->allocationClumpSize = clumpSize; + defaults->allocationExtsCount = blkallocExtCount; + defaults->allocationStartBlock = blkallocExtStart; + + defaults->journalInfoBlock = jibStart; + defaults->journalBlock = jnlStart; + defaults->nextAllocBlock = allocStart; + + if (gCaseSensitive) + defaults->flags |= kMakeCaseSensitive; + + if (gContentProtect) + defaults->flags |= kMakeContentProtect; + +#ifdef DEBUG_BUILD + if (gProtectLevel) + defaults->protectlevel = gProtectLevel; +#endif + + if (gNoCreate) { + if (gPartitionSize == 0) + printf("%llu sectors (%u bytes per sector)\n", dip->physTotalSectors, dip->physSectorSize); + printf("HFS Plus format parameters:\n"); + printf("\tvolume name: \"%s\"\n", gVolumeName); + printf("\tblock-size: %u\n", defaults->blockSize); + printf("\ttotal blocks: %u\n", totalBlocks); + if (gJournaled) + printf("\tjournal-size: %uk\n", defaults->journalSize/1024); + printf("\tfirst free catalog node id: %u\n", defaults->nextFreeFileID); + printf("\tcatalog b-tree node size: %u\n", defaults->catalogNodeSize); + printf("\tinitial catalog file size: %u\n", defaults->catalogClumpSize); + printf("\textents b-tree node size: %u\n", defaults->extentsNodeSize); + printf("\tinitial extents file size: %u\n", defaults->extentsClumpSize); + printf("\tattributes b-tree node size: %u\n", defaults->attributesNodeSize); + printf("\tinitial attributes file size: %u\n", defaults->attributesClumpSize); + printf("\tinitial allocation file size: %u (%u blocks)\n", + defaults->allocationClumpSize, defaults->allocationClumpSize / gBlockSize); + printf("\tdata fork clump size: %u\n", defaults->dataClumpSize); + printf("\tresource fork clump size: %u\n", defaults->rsrcClumpSize); + if (defaults->flags & kUseAccessPerms) { + printf("\tuser ID: %d\n", (int)defaults->owner); + printf("\tgroup ID: %d\n", (int)defaults->group); + printf("\taccess mask: %o\n", (int)defaults->mask); + } + printf("\tfile system start block: %u\n", defaults->fsStartBlock); + } +} + + +static UInt32 +clumpsizecalc(UInt32 clumpblocks) +{ + UInt64 clumpsize; + + clumpsize = (UInt64)clumpblocks * (UInt64)gBlockSize; + + if (clumpsize & (UInt64)(0xFFFFFFFF00000000ULL)) + fatal("=%ld: too many blocks for clump size!", clumpblocks); + + return ((UInt32)clumpsize); +} + + +#define CLUMP_ENTRIES 15 + +short clumptbl[CLUMP_ENTRIES * 3] = { +/* + * Volume Attributes Catalog Extents + * Size Clump (MB) Clump (MB) Clump (MB) + */ + /* 1GB */ 4, 4, 4, + /* 2GB */ 6, 6, 4, + /* 4GB */ 8, 8, 4, + /* 8GB */ 11, 11, 
5, + /* + * For volumes 16GB and larger, we want to make sure that a full OS + * install won't require fragmentation of the Catalog or Attributes + * B-trees. We do this by making the clump sizes sufficiently large, + * and by leaving a gap after the B-trees for them to grow into. + * + * For SnowLeopard 10A298, a FullNetInstall with all packages selected + * results in: + * Catalog B-tree Header + * nodeSize: 8192 + * totalNodes: 31616 + * freeNodes: 1978 + * (used = 231.55 MB) + * Attributes B-tree Header + * nodeSize: 8192 + * totalNodes: 63232 + * freeNodes: 958 + * (used = 486.52 MB) + * + * We also want Time Machine backup volumes to have a sufficiently + * large clump size to reduce fragmentation. + * + * The series of numbers for Catalog and Attribute form a geometric series. + * For Catalog (16GB to 512GB), each term is 8**(1/5) times the previous + * term. For Attributes (16GB to 512GB), each term is 4**(1/5) times + * the previous term. For 1TB to 16TB, each term is 2**(1/5) times the + * previous term. + */ + /* 16GB */ 64, 32, 5, + /* 32GB */ 84, 49, 6, + /* 64GB */ 111, 74, 7, + /* 128GB */ 147, 111, 8, + /* 256GB */ 194, 169, 9, + /* 512GB */ 256, 256, 11, + /* 1TB */ 294, 294, 14, + /* 2TB */ 338, 338, 16, + /* 4TB */ 388, 388, 20, + /* 8TB */ 446, 446, 25, + /* 16TB */ 512, 512, 32 +}; + +/* + * CalcHFSPlusBTreeClumpSize + * + * This routine calculates the file clump size for either + * the catalog file or the extents overflow file. + */ +static UInt32 +CalcHFSPlusBTreeClumpSize(UInt32 blockSize, UInt32 nodeSize, UInt64 sectors, int fileID) +{ + UInt32 mod = MAX(nodeSize, blockSize); + UInt32 clumpSize; + int column; + int i; + + /* Figure out which column of the above table to use for this file. */ + switch (fileID) { + case kHFSAttributesFileID: + column = 0; + break; + case kHFSCatalogFileID: + column = 1; + break; + default: + column = 2; + break; + } + + /* + * The default clump size is 0.8% of the volume size. And + * it must also be a multiple of the node and block size. + */ + if (sectors < 0x200000) { + clumpSize = sectors << 2; /* 0.8 % */ + if (clumpSize < (8 * nodeSize)) + clumpSize = 8 * nodeSize; + } else { + /* + * XXX This should scale more smoothly! + */ + /* turn exponent into table index... */ + for (i = 0, sectors = sectors >> 22; + sectors && (i < CLUMP_ENTRIES-1); + ++i, sectors = sectors >> 1); + + clumpSize = clumptbl[column + (i) * 3] * 1024 * 1024; + } + + /* + * Round the clump size to a multiple of node and block size. + * NOTE: This rounds down. + */ + clumpSize /= mod; + clumpSize *= mod; + + /* + * Rounding down could have rounded down to 0 if the block size was + * greater than the clump size. If so, just use one block or node. + */ + if (clumpSize == 0) + clumpSize = mod; + + return (clumpSize); +} + + +/* VARARGS */ +void +#if __STDC__ +fatal(const char *fmt, ...) 
+#else +fatal(fmt, va_alist) + char *fmt; + va_dcl +#endif +{ + va_list ap; + +#if __STDC__ + va_start(ap, fmt); +#else + va_start(ap); +#endif + if (fcntl(STDERR_FILENO, F_GETFL) < 0) { + openlog(progname, LOG_CONS, LOG_DAEMON); + vsyslog(LOG_ERR, fmt, ap); + closelog(); + } else { + vwarnx(fmt, ap); + } + va_end(ap); + exit(1); + /* NOTREACHED */ +} + + +void usage() +{ + fprintf(stderr, "usage: %s [-N [partition-size]] [hfsplus-options] special-device\n", progname); + + fprintf(stderr, " options:\n"); + fprintf(stderr, "\t-N do not create file system, just print out parameters\n"); + fprintf(stderr, "\t-s use case-sensitive filenames (default is case-insensitive)\n"); + + fprintf(stderr, " where hfsplus-options are:\n"); + fprintf(stderr, "\t-J [journal-size] make this HFS+ volume journaled\n"); + fprintf(stderr, "\t-D journal-dev use 'journal-dev' for an external journal\n"); + fprintf(stderr, "\t-G group-id (for root directory)\n"); + fprintf(stderr, "\t-U user-id (for root directory)\n"); + fprintf(stderr, "\t-M octal access-mask (for root directory)\n"); + fprintf(stderr, "\t-b allocation block size (4096 optimal)\n"); + fprintf(stderr, "\t-c clump size list (comma separated)\n"); + fprintf(stderr, "\t\ta=blocks (attributes file)\n"); + fprintf(stderr, "\t\tb=blocks (bitmap file)\n"); + fprintf(stderr, "\t\tc=blocks (catalog file)\n"); + fprintf(stderr, "\t\td=blocks (user data fork)\n"); + fprintf(stderr, "\t\te=blocks (extents file)\n"); + fprintf(stderr, "\t\tr=blocks (user resource fork)\n"); + fprintf(stderr, "\t-i starting catalog node id\n"); + fprintf(stderr, "\t-n b-tree node size list (comma separated)\n"); + fprintf(stderr, "\t\te=size (extents b-tree)\n"); + fprintf(stderr, "\t\tc=size (catalog b-tree)\n"); + fprintf(stderr, "\t\ta=size (attributes b-tree)\n"); + fprintf(stderr, "\t-v volume name (in ascii or UTF-8)\n"); +#ifdef DEBUG_BUILD + fprintf(stderr, "\t-E extent count list (comma separated)\n"); + fprintf(stderr, "\t\ta=count (attributes file)\n"); + fprintf(stderr, "\t\tb=count (bitmap file)\n"); + fprintf(stderr, "\t\tc=count (catalog file)\n"); + fprintf(stderr, "\t\te=count (extents file)\n"); + fprintf(stderr, "\t-a <num>[,list] metadata start allocation block, all btrees and journal will be created starting at this allocation block offset\n"); + fprintf(stderr, "\t\tlist is as with -E above, plus:\n"); + fprintf(stderr, "\t\tj=addr (JournalInfoBlock)\n"); + fprintf(stderr, "\t\tJ=addr (Journal)\n"); + fprintf(stderr, "\t\tN=addr (Next Allocation Block)\n"); + fprintf(stderr, "\t\tExample: -a 100,e=200,c=500\n"); +#endif + + fprintf(stderr, " examples:\n"); + fprintf(stderr, "\t%s -v Untitled /dev/rdisk0s7 \n", progname); + fprintf(stderr, "\t%s -v Untitled -n c=4096,e=1024 /dev/rdisk0s7 \n", progname); + fprintf(stderr, "\t%s -v Untitled -c b=64,c=1024 /dev/rdisk0s7 \n\n", progname); + + exit(1); +} diff --git a/newfs_hfs/newfs_hfs.h b/newfs_hfs/newfs_hfs.h new file mode 100644 index 0000000..4f0c521 --- /dev/null +++ b/newfs_hfs/newfs_hfs.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 1999-2011 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include <CoreFoundation/CFBase.h> + +/* + * Mac OS Finder flags + */ +enum { + kHasBeenInited = 0x0100, /* Files only */ + /* Clear if the file contains desktop database */ + /* bit 0x0200 was the letter bit for AOCE, but is now reserved for future use */ + kHasCustomIcon = 0x0400, /* Files and folders */ + kIsStationery = 0x0800, /* Files only */ + kNameLocked = 0x1000, /* Files and folders */ + kHasBundle = 0x2000, /* Files only */ + kIsInvisible = 0x4000, /* Files and folders */ + kIsAlias = 0x8000 /* Files only */ +}; + + +/* Finder types (mostly opaque in our usage) */ +struct FInfo { + uint32_t fileType; /* The type of the file */ + uint32_t fileCreator; /* The file's creator */ + UInt16 finderFlags; /* ex: kHasBundle, kIsInvisible... */ + UInt8 opaque[6]; /* If set to {0, 0}, the Finder will place the item automatically */ +}; +typedef struct FInfo FInfo; + +struct FXInfo { + UInt8 opaque[16]; +}; +typedef struct FXInfo FXInfo; + +struct DInfo { + UInt8 opaque[16]; +}; +typedef struct DInfo DInfo; + +struct DXInfo { + UInt8 opaque[16]; +}; +typedef struct DXInfo DXInfo; + + +enum { + kMinHFSPlusVolumeSize = (512 * 1024), + + kBytesPerSector = 512, + kBitsPerSector = 4096, + kBTreeHeaderUserBytes = 128, + kLog2SectorSize = 9, + kHFSNodeSize = 512, + kHFSMaxAllocationBlks = 65536, + + kHFSPlusDataClumpFactor = 16, + kHFSPlusRsrcClumpFactor = 16, + + kWriteSeqNum = 2, + kHeaderBlocks = 3, + kTailBlocks = 2, + kMDBStart = 2, + kVolBitMapStart = kHeaderBlocks, + + /* Desktop DB, Desktop DF, Finder, System, ReadMe */ + kWapperFileCount = 5, + /* Maximum wrapper size is 32MB */ + kMaxWrapperSize = 1024 * 1024 * 32, + /* Maximum volume that can be wrapped is 256GB */ + kMaxWrapableSectors = (kMaxWrapperSize/8) * (65536/512) +}; + +/* B-tree key descriptor */ +#define KD_SKIP 0 +#define KD_BYTE 1 +#define KD_SIGNBYTE 2 +#define KD_STRING 3 +#define KD_WORD 4 +#define KD_SIGNWORD 5 +#define KD_LONG 6 +#define KD_SIGNLONG 7 +#define KD_FIXLENSTR 8 +#define KD_DTDBSTR 9 +#define KD_USEPROC 10 + + +enum { + kTextEncodingMacRoman = 0L, + kTextEncodingMacJapanese = 1 +}; + + +/* + * The following constant sets the default block size. 
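+ * (DFL_BLKSIZE is defined below as HFSOPTIMALBLKSIZE, i.e. 4096 bytes.)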
+ * This constant must be a power of 2 and meet the following constraints: + * MINBSIZE <= DFL_BLKSIZE <= MAXBSIZE + * sectorsize <= DFL_BLKSIZE + */ +#define HFSOPTIMALBLKSIZE 4096 +#define HFSMINBSIZE 512 +#define HFSMAXBSIZE 2147483648U +#define DFL_BLKSIZE HFSOPTIMALBLKSIZE + + +#define kDTDF_FileID 16 +#define kDTDF_Name "Desktop DF" +#define kDTDF_Chars 10 +#define kDTDF_Type 'DTFL' +#define kDTDF_Creator 'DMGR' + +#define kDTDB_FileID 17 +#define kDTDB_Name "Desktop DB" +#define kDTDB_Chars 10 +#define kDTDB_Type 'BTFL' +#define kDTDB_Creator 'DMGR' +#define kDTDB_Size 1024 + +#define kReadMe_FileID 18 +#define kReadMe_Name "ReadMe" +#define kReadMe_Chars 6 +#define kReadMe_Type 'ttro' +#define kReadMe_Creator 'ttxt' + +#define kFinder_FileID 19 +#define kFinder_Name "Finder" +#define kFinder_Chars 6 +#define kFinder_Type 'FNDR' +#define kFinder_Creator 'MACS' + +#define kSystem_FileID 20 +#define kSystem_Name "System" +#define kSystem_Chars 6 +#define kSystem_Type 'zsys' +#define kSystem_Creator 'MACS' + + + +#if !defined(FALSE) && !defined(TRUE) +enum { + FALSE = 0, + TRUE = 1 +}; +#endif + + +#define kDefaultVolumeNameStr "untitled" + +/* + * This is the straight GMT conversion constant: + * + * 00:00:00 January 1, 1970 - 00:00:00 January 1, 1904 + * (3600 * 24 * ((365 * (1970 - 1904)) + (((1970 - 1904) / 4) + 1))) + */ +#define MAC_GMT_FACTOR 2082844800UL + + +/* sectorSize = kBytesPerSector = 512 + sectorOffset and totalSectors are in terms of 512-byte sector size. +*/ +struct DriveInfo { + int fd; + uint32_t sectorSize; + uint32_t sectorOffset; + uint64_t totalSectors; + + /* actual device info. physSectorSize is necessary to de-block + * while using the raw device. + */ + uint32_t physSectorSize; + uint64_t physSectorsPerIO; + uint64_t physTotalSectors; +}; +typedef struct DriveInfo DriveInfo; + +enum { + kMakeHFSWrapper = 0x01, + kMakeMaxHFSBitmap = 0x02, + kMakeStandardHFS = 0x04, + kMakeCaseSensitive = 0x08, + kUseAccessPerms = 0x10, + kMakeContentProtect= 0x20, +}; + + +struct hfsparams { + uint32_t flags; /* kMakeHFSWrapper, ... */ + uint32_t blockSize; + uint32_t rsrcClumpSize; + uint32_t dataClumpSize; + uint32_t nextFreeFileID; + + uint32_t catalogClumpSize; + uint32_t catalogNodeSize; + uint32_t catalogExtsCount; + uint32_t catalogStartBlock; + + uint32_t extentsClumpSize; + uint32_t extentsNodeSize; + uint32_t extentsExtsCount; + uint32_t extentsStartBlock; + + uint32_t attributesClumpSize; + uint32_t attributesNodeSize; + uint32_t attributesExtsCount; + uint32_t attributesStartBlock; + + uint32_t allocationClumpSize; + uint32_t allocationExtsCount; + uint32_t allocationStartBlock; + + uint32_t createDate; /* in UTC */ + uint32_t hfsAlignment; + unsigned char volumeName[kHFSPlusMaxFileNameChars + 1]; /* in UTF-8 */ + uint32_t encodingHint; + uint32_t journaledHFS; + uint32_t journalSize; + uint32_t journalInfoBlock; + uint32_t journalBlock; + char *journalDevice; + uid_t owner; + gid_t group; + mode_t mask; +#ifdef DEBUG_BUILD + uint16_t protectlevel; +#endif + uint32_t fsStartBlock; /* allocation block offset where the btree allocaiton should start */ + uint32_t nextAllocBlock; /* Set VH nextAllocationBlock */ +}; +typedef struct hfsparams hfsparams_t; + +extern int make_hfsplus(const DriveInfo *driveInfo, hfsparams_t *defaults); + + +#if __STDC__ +void fatal(const char *fmt, ...); +#else +void fatal(); +#endif -- 2.45.2