2 * Copyright (c) 2000-2002, 2004-2008 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #include "Scavenger.h"
27 #define DEBUG_HARDLINKCHECK 0
29 /* If set, the node in hash is initialized and has valid inodeID */
30 #define LINKINFO_INIT 0x01
31 /* If set, verification of corresponding inode is completed successfully */
32 #define LINKINFO_CHECK 0x02
34 /* info saved for each indirect link encountered */
35 struct IndirectLinkInfo
{
36 /* linkID is link reference number for file hard links, and
37 * inodeID for directory hard links.
42 struct HardLinkList
*list
;
45 #define VISITED_INODE_ID 1
50 uint32_t priv_dir_itime
; /* Creation (initialization) time of metadata folder */
51 uint32_t root_dir_itime
; /* Creation (initialization) time of root folder */
52 PrimeBuckets
*fileBucket
;
61 HFSPlusCatalogKey gMetaDataDirKey
= {
63 2, /* parent directory ID */
65 21, /* number of unicode characters */
69 'P','r','i','v','a','t','e',' ',
75 /* private local routines */
76 static int GetPrivateDir(SGlobPtr gp
, CatalogRecord
*rec
);
77 static int GetRootDir(SGlobPtr gp
, CatalogRecord
*rec
);
78 static int RecordOrphanOpenUnlink(SGlobPtr gp
, UInt32 parID
, unsigned char * filename
);
79 static int RecordBadHardLinkChainFirst(SGlobPtr
, UInt32
, UInt32
, UInt32
);
80 static int RecordBadHardLinkNext(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
);
81 static int RecordBadHardLinkPrev(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
);
82 static int RecordBadLinkCount(SGlobPtr gp
, UInt32 inodeID
, UInt32 is
, UInt32 shouldbe
) ;
83 static int RecordOrphanLink(SGlobPtr gp
, Boolean isdir
, UInt32 linkID
);
84 static int RecordOrphanInode(SGlobPtr gp
, Boolean isdir
, UInt32 inodeID
);
85 static void hash_insert(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
);
86 static struct IndirectLinkInfo
* hash_search(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
);
89 * Some functions used when sorting the hard link chain.
90 * chain_compare() is used by qsort; find_id is just a linear
91 * search to find a specific fileID; and tsort does a
92 * topological sort on the linked list.
/* qsort(3) comparator: orders HardLinkList entries by ascending prev-link ID,
 * so chain heads (prev == 0) sort to the front.
 * NOTE(review): if prev is unsigned, the subtraction can wrap for large IDs;
 * presumably IDs stay small enough in practice — confirm. */
95 chain_compare(const void *a1
, const void *a2
) {
96 struct HardLinkList
*left
= (struct HardLinkList
*)a1
;
97 struct HardLinkList
*right
= (struct HardLinkList
*)a2
;
/* ascending by prev */
99 return (left
->prev
- right
->prev
);
/* Linear search of list[0..nel-1] for the entry whose fileID equals id;
 * helper for tsort(). (The return statements fall in an elided span of
 * this extraction.) */
103 find_id(struct HardLinkList
*list
, int nel
, int id
)
/* scan every slot; nel is the element count */
106 for (i
= 0; i
< nel
; i
++) {
107 if (list
[i
].fileID
== id
)
/* Topologically sort the nel hard-link entries in `list` into prev/next
 * chain order, building the result in a scratch array and copying it back.
 * NOTE(review): the calloc failure check and loop-exit handling fall in
 * elided spans — confirm against the full source. */
114 tsort(struct HardLinkList
*list
, int nel
)
116 struct HardLinkList
*tmp
;
117 int cur_indx
, tmp_indx
= 0;
/* scratch array that receives the chain-ordered entries */
121 tmp
= calloc(sizeof(struct HardLinkList
), nel
);
128 * Since we only check list.next when doing the sort, we want to
129 * start with nodes that have prev == 0 (ones at the top of the
130 * graph, in other words). If there aren't any with a prev of 0,
131 * then the chain is broken somehow, and we'll repair it later.
133 qsort(list
, nel
, sizeof(list
[0]), chain_compare
);
135 for (cur_indx
= 0; cur_indx
< nel
; cur_indx
++) {
137 /* Skip nodes we've already come across */
138 if (list
[cur_indx
].fileID
== 0)
141 /* Copy this node over to the new list */
142 tmp
[tmp_indx
++] = list
[cur_indx
];
/* mark the source slot as consumed */
143 list
[cur_indx
].fileID
= 0;
145 /* ... and then find all its children. */
146 for (i
= tmp
[tmp_indx
-1].next
; i
!= 0; ) {
147 // look for the node in list with that fileID
148 int j
= find_id(list
, nel
, i
);
150 // We couldn't find it
154 // we add this one to tmp
155 tmp
[tmp_indx
++] = list
[j
];
/* follow the chain to the next link ID */
157 i
= tmp
[tmp_indx
-1].next
;
162 /* Copy the temporary list over, and we're done. */
163 memcpy(list
, tmp
, nel
* sizeof(struct HardLinkList
));
175 * Verify that the linked list of hard link nodes (the catalog entries, not the indirect
176 * node in the private metadata directory) are correct and sane. If any discrepancies
177 * are detected, create repair order.
179 * To do this, we need to topologically sort the list, and then ensure that the prev/next
180 * chains are correct.
184 CheckHardLinkList(SGlobPtr gp
, UInt32 inodeID
, struct HardLinkList
*list
, int calc_link_count
, UInt32 firstID
)
190 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
191 plog("\tCheckHardLinkList: list=NULL for inodeID = %u\n", inodeID
);
196 * If we have a list, then we sort and verify it. It's pretty easy, once
197 * we're sorted, and tsort() above does the hard work for that.
199 if (calc_link_count
> 1) {
200 retval
= tsort(list
, calc_link_count
);
206 /* Previous link of first link should always be zero */
207 if (list
[0].prev
!= 0) {
208 RecordBadHardLinkPrev(gp
, list
[0].fileID
, list
[0].prev
, 0);
211 /* First ID in the inode should match the ID of the first hard link */
212 if (list
[0].fileID
!= firstID
) {
213 RecordBadHardLinkChainFirst(gp
, inodeID
, firstID
, list
[0].fileID
);
216 /* Check if the previous/next IDs for all nodes except the first node are valid */
217 for (indx
= 1; indx
< calc_link_count
; indx
++) {
218 if (list
[indx
-1].next
!= list
[indx
].fileID
) {
219 RecordBadHardLinkNext(gp
, list
[indx
-1].fileID
, list
[indx
-1].next
, list
[indx
].fileID
);
222 if (list
[indx
].prev
!= list
[indx
-1].fileID
) {
223 RecordBadHardLinkPrev(gp
, list
[indx
].fileID
, list
[indx
].prev
, list
[indx
-1].fileID
);
227 /* Next ID for the last link should always be zero */
228 if (list
[calc_link_count
-1].next
!= 0) {
229 RecordBadHardLinkNext(gp
, list
[calc_link_count
-1].fileID
, list
[calc_link_count
-1].next
, 0);
233 #if DEBUG_HARDLINKCHECK
234 /* This is just for debugging -- it's useful to know what the list looks like */
235 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
236 for (indx
= 0; indx
< calc_link_count
; indx
++) {
237 fplog(stderr
, "CNID %u: #%u: <%u, %u, %u>\n", inodeID
, indx
, list
[indx
].prev
, list
[indx
].fileID
, list
[indx
].next
);
248 * Get ready to capture indirect link info.
249 * Called before iterating over all the catalog leaf nodes.
252 HardLinkCheckBegin(SGlobPtr gp
, void** cookie
)
254 struct HardLinkInfo
*info
;
256 CatalogRecord rootFolder
;
259 if (GetPrivateDir(gp
, &rec
) == 0) {
260 folderID
= rec
.hfsPlusFolder
.folderID
;
265 info
= (struct HardLinkInfo
*) malloc(sizeof(struct HardLinkInfo
));
268 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
269 plog("hardLinkCheckBegin: malloc(%zu) failed\n", sizeof(struct HardLinkInfo
));
274 info
->privDirID
= folderID
;
275 info
->priv_dir_itime
= folderID
? rec
.hfsPlusFolder
.createDate
: 0;
276 if (GetRootDir(gp
, &rootFolder
) == 0) {
277 info
->root_dir_itime
= rootFolder
.hfsPlusFolder
.createDate
;
279 info
->root_dir_itime
= 0;
284 /* We will use the ID of private metadata directory for file hard
285 * links to skip over hard link inode for an alias from directory
288 gp
->filelink_priv_dir_id
= folderID
;
291 info
->fileBucket
= calloc(1, sizeof(PrimeBuckets
));
292 if (info
->fileBucket
== NULL
) {
293 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
294 plog("HardLinkCheckBegin: prime bucket allocation failed\n");
305 * Dispose of captured data.
306 * Called after calling CheckHardLinks.
309 HardLinkCheckEnd(void * cookie
)
312 struct HardLinkInfo
* infoPtr
;
314 infoPtr
= (struct HardLinkInfo
*) cookie
;
/* release the prime-bucket accounting first, then the info block itself */
315 if (infoPtr
->fileBucket
) {
316 free(infoPtr
->fileBucket
);
317 infoPtr
->fileBucket
= NULL
;
319 DisposeMemory(cookie
);
324 /* Structures for file hard link hash used when a
325 * file hard link created in pre-Leopard OS is detected
326 * i.e. the file inode and hard links do not have the
327 * kHFSHasLinkChainBit set, and the first link, the
328 * previous link and the next link IDs are zero. The
329 * link count for such hard links cannot be verified
330 * using CRT, therefore it is accounted in this hash.
332 #define FILELINK_HASH_SIZE 257
334 struct filelink_hash
{
336 UInt32 found_link_count
;
337 UInt32 calc_link_count
;
338 struct filelink_hash
*next
;
341 struct filelink_hash
**filelink_head
= NULL
;
342 UInt32 filelink_entry_count
= 0;
344 /* Search and return pointer to the entry for given inode ID.
345 * If no entry is found, return NULL.
347 static struct filelink_hash
*filelink_hash_search(UInt32 link_ref_num
)
349 struct filelink_hash
*cur
;
/* hash not allocated yet -> nothing can match */
351 if (filelink_head
== NULL
) {
/* walk the collision chain for this bucket */
355 cur
= filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
];
357 if (link_ref_num
== cur
->link_ref_num
) {
366 /* Allocate and insert entry for given inode ID in the
367 * hash. The caller function is responsible for searching
368 * for duplicates before calling this function.
369 * Returns the pointer to the new hash entry.
371 static struct filelink_hash
*filelink_hash_insert(UInt32 link_ref_num
)
373 struct filelink_hash
*cur
;
/* NOTE(review): the malloc NULL check falls in an elided span — confirm */
375 cur
= malloc(sizeof(struct filelink_hash
));
379 cur
->link_ref_num
= link_ref_num
;
380 cur
->found_link_count
= 0;
381 cur
->calc_link_count
= 0;
/* push onto the head of this bucket's collision chain */
382 cur
->next
= filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
];
383 filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
] = cur
;
384 filelink_entry_count
++;
388 /* Update the hash with information about a file hard link
389 * that points to given inode ID. The calculated link count
390 * for given inode is incremented.
391 * Returns zero if the value was successfully updated to hash,
392 * and ENOMEM on error.
394 static int filelink_hash_link(UInt32 link_ref_num
)
396 struct filelink_hash
*cur
;
398 /* If no hash exists, allocate the hash */
399 if (filelink_head
== NULL
) {
400 filelink_head
= calloc(FILELINK_HASH_SIZE
, sizeof(struct filelink_hash
*));
401 if (filelink_head
== NULL
) {
/* find the entry for this link reference number, creating it if absent */
406 cur
= filelink_hash_search(link_ref_num
);
408 cur
= filelink_hash_insert(link_ref_num
);
/* one more catalog link observed pointing at this inode */
411 cur
->calc_link_count
++;
418 /* Update the hash with information about given file inode.
419 * The found link count in the hash is updated with the
420 * link count value provided.
421 * Returns zero if the value was successfully updated to hash,
422 * and ENOMEM on error.
424 int filelink_hash_inode(UInt32 link_ref_num
, UInt32 linkCount
)
426 struct filelink_hash
*cur
;
428 /* If no hash exists, allocate the hash */
429 if (filelink_head
== NULL
) {
430 filelink_head
= calloc(FILELINK_HASH_SIZE
, sizeof(struct filelink_hash
*));
431 if (filelink_head
== NULL
) {
/* find or create the entry for this inode's link reference number */
436 cur
= filelink_hash_search(link_ref_num
);
438 cur
= filelink_hash_insert(link_ref_num
);
/* record the on-disk link count for later comparison with calc_link_count */
441 cur
->found_link_count
= linkCount
;
447 /* If the file link hash was used to account for
448 * link count of file hard links created on pre-Leopard
449 * OS, destroy the hash by freeing all allocated
452 static void filelink_hash_destroy(void)
455 struct filelink_hash
*cur
;
/* free every collision chain, bucket by bucket */
457 for (i
= 0; i
< FILELINK_HASH_SIZE
; i
++) {
458 while (filelink_head
[i
]) {
/* unlink the head entry of this bucket before freeing it */
459 cur
= filelink_head
[i
];
460 filelink_head
[i
] = cur
->next
;
/* reset globals so a later run can reallocate cleanly */
465 filelink_head
= NULL
;
466 filelink_entry_count
= 0;
472 * Capture indirect link info.
473 * Called for every indirect link in the catalog.
476 CaptureHardLink(void *cookie
, const HFSPlusCatalogFile
*file
)
478 struct HardLinkInfo
* info
= (struct HardLinkInfo
*) cookie
;
480 /* A file hard link created on pre-Leopard OS does not
481 * have kHFSHasLinkChainBit set or prev/next link IDs.
482 * Ignore such file hard links from all check and CRT account
483 * and instead account the information in hash to verify the
486 if ((info
->fileBucket
== NULL
) ||
487 (((file
->flags
& kHFSHasLinkChainMask
) == 0) &&
488 (file
->hl_prevLinkID
== 0) &&
489 (file
->hl_nextLinkID
== 0))) {
490 filelink_hash_link(file
->hl_linkReference
);
492 /* For file hard links, add link reference number
493 * and catalog link ID pair to the prime buckets.
495 hardlink_add_bucket(info
->fileBucket
, file
->hl_linkReference
,
498 if ((file
->flags
& kHFSHasLinkChainMask
) == 0) {
499 record_link_badchain(info
->globals
, false);
502 if ((info
->priv_dir_itime
&& file
->createDate
!= info
->priv_dir_itime
) &&
503 (info
->root_dir_itime
&& file
->createDate
!= info
->root_dir_itime
)) {
510 plog("Hard Link catalog entry %u has bad time %u (should be %u, or at least %u)\n",
511 file
->fileID
, file
->createDate
, info
->priv_dir_itime
, info
->root_dir_itime
);
512 correct
= info
->priv_dir_itime
;
514 p
= AllocMinorRepairOrder(info
->globals
, 0);
517 plog("Unable to allocate hard link time repair order!");
521 fsckPrint(info
->globals
->context
, E_BadHardLinkDate
);
522 snprintf(str1
, sizeof(str1
), "%u", correct
);
523 snprintf(str2
, sizeof(str2
), "%u", file
->createDate
);
524 fsckPrint(info
->globals
->context
, E_BadValue
, str1
, str2
);
526 p
->type
= E_BadHardLinkDate
;
527 p
->parid
= file
->fileID
;
528 p
->correct
= info
->priv_dir_itime
;
529 p
->incorrect
= file
->createDate
;
536 * RepairHardLinkChains
538 * Cycle through the catalog tree, and generate repair orders for hard
539 * links that may be broken.
542 RepairHardLinkChains(SGlobPtr gp
, Boolean isdir
)
545 struct IndirectLinkInfo
*linkInfo
= NULL
;
547 HFSPlusCatalogKey
*keyp
;
548 BTreeIterator iterator
;
549 FSBufferDescriptor btrec
;
551 UInt32 linkID
, inodeID
;
555 int slotsUsed
= 0, slots
= 0;
563 metadirid
= gp
->dirlink_priv_dir_id
;
564 prefixlen
= strlen(HFS_DIRINODE_PREFIX
);
565 prefixName
= HFS_DIRINODE_PREFIX
;
567 metadirid
= gp
->filelink_priv_dir_id
;
568 prefixlen
= strlen(HFS_INODE_PREFIX
);
569 prefixName
= HFS_INODE_PREFIX
;
572 if (metadirid
== 0) {
573 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
575 plog ("\tPrivate directory for dirlinks not found. Stopping repairs.\n");
577 plog ("\tPrivate directory for filelinks not found. Stopping repairs.\n");
584 // Initialize the hash
585 if (GetPrivateDir(gp
, &rec
) == 0 && rec
.hfsPlusFolder
.valence
!= 0) {
586 entries
= rec
.hfsPlusFolder
.valence
+ 10;
587 folderID
= rec
.hfsPlusFolder
.folderID
;
593 for (slots
= 1; slots
<= entries
; slots
<<= 1)
595 if (slots
< (entries
+ (entries
/3)))
597 linkInfo
= calloc(slots
, sizeof(struct IndirectLinkInfo
));
598 if (linkInfo
== NULL
) {
599 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
600 plog("RepairHardLinkChains: calloc(%d, %zu) failed\n", slots
, sizeof(struct IndirectLinkInfo
));
605 // Done initializing the hash
607 // Set up the catalog BTree iterator
608 // (start from the root folder, and work our way down)
609 fcb
= gp
->calculatedCatalogFCB
;
610 ClearMemory(&iterator
, sizeof(iterator
));
611 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
612 BuildCatalogKey(kHFSRootFolderID
, NULL
, true, (CatalogKey
*)keyp
);
613 btrec
.bufferAddress
= &rec
;
615 btrec
.itemSize
= sizeof(rec
);
617 /* Counter for number of inodes found and verified in the
618 * hash. When an inode is found when checking the hard links,
619 * the value is incremented. When an inode's linked list and
620 * link count are verified, the value is decremented. If
621 * this value remains non-zero at the end, there are
627 * This chunk of code iterates through the entire catalog BTree.
628 * For each hard link node (that is, the "directory entry" that
629 * points to the actual node in the metadata directory), it may
630 * add it to the hash (if it doesn't exist yet; it also then increments
631 * the link count for that "inode"); it also creates an array
632 * of <previous, fileid, next> for the linked list.
634 for (result
= BTIterateRecord(fcb
, kBTreeFirstRecord
, &iterator
, &btrec
, &reclen
);
636 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
, &btrec
, &reclen
)) {
637 HFSPlusCatalogFile
*file
= &rec
.hfsPlusFile
;
638 Boolean islink
= false;
640 if (rec
.recordType
!= kHFSPlusFileRecord
)
644 /* Assume that this is a directory hard link if
645 * at least one value in finder info corresponds to
646 * alias, and the alias is not a file inode, and
647 * either the inode number is greater than
648 * kHFSFirstUserCatalogNodeID or the flag has
649 * kHFSHasLinkChainBit set.
651 if (((file
->userInfo
.fdType
== kHFSAliasType
) ||
652 (file
->userInfo
.fdCreator
== kHFSAliasCreator
) ||
653 (file
->userInfo
.fdFlags
& kIsAlias
)) &&
654 (keyp
->parentID
!= gp
->filelink_priv_dir_id
) &&
655 ((file
->hl_linkReference
>= kHFSFirstUserCatalogNodeID
) ||
656 (file
->flags
& kHFSHasLinkChainMask
))) {
657 flags
= rec
.hfsPlusFile
.flags
;
660 } else if (file
->userInfo
.fdType
== kHardLinkFileType
&&
661 file
->userInfo
.fdCreator
== kHFSPlusCreator
) {
662 flags
= rec
.hfsPlusFile
.flags
;
666 struct IndirectLinkInfo
*li
= NULL
;
667 struct HardLinkList
*tlist
= NULL
;
671 linkID
= file
->fileID
;
672 inodeID
= file
->bsdInfo
.special
.iNodeNum
;
674 /* Now that we are in repair, all hard links should
675 * have this bit set because we upgrade all pre-Leopard
676 * file hard links to Leopard hard links on any
677 * file hard link repairs.
679 if ((flags
& kHFSHasLinkChainMask
) == 0) {
680 record_link_badflags(gp
, linkID
, isdir
, flags
,
681 flags
| kHFSHasLinkChainMask
);
684 /* For directory hard links, check ownerFlags and
685 * finderInfo because we could have missed creating
686 * repair orders in verification. Verification could
687 * have stopped before we saw this record because it
688 * stops as soon as it determines that it needs full
689 * knowledge of hard links on the disk during repair.
692 /* Check if the directory hard link has UF_IMMUTABLE bit set */
693 if ((file
->bsdInfo
.ownerFlags
& UF_IMMUTABLE
) == 0) {
694 record_dirlink_badownerflags(gp
, file
->fileID
,
695 file
->bsdInfo
.ownerFlags
,
696 file
->bsdInfo
.ownerFlags
| UF_IMMUTABLE
, true);
699 /* Check Finder Info */
700 if ((file
->userInfo
.fdType
!= kHFSAliasType
) ||
701 (file
->userInfo
.fdCreator
!= kHFSAliasCreator
) ||
702 ((file
->userInfo
.fdFlags
& kIsAlias
) == 0)) {
703 record_link_badfinderinfo(gp
, file
->fileID
, true);
707 /* For directory hard links, hash using inodeID. For
708 * file hard links, hash using link reference number
709 * (which is same as inode ID for file hard links
710 * created post-Tiger). For each inodeID, add the
711 * <prev, id, next> triad.
713 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
718 /* hash_insert() initializes linkCount to 1 */
719 hash_insert(inodeID
, slots
, slotsUsed
++, linkInfo
);
720 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
724 * Either the hash passed in should have the entry, or
725 * the one we just created should (because we just added it);
726 * either way, if it's not here, we've got something weird
727 * going on, so let's just abort now.
733 count
= li
->linkCount
- 1;
734 /* Reallocate memory to store information about file/directory hard links */
735 if ((count
% 10) == 0) {
736 tlist
= realloc(li
->list
, (count
+ 10) * sizeof(struct HardLinkList
));
743 li
->list
= tlist
; // May be the same
744 for (i
= count
; i
< (count
+ 10); i
++) {
745 memset(&li
->list
[i
], 0, sizeof(li
->list
[i
]));
750 /* Store information about this hard link */
752 li
->list
[count
].fileID
= linkID
;
753 li
->list
[count
].prev
= file
->hl_prevLinkID
;
754 li
->list
[count
].next
= file
->hl_nextLinkID
;
759 if (result
== btNotFound
)
760 result
= 0; // If we hit the end of the catalog tree, that's okay
767 * Next, we iterate through the metadata directory, and check the linked list.
770 ClearMemory(&iterator
, sizeof(iterator
));
771 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
772 BuildCatalogKey(metadirid
, NULL
, true, (CatalogKey
*)keyp
);
773 btrec
.bufferAddress
= &rec
;
775 btrec
.itemSize
= sizeof(rec
);
777 for (result
= BTSearchRecord(fcb
, &iterator
, kInvalidMRUCacheKey
, &btrec
, &reclen
, &iterator
);
779 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
, &btrec
, &reclen
)) {
780 unsigned char filename
[64];
782 struct IndirectLinkInfo
*li
= NULL
;
784 if (rec
.recordType
== kHFSPlusFolderThreadRecord
||
785 rec
.recordType
== kHFSPlusFileThreadRecord
)
787 if (keyp
->parentID
!= metadirid
)
789 if ((isdir
&& rec
.recordType
!= kHFSPlusFolderRecord
) ||
790 (!isdir
&& rec
.recordType
!= kHFSPlusFileRecord
))
792 (void)utf_encodestr(keyp
->nodeName
.unicode
,
793 keyp
->nodeName
.length
* 2,
794 filename
, &len
, sizeof(filename
));
796 if (strstr((char*)filename
, prefixName
) != (char*)filename
)
800 inodeID
= rec
.hfsPlusFolder
.folderID
;
802 flags
= rec
.hfsPlusFolder
.flags
;
803 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
805 inodeID
= rec
.hfsPlusFile
.fileID
;
806 link_ref_num
= atol((char*)&filename
[prefixlen
]);
807 flags
= rec
.hfsPlusFile
.flags
;
808 li
= hash_search(link_ref_num
, slots
, slotsUsed
, linkInfo
);
811 /* file/directory inode should always have kHFSHasLinkChainBit set */
812 if ((flags
& kHFSHasLinkChainMask
) == 0) {
813 record_inode_badflags(gp
, inodeID
, isdir
, flags
,
814 flags
| kHFSHasLinkChainMask
, true);
818 UInt32 first_link_id
= 0;
819 uint32_t linkCount
= 0;
821 result
= get_first_link_id(gp
, &rec
, inodeID
, isdir
, &first_link_id
);
823 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
)
824 plog("\tError getting first link ID for inode = %u (result=%d)\n", inodeID
, result
);
827 /* Check and create repairs for doubly linked list */
828 result
= CheckHardLinkList(gp
, inodeID
, li
->list
, li
->linkCount
, first_link_id
);
830 linkCount
= isdir
? rec
.hfsPlusFolder
.bsdInfo
.special
.linkCount
: rec
.hfsPlusFile
.bsdInfo
.special
.linkCount
;
831 if (linkCount
!= li
->linkCount
) {
832 RecordBadLinkCount(gp
, inodeID
, linkCount
, li
->linkCount
);
835 li
->flags
|= LINKINFO_CHECK
;
838 /* Not found in hash, this is orphaned file/directory inode */
839 RecordOrphanInode(gp
, isdir
, inodeID
);
843 if (result
== btNotFound
) {
850 /* Check for orphan hard links */
853 for (i
= 0; i
< slots
; i
++) {
854 /* If node is initialized but never checked, record orphan link */
855 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) &&
856 ((linkInfo
[i
].flags
& LINKINFO_CHECK
) == 0)) {
857 for (j
= 0; j
< linkInfo
[i
].linkCount
; j
++) {
858 RecordOrphanLink(gp
, isdir
, linkInfo
[i
].list
[j
].fileID
);
867 for (i
= 0; i
< slots
; i
++) {
868 if (linkInfo
[i
].list
)
869 free(linkInfo
[i
].list
);
880 * Check indirect node link counts against the indirect
881 * links that were found. There are 4 possible problems
883 * 1. orphaned indirect node (i.e. no links found)
884 * 2. orphaned indirect link (i.e. indirect node missing)
885 * 3. incorrect link count
886 * 4. indirect link id was 0 (i.e. link id wasn't preserved)
889 CheckHardLinks(void *cookie
)
891 struct HardLinkInfo
*info
= (struct HardLinkInfo
*)cookie
;
896 HFSPlusCatalogKey
* keyp
;
897 BTreeIterator iterator
;
898 FSBufferDescriptor btrec
;
903 unsigned char filename
[64];
904 PrimeBuckets
*catBucket
;
906 /* All done if no hard links exist. */
911 fsckPrint(gp
->context
, hfsHardLinkCheck
);
913 folderID
= info
->privDirID
;
915 catBucket
= calloc(1, sizeof(PrimeBuckets
));
916 if (catBucket
== NULL
) {
917 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
918 plog("CheckHardLinks: calloc(1, %zu) failed\n", sizeof(PrimeBuckets
));
925 fcb
= gp
->calculatedCatalogFCB
;
926 prefixlen
= strlen(HFS_INODE_PREFIX
);
927 ClearMemory(&iterator
, sizeof(iterator
));
928 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
929 btrec
.bufferAddress
= &rec
;
931 btrec
.itemSize
= sizeof(rec
);
933 * position iterator at folder thread record
934 * (i.e. one record before first child)
936 ClearMemory(&iterator
, sizeof(iterator
));
937 BuildCatalogKey(folderID
, NULL
, true, (CatalogKey
*)keyp
);
938 result
= BTSearchRecord(fcb
, &iterator
, kInvalidMRUCacheKey
, &btrec
,
940 if ((result
!= 0) && (result
!= btNotFound
)) {
944 /* Visit all the children in private directory. */
946 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
,
948 if (result
|| keyp
->parentID
!= folderID
)
951 if (rec
.recordType
!= kHFSPlusFileRecord
)
954 (void) utf_encodestr(keyp
->nodeName
.unicode
,
955 keyp
->nodeName
.length
* 2,
956 filename
, &len
, sizeof(filename
));
957 filename
[len
] = '\0';
959 /* Report Orphaned nodes only in debug mode */
960 if ((strstr((char *)filename
, HFS_DELETE_PREFIX
) == (char *)filename
) &&
961 (fsckGetVerbosity(gp
->context
) == kDebugLog
)) {
962 RecordOrphanOpenUnlink(gp
, folderID
, filename
);
966 if (strstr((char *)filename
, HFS_INODE_PREFIX
) != (char *)filename
)
969 result
= inode_check(gp
, catBucket
, (CatalogRecord
*)&rec
, (CatalogKey
*)keyp
, false);
976 if (result
== btNotFound
) {
981 * If we've reached this point, and result is clean,
982 * then we need to compare the two hard link
983 * buckets: if they don't match, then we have a hard link chain error, and
984 * need to either repair it, or just mark the error.
986 if ((result
== 0) && (info
->fileBucket
!= NULL
)) {
987 result
= compare_prime_buckets(catBucket
, info
->fileBucket
);
989 record_link_badchain(gp
, false);
990 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
991 plog("\tfilelink prime buckets do not match\n");
997 /* If hard links created in pre-Leopard OS were detected, they were
998 * added to the hash for checking link counts later. Check the
999 * link counts from the hash. Note that the hard links created in
1000 * pre-Leopard OS do not have kHFSHasLinkChainBit set in the inode
1001 * and the hard links, and the first/prev/next ID is zero --- and
1002 * hence they were ignored from CRT check and added to hash.
1004 if (filelink_entry_count
) {
1006 struct filelink_hash
*cur
;
1008 /* Since pre-Leopard OS hard links were detected, they
1009 * should be updated to new version. This is however
1010 * an opportunistic repair and no corruption will be
1011 * reported. This will be performed only if any other
1012 * file hard link repairs are performed.
1014 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
1015 plog("\tCheckHardLinks: found %u pre-Leopard file inodes.\n", filelink_entry_count
);
1018 for (i
= 0; i
< FILELINK_HASH_SIZE
; i
++) {
1019 cur
= filelink_head
[i
];
1021 if ((cur
->found_link_count
== 0) ||
1022 (cur
->calc_link_count
== 0) ||
1023 (cur
->found_link_count
!= cur
->calc_link_count
)) {
1024 record_link_badchain(gp
, false);
1033 if (filelink_entry_count
) {
1034 filelink_hash_destroy();
1046 * Get catalog entry for the "HFS+ Private Data" directory.
1047 * The indirect nodes are stored in this directory.
1050 GetPrivateDir(SGlobPtr gp
, CatalogRecord
* rec
)
1052 HFSPlusCatalogKey
* keyp
;
1053 BTreeIterator iterator
;
1054 FSBufferDescriptor btrec
;
1059 isHFSPlus
= VolumeObjectIsHFSPlus( );
/* set up the buffer descriptor so BTSearchRecord writes into caller's rec */
1063 ClearMemory(&iterator
, sizeof(iterator
));
1064 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
1066 btrec
.bufferAddress
= rec
;
1067 btrec
.itemCount
= 1;
1068 btrec
.itemSize
= sizeof(CatalogRecord
);
1070 /* look up record for HFS+ private folder */
1071 ClearMemory(&iterator
, sizeof(iterator
));
/* seed the iterator key with the well-known metadata directory key */
1072 CopyMemory(&gMetaDataDirKey
, keyp
, sizeof(gMetaDataDirKey
));
1073 result
= BTSearchRecord(gp
->calculatedCatalogFCB
, &iterator
,
1074 kInvalidMRUCacheKey
, &btrec
, &reclen
, &iterator
);
1082 * Get catalog entry for the Root Folder.
1085 GetRootDir(SGlobPtr gp
, CatalogRecord
* rec
)
1092 isHFSPlus
= VolumeObjectIsHFSPlus( );
/* fetch the root folder record (CNID 2) into caller's rec */
1096 result
= GetCatalogRecordByID(gp
, kHFSRootFolderID
, isHFSPlus
, &key
, rec
, &recSize
);
1104 * Record a repair to delete orphaned hard links, i.e. hard links
1105 * that do not have any corresponding inode.
1108 RecordOrphanLink(SGlobPtr gp
, Boolean isdir
, UInt32 linkID
)
/* report the error, then queue a minor repair order for the orphan link */
1112 fsckPrint(gp
->context
, isdir
? E_OrphanDirLink
: E_OrphanFileLink
, linkID
);
1114 p
= AllocMinorRepairOrder(gp
, 0);
1118 p
->type
= isdir
? E_OrphanDirLink
: E_OrphanFileLink
;
/* flag the catalog as needing link repairs */
1121 gp
->CatStat
|= S_LinkErrRepair
;
1129 * Record a repair for orphan inodes, i.e. inodes that do not have
1130 * any corresponding hard links.
1133 RecordOrphanInode(SGlobPtr gp
, Boolean isdir
, UInt32 inodeID
)
/* report the error, then queue a minor repair order for the orphan inode */
1137 fsckPrint(gp
->context
, isdir
? E_OrphanDirInode
: E_OrphanFileInode
, inodeID
);
1139 p
= AllocMinorRepairOrder(gp
, 0);
1143 p
->type
= isdir
? E_OrphanDirInode
: E_OrphanFileInode
;
/* flag the catalog as needing link repairs */
1146 gp
->CatStat
|= S_LinkErrRepair
;
1152 * RecordOrphanOpenUnlink
1154 * This is only called when debugging is turned on. Don't
1155 * record an actual error, just print out a message.
1158 RecordOrphanOpenUnlink(SGlobPtr gp
, UInt32 parID
, unsigned char* filename
)
/* informational only: report an open-but-unlinked ("temp") file by name */
1160 fsckPrint(gp
->context
, E_UnlinkedFile
, filename
);
/* Report and queue a minor repair order for an inode whose first-link ID
 * is `is` but should be `shouldbe`; fileID identifies the inode. */
1167 RecordBadHardLinkChainFirst(SGlobPtr
, UInt32
, UInt32
, UInt32
)
1170 char goodstr
[16], badstr
[16];
1172 fsckPrint(gp
->context
, E_InvalidLinkChainFirst
, fileID
);
/* print "should be X, is Y" detail */
1173 sprintf(goodstr
, "%u", shouldbe
);
1174 sprintf(badstr
, "%u", is
);
1175 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1177 p
= AllocMinorRepairOrder(gp
, 0);
1183 p
->type
= E_InvalidLinkChainFirst
;
1185 p
->correct
= shouldbe
;
1187 p
->parid
= fileID
; // *Not* the parent ID!
1188 gp
->CatStat
|= S_LinkErrRepair
;
/* Report and queue a minor repair order for a hard link whose prev-link ID
 * is `is` but should be `shouldbe`. */
1195 RecordBadHardLinkPrev(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
)
1198 char goodstr
[16], badstr
[16];
1200 fsckPrint(gp
->context
, E_InvalidLinkChainPrev
, fileID
);
/* print "should be X, is Y" detail */
1201 sprintf(goodstr
, "%u", shouldbe
);
1202 sprintf(badstr
, "%u", is
);
1203 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1205 p
= AllocMinorRepairOrder(gp
, 0);
1209 p
->type
= E_InvalidLinkChainPrev
;
1211 p
->correct
= shouldbe
;
1213 p
->parid
= fileID
; // *Not* the parent ID
1214 gp
->CatStat
|= S_LinkCount
;
/* Report and queue a minor repair order for a hard link whose next-link ID
 * is `is` but should be `shouldbe`. */
1219 RecordBadHardLinkNext(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
)
1222 char goodstr
[16], badstr
[16];
1224 fsckPrint(gp
->context
, E_InvalidLinkChainNext
, fileID
);
/* print "should be X, is Y" detail */
1226 sprintf(goodstr
, "%u", shouldbe
);
1227 sprintf(badstr
, "%u", is
);
1228 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1230 p
= AllocMinorRepairOrder(gp
, 0);
1234 p
->type
= E_InvalidLinkChainNext
;
1236 p
->correct
= shouldbe
;
1238 p
->parid
= fileID
; // *Not* the parent ID
1239 gp
->CatStat
|= S_LinkCount
;
/* Report and queue a minor repair order for an inode whose link count
 * is `is` but should be `shouldbe`. */
1244 RecordBadLinkCount(SGlobPtr gp
, UInt32 inodeID
, UInt32 is
, UInt32 shouldbe
)
1247 char goodstr
[16], badstr
[16];
1248 fsckPrint(gp
->context
, E_InvalidLinkCount
, inodeID
);
/* print "should be X, is Y" detail */
1250 sprintf(goodstr
, "%u", shouldbe
);
1251 sprintf(badstr
, "%u", is
);
1252 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1254 p
= AllocMinorRepairOrder(gp
, 0);
1258 p
->type
= E_InvalidLinkCount
;
1260 p
->correct
= shouldbe
;
1262 p
->parid
= inodeID
; // *Not* the parent ID
/* Insert linkID into the open-addressed IndirectLinkInfo table using linear
 * probing. totalSlots is a power of two (see the slots <<= 1 sizing loop in
 * RepairHardLinkChains), so `linkID & (totalSlots - 1)` is the home bucket.
 * A fresh slot is marked LINKINFO_INIT with linkCount = 1; duplicates and a
 * full table are only logged. */
1268 hash_insert(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
)
1272 i
= linkID
& (totalSlots
- 1);
/* stop probing after wrapping once around the table */
1274 last
= (i
+ (totalSlots
-1)) % totalSlots
;
1275 while ((i
!= last
) &&
1276 (linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1277 (linkInfo
[i
].linkID
!= linkID
)) {
1278 i
= (i
+ 1) % totalSlots
;
1281 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) == 0) {
/* sanity: an uninitialized slot should not have a leftover list */
1282 if (linkInfo
[i
].list
) {
1283 plog ("hash: overwriting data! (old:%u, new:%u)\n", linkInfo
[i
].linkID
, linkID
);
1286 linkInfo
[i
].flags
|= LINKINFO_INIT
;
1287 linkInfo
[i
].linkID
= linkID
;
1288 linkInfo
[i
].linkCount
= 1;
1289 } else if (linkInfo
[i
].linkID
== linkID
) {
1290 plog("hash: duplicate insert! (%d)\n", linkID
);
1293 plog("hash table full (%d entries) \n", slotsUsed
);
1299 static struct IndirectLinkInfo
*
1300 hash_search(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
)
1306 i
= linkID
& (totalSlots
- 1);
1308 last
= (i
+ (slotsUsed
-1)) % totalSlots
;
1309 while ((i
!= last
) &&
1310 (linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1311 (linkInfo
[i
].linkID
!= linkID
)) {
1312 i
= (i
+ 1) % totalSlots
;
1316 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1317 (linkInfo
[i
].linkID
== linkID
)) {
1318 return (&linkInfo
[i
]);