2 * Copyright (c) 2000-2002, 2004-2008 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #include "Scavenger.h"
27 #define DEBUG_HARDLINKCHECK 0
29 /* If set, the node in hash is initialized and has valid inodeID */
30 #define LINKINFO_INIT 0x01
31 /* If set, verification of corresponding inode is completed successfully */
32 #define LINKINFO_CHECK 0x02
34 /* info saved for each indirect link encountered */
35 struct IndirectLinkInfo
{
36 /* linkID is link reference number for file hard links, and
37 * inodeID for directory hard links.
42 struct HardLinkList
*list
;
45 #define VISITED_INODE_ID 1
50 uint32_t priv_dir_itime
; /* Creation (initialization) time of metadata folder */
51 uint32_t root_dir_itime
; /* Creation (initialization) time of root folder */
52 PrimeBuckets
*fileBucket
;
61 HFSPlusCatalogKey gMetaDataDirKey
= {
63 2, /* parent directory ID */
65 21, /* number of unicode characters */
69 'P','r','i','v','a','t','e',' ',
75 /* private local routines */
76 static int GetPrivateDir(SGlobPtr gp
, CatalogRecord
*rec
);
77 static int GetRootDir(SGlobPtr gp
, CatalogRecord
*rec
);
78 static int RecordOrphanOpenUnlink(SGlobPtr gp
, UInt32 parID
, unsigned char * filename
);
79 static int RecordBadHardLinkChainFirst(SGlobPtr
, UInt32
, UInt32
, UInt32
);
80 static int RecordBadHardLinkNext(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
);
81 static int RecordBadHardLinkPrev(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
);
82 static int RecordBadLinkCount(SGlobPtr gp
, UInt32 inodeID
, UInt32 is
, UInt32 shouldbe
) ;
83 static int RecordOrphanLink(SGlobPtr gp
, Boolean isdir
, UInt32 linkID
);
84 static int RecordOrphanInode(SGlobPtr gp
, Boolean isdir
, UInt32 inodeID
);
85 static void hash_insert(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
);
86 static struct IndirectLinkInfo
* hash_search(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
);
89 * Some functions used when sorting the hard link chain.
90 * chain_compare() is used by qsort; find_id is just a linear
91 * search to find a specific fileID; and tsort does a
92 * topological sort on the linked list.
95 chain_compare(const void *a1
, const void *a2
) {
96 struct HardLinkList
*left
= (struct HardLinkList
*)a1
;
97 struct HardLinkList
*right
= (struct HardLinkList
*)a2
;
99 return (left
->prev
- right
->prev
);
/*
 * find_id - linear search of a HardLinkList array for the entry whose
 * fileID equals "id" (helper for tsort(); see comment above).
 *
 * NOTE(review): this chunk is line-mangled; the leading numerals are
 * residue of original line numbers, and the return statements are not
 * visible in this view — confirm against the full source.
 */
103 find_id(struct HardLinkList
*list
, int nel
, int id
)
106 for (i
= 0; i
< nel
; i
++) {
107 if (list
[i
].fileID
== id
)
/*
 * tsort - topologically sorts the hard-link chain array "list" (nel
 * entries) into prev/next order: qsort() by prev (chain heads first),
 * then follow each head's next pointers via find_id(), building the
 * ordered result in a calloc'd temp array that is memcpy'd back.
 *
 * NOTE(review): line-mangled fragment; the calloc-failure check, the
 * not-found handling after find_id(), loop closings and the free(tmp)/
 * return lines are not visible here — confirm against full source.
 */
114 tsort(struct HardLinkList
*list
, int nel
)
116 struct HardLinkList
*tmp
;
117 int cur_indx
, tmp_indx
= 0;
121 tmp
= calloc(sizeof(struct HardLinkList
), nel
);
128 * Since we only check list.next when doing the sort, we want to
129 * start with nodes that have prev == 0 (ones at the top of the
130 * graph, in other words). If there aren't any with a prev of 0,
131 * then the chain is broken somehow, and we'll repair it later.
133 qsort(list
, nel
, sizeof(list
[0]), chain_compare
);
135 for (cur_indx
= 0; cur_indx
< nel
; cur_indx
++) {
137 /* Skip nodes we've already come across */
138 if (list
[cur_indx
].fileID
== 0)
141 /* Copy this node over to the new list */
142 tmp
[tmp_indx
++] = list
[cur_indx
];
143 list
[cur_indx
].fileID
= 0;
145 /* ... and then find all its children. */
146 for (i
= tmp
[tmp_indx
-1].next
; i
!= 0; ) {
147 // look for the node in list with that fileID
148 int j
= find_id(list
, nel
, i
);
150 // We couldn't find it
154 // we add this one to tmp
155 tmp
[tmp_indx
++] = list
[j
];
157 i
= tmp
[tmp_indx
-1].next
;
162 /* Copy the temporary list over, and we're done. */
163 memcpy(list
, tmp
, nel
* sizeof(struct HardLinkList
));
175 * Verify that the linked list of hard link nodes (the catalog entries, not the indirect
176 * node in the private metadata directory) are correct and sane. If any discrepancies
177 * are detected, create repair order.
179 * To do this, we need to topologically sort the list, and then ensure that the prev/next
180 * chains are correct.
/*
 * CheckHardLinkList - verifies the doubly-linked chain of catalog hard
 * link entries for one inode: tsort()s the list, then checks that the
 * first entry has prev == 0 and matches firstID, that each entry's
 * prev/next agree with its neighbors, and that the last entry's next is
 * 0.  Each mismatch creates a repair order via the RecordBad* helpers.
 *
 * NOTE(review): line-mangled fragment; local declarations (retval, indx),
 * early-return paths and closing braces are missing from this view.
 */
184 CheckHardLinkList(SGlobPtr gp
, UInt32 inodeID
, struct HardLinkList
*list
, int calc_link_count
, UInt32 firstID
)
190 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
191 plog("\tCheckHardLinkList: list=NULL for inodeID = %u\n", inodeID
);
196 * If we have a list, then we sort and verify it. It's pretty easy, once
197 * we're sorted, and tsort() above does the hard work for that.
199 if (calc_link_count
> 1) {
200 retval
= tsort(list
, calc_link_count
);
206 /* Previous link of first link should always be zero */
207 if (list
[0].prev
!= 0) {
208 RecordBadHardLinkPrev(gp
, list
[0].fileID
, list
[0].prev
, 0);
211 /* First ID in the inode should match the ID of the first hard link */
212 if (list
[0].fileID
!= firstID
) {
213 RecordBadHardLinkChainFirst(gp
, inodeID
, firstID
, list
[0].fileID
);
216 /* Check if the previous/next IDs for all nodes except the first node are valid */
217 for (indx
= 1; indx
< calc_link_count
; indx
++) {
218 if (list
[indx
-1].next
!= list
[indx
].fileID
) {
219 RecordBadHardLinkNext(gp
, list
[indx
-1].fileID
, list
[indx
-1].next
, list
[indx
].fileID
);
222 if (list
[indx
].prev
!= list
[indx
-1].fileID
) {
223 RecordBadHardLinkPrev(gp
, list
[indx
].fileID
, list
[indx
].prev
, list
[indx
-1].fileID
);
227 /* Next ID for the last link should always be zero */
228 if (list
[calc_link_count
-1].next
!= 0) {
229 RecordBadHardLinkNext(gp
, list
[calc_link_count
-1].fileID
, list
[calc_link_count
-1].next
, 0);
233 #if DEBUG_HARDLINKCHECK
234 /* This is just for debugging -- it's useful to know what the list looks like */
235 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
236 for (indx
= 0; indx
< calc_link_count
; indx
++) {
237 fplog(stderr
, "CNID %u: #%u: <%u, %u, %u>\n", inodeID
, indx
, list
[indx
].prev
, list
[indx
].fileID
, list
[indx
].next
);
248 * Get ready to capture indirect link info.
249 * Called before iterating over all the catalog leaf nodes.
/*
 * HardLinkCheckBegin - allocates and initializes the HardLinkInfo state
 * (returned via *cookie) used by CaptureHardLink/CheckHardLinks: records
 * the private metadata folder's ID and creation time, the root folder's
 * creation time, publishes the folder ID in gp->filelink_priv_dir_id,
 * and calloc's the file-hardlink prime bucket.
 *
 * NOTE(review): line-mangled fragment; the malloc/calloc failure returns,
 * the *cookie assignment and final return are missing from this view.
 */
252 HardLinkCheckBegin(SGlobPtr gp
, void** cookie
)
254 struct HardLinkInfo
*info
;
256 CatalogRecord rootFolder
;
259 if (GetPrivateDir(gp
, &rec
) == 0) {
260 folderID
= rec
.hfsPlusFolder
.folderID
;
265 info
= (struct HardLinkInfo
*) malloc(sizeof(struct HardLinkInfo
));
268 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
269 plog("hardLinkCheckBegin: malloc(%zu) failed\n", sizeof(struct HardLinkInfo
));
274 info
->privDirID
= folderID
;
275 info
->priv_dir_itime
= folderID
? rec
.hfsPlusFolder
.createDate
: 0;
276 if (GetRootDir(gp
, &rootFolder
) == 0) {
277 info
->root_dir_itime
= rootFolder
.hfsPlusFolder
.createDate
;
279 info
->root_dir_itime
= 0;
284 /* We will use the ID of private metadata directory for file hard
285 * links to skip over hard link inode for an alias from directory
288 gp
->filelink_priv_dir_id
= folderID
;
291 info
->fileBucket
= calloc(1, sizeof(PrimeBuckets
));
292 if (info
->fileBucket
== NULL
) {
293 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
294 plog("HardLinkCheckBegin: prime bucket allocation failed\n");
305 * Dispose of captured data.
306 * Called after calling CheckHardLinks.
/*
 * HardLinkCheckEnd - releases the state allocated by HardLinkCheckBegin:
 * frees the prime bucket (nulling the pointer) and disposes the cookie.
 *
 * NOTE(review): line-mangled fragment; a possible NULL-cookie guard and
 * closing braces are not visible in this view — confirm before relying
 * on behavior with cookie == NULL.
 */
309 HardLinkCheckEnd(void * cookie
)
312 struct HardLinkInfo
* infoPtr
;
314 infoPtr
= (struct HardLinkInfo
*) cookie
;
315 if (infoPtr
->fileBucket
) {
316 free(infoPtr
->fileBucket
);
317 infoPtr
->fileBucket
= NULL
;
319 DisposeMemory(cookie
);
324 /* Structures for file hard link hash used when a
325 * file hard link created in pre-Leopard OS is detected
326 * i.e. the file inode and hard links do not have the
327 * kHFSHasLinkChainBit set, and the first link, the
328 * previous link and the next link IDs are zero. The
329 * link count for such hard links cannot be verified
330 * using CRT, therefore it is accounted in this hash.
332 #define FILELINK_HASH_SIZE 257
334 struct filelink_hash
{
336 UInt32 found_link_count
;
337 UInt32 calc_link_count
;
338 struct filelink_hash
*next
;
341 struct filelink_hash
**filelink_head
= NULL
;
342 UInt32 filelink_entry_count
= 0;
344 /* Search and return pointer to the entry for given inode ID.
345 * If no entry is found, return NULL.
/*
 * filelink_hash_search - looks up the filelink_hash entry for the given
 * link reference number in the global filelink_head chained hash
 * (FILELINK_HASH_SIZE buckets, keyed by link_ref_num % size).
 *
 * NOTE(review): line-mangled fragment; the return statements and the
 * chain-walk advance (cur = cur->next) are missing from this view.
 */
347 static struct filelink_hash
*filelink_hash_search(UInt32 link_ref_num
)
349 struct filelink_hash
*cur
;
351 if (filelink_head
== NULL
) {
355 cur
= filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
];
357 if (link_ref_num
== cur
->link_ref_num
) {
366 /* Allocate and insert entry for given inode ID in the
367 * hash. The caller function is responsible for searching
368 * for duplicates before calling this function.
369 * Returns the pointer to the new hash entry.
/*
 * filelink_hash_insert - mallocs a new filelink_hash entry for
 * link_ref_num (counts zeroed), links it at the head of its bucket,
 * bumps filelink_entry_count, and returns it.  Callers must have
 * already checked for duplicates (see comment above).
 *
 * NOTE(review): line-mangled fragment; the malloc-failure check and the
 * return statement are missing from this view.
 */
371 static struct filelink_hash
*filelink_hash_insert(UInt32 link_ref_num
)
373 struct filelink_hash
*cur
;
375 cur
= malloc(sizeof(struct filelink_hash
));
379 cur
->link_ref_num
= link_ref_num
;
380 cur
->found_link_count
= 0;
381 cur
->calc_link_count
= 0;
382 cur
->next
= filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
];
383 filelink_head
[link_ref_num
% FILELINK_HASH_SIZE
] = cur
;
384 filelink_entry_count
++;
388 /* Update the hash with information about a file hard link
389 * that points to given inode ID. The calculated link count
390 * for given inode is incremented.
391 * Returns zero if the value was successfully updated to hash,
392 * and ENOMEM on error.
/*
 * filelink_hash_link - records one hard link pointing at link_ref_num:
 * lazily calloc's the bucket array, finds (or inserts) the entry, and
 * increments its calc_link_count.  Per the comment above, returns 0 on
 * success and ENOMEM on allocation failure.
 *
 * NOTE(review): line-mangled fragment; the ENOMEM returns after the
 * failed calloc/insert and the final return are missing from this view.
 */
394 static int filelink_hash_link(UInt32 link_ref_num
)
396 struct filelink_hash
*cur
;
398 /* If no hash exists, allocate the hash */
399 if (filelink_head
== NULL
) {
400 filelink_head
= calloc(FILELINK_HASH_SIZE
, sizeof(struct filelink_hash
*));
401 if (filelink_head
== NULL
) {
406 cur
= filelink_hash_search(link_ref_num
);
408 cur
= filelink_hash_insert(link_ref_num
);
411 cur
->calc_link_count
++;
418 /* Update the hash with information about given file inode.
419 * The found link count in the hash is updated with the
420 * link count value provided.
421 * Returns zero if the value was successfully updated to hash,
422 * and ENOMEM on error.
/*
 * filelink_hash_inode - records the on-disk link count of a file inode:
 * lazily allocates the bucket array, finds (or inserts) the entry for
 * link_ref_num, and stores linkCount in found_link_count.  Per the
 * comment above, returns 0 on success and ENOMEM on error.
 *
 * NOTE(review): line-mangled fragment; the error returns and final
 * return are missing from this view.
 */
424 int filelink_hash_inode(UInt32 link_ref_num
, UInt32 linkCount
)
426 struct filelink_hash
*cur
;
428 /* If no hash exists, allocate the hash */
429 if (filelink_head
== NULL
) {
430 filelink_head
= calloc(FILELINK_HASH_SIZE
, sizeof(struct filelink_hash
*));
431 if (filelink_head
== NULL
) {
436 cur
= filelink_hash_search(link_ref_num
);
438 cur
= filelink_hash_insert(link_ref_num
);
441 cur
->found_link_count
= linkCount
;
447 /* If the file link hash was used to account for
448 * link count of file hard links created on pre-Leopard
449 * OS, destroy the hash by freeing all allocated
/*
 * filelink_hash_destroy - tears down the pre-Leopard file hard-link hash:
 * walks every bucket unlinking entries, then resets filelink_head and
 * filelink_entry_count to their empty state.
 *
 * NOTE(review): line-mangled fragment; the free(cur) call and loop
 * declarations are missing from this view.
 */
452 static void filelink_hash_destroy(void)
455 struct filelink_hash
*cur
;
457 for (i
= 0; i
< FILELINK_HASH_SIZE
; i
++) {
458 while (filelink_head
[i
]) {
459 cur
= filelink_head
[i
];
460 filelink_head
[i
] = cur
->next
;
465 filelink_head
= NULL
;
466 filelink_entry_count
= 0;
472 * Capture indirect link info.
473 * Called for every indirect link in the catalog.
/*
 * CaptureHardLink - called for every file hard link found in the catalog.
 * Pre-Leopard links (no kHFSHasLinkChainBit, prev/next IDs zero) are
 * routed to the filelink hash; otherwise the <linkReference, linkID> pair
 * goes into the prime bucket, a missing chain bit is recorded, and a
 * createDate matching neither the private metadata folder's nor the root
 * folder's creation time produces an E_BadHardLinkDate repair order.
 *
 * NOTE(review): line-mangled fragment; local declarations (correct, p,
 * str1/str2), several closing braces and the second argument of
 * hardlink_add_bucket() are missing from this view.
 */
476 CaptureHardLink(void *cookie
, const HFSPlusCatalogFile
*file
)
478 struct HardLinkInfo
* info
= (struct HardLinkInfo
*) cookie
;
480 /* A file hard link created on pre-Leopard OS does not
481 * have kHFSHasLinkChainBit set or prev/next link IDs.
482 * Ignore such file hard links from all check and CRT account
483 * and instead account the information in hash to verify the
486 if ((info
->fileBucket
== NULL
) ||
487 (((file
->flags
& kHFSHasLinkChainMask
) == 0) &&
488 (file
->hl_prevLinkID
== 0) &&
489 (file
->hl_nextLinkID
== 0))) {
490 filelink_hash_link(file
->hl_linkReference
);
492 /* For file hard links, add link reference number
493 * and catalog link ID pair to the prime buckets.
495 hardlink_add_bucket(info
->fileBucket
, file
->hl_linkReference
,
498 if ((file
->flags
& kHFSHasLinkChainMask
) == 0) {
499 record_link_badchain(info
->globals
, false);
502 if ((info
->priv_dir_itime
&& file
->createDate
!= info
->priv_dir_itime
) &&
503 (info
->root_dir_itime
&& file
->createDate
!= info
->root_dir_itime
)) {
510 plog("Hard Link catalog entry %u has bad time %u (should be %u, or at least %u)\n",
511 file
->fileID
, file
->createDate
, info
->priv_dir_itime
, info
->root_dir_itime
);
512 correct
= info
->priv_dir_itime
;
514 p
= AllocMinorRepairOrder(info
->globals
, 0);
517 plog("Unable to allocate hard link time repair order!");
521 fsckPrint(info
->globals
->context
, E_BadHardLinkDate
);
522 snprintf(str1
, sizeof(str1
), "%u", correct
);
523 snprintf(str2
, sizeof(str2
), "%u", file
->createDate
);
524 fsckPrint(info
->globals
->context
, E_BadValue
, str1
, str2
);
526 p
->type
= E_BadHardLinkDate
;
527 p
->parid
= file
->fileID
;
528 p
->correct
= info
->priv_dir_itime
;
529 p
->incorrect
= file
->createDate
;
536 * RepairHardLinkChains
538 * Cycle through the catalog tree, and generate repair orders for hard
539 * links that may be broken.
/*
 * RepairHardLinkChains - two-pass scan that generates repair orders for
 * broken hard-link chains (directory links when isdir, else file links).
 * Pass 1 walks the whole catalog BTree collecting every hard link into a
 * per-inode open-addressed hash (hash_insert/hash_search) of
 * <prev, fileID, next> triads, flagging bad link flags / owner flags /
 * finder info as it goes.  Pass 2 walks the private metadata directory,
 * matches each inode against the hash, verifies the chain via
 * CheckHardLinkList() and the link count via RecordBadLinkCount(), and
 * finally reports never-checked hash entries as orphan links.
 *
 * NOTE(review): heavily line-mangled fragment; numerous original lines
 * (declarations of rec/fcb/reclen/flags/entries/i/j/ref_num, continue/
 * break statements, error paths and closing braces) are missing from
 * this view — consult the full source before editing logic here.
 */
542 RepairHardLinkChains(SGlobPtr gp
, Boolean isdir
)
545 struct IndirectLinkInfo
*linkInfo
= NULL
;
547 HFSPlusCatalogKey
*keyp
;
548 BTreeIterator iterator
;
549 FSBufferDescriptor btrec
;
551 UInt32 linkID
, inodeID
;
555 int slotsUsed
= 0, slots
= 0;
563 metadirid
= gp
->dirlink_priv_dir_id
;
564 prefixlen
= strlen(HFS_DIRINODE_PREFIX
);
565 prefixName
= HFS_DIRINODE_PREFIX
;
567 metadirid
= gp
->filelink_priv_dir_id
;
568 prefixlen
= strlen(HFS_INODE_PREFIX
);
569 prefixName
= HFS_INODE_PREFIX
;
572 if (metadirid
== 0) {
573 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
575 plog ("\tPrivate directory for dirlinks not found. Stopping repairs.\n");
577 plog ("\tPrivate directory for filelinks not found. Stopping repairs.\n");
584 // Initialize the hash
585 if (GetPrivateDir(gp
, &rec
) == 0 && rec
.hfsPlusFolder
.valence
!= 0) {
586 entries
= rec
.hfsPlusFolder
.valence
+ 10;
587 folderID
= rec
.hfsPlusFolder
.folderID
;
593 for (slots
= 1; slots
<= entries
; slots
<<= 1)
595 if (slots
< (entries
+ (entries
/3)))
597 linkInfo
= calloc(slots
, sizeof(struct IndirectLinkInfo
));
598 if (linkInfo
== NULL
) {
599 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
600 plog("RepairHardLinkChains: calloc(%d, %zu) failed\n", slots
, sizeof(struct IndirectLinkInfo
));
605 // Done initializing the hash
607 // Set up the catalog BTree iterator
608 // (start from the root folder, and work our way down)
609 fcb
= gp
->calculatedCatalogFCB
;
610 ClearMemory(&iterator
, sizeof(iterator
));
611 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
612 BuildCatalogKey(kHFSRootFolderID
, NULL
, true, (CatalogKey
*)keyp
);
613 btrec
.bufferAddress
= &rec
;
615 btrec
.itemSize
= sizeof(rec
);
617 /* Counter for number of inodes found and verified in the
618 * hash. When an inode is found when checking the hard links,
619 * the value is incremented. When an inode's linked list and
620 * link count are verified, the value is decremented. If
621 * this value remains non-zero at the end, there are
627 * This chunk of code iterates through the entire catalog BTree.
628 * For each hard link node (that is, the "directory entry" that
629 * points to the actual node in the metadata directory), it may
630 * add it to the hash (if it doesn't exist yet; it also then increments
631 * the link count for that "inode"); it also creates an array
632 * of <previous, fileid, next> for the linked list.
634 for (result
= BTIterateRecord(fcb
, kBTreeFirstRecord
, &iterator
, &btrec
, &reclen
);
636 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
, &btrec
, &reclen
)) {
637 HFSPlusCatalogFile
*file
= &rec
.hfsPlusFile
;
638 Boolean islink
= false;
640 if (rec
.recordType
!= kHFSPlusFileRecord
)
644 /* Assume that this is a directory hard link if
645 * the atleast one value in finder info corresponds to
646 * alias, and the alias is not a file inode, and
647 * either the inode number is greater than
648 * kHFSFirstUserCatalogNodeID or the flag has
649 * kHFSHasLinkChainBit set.
651 if (((file
->userInfo
.fdType
== kHFSAliasType
) ||
652 (file
->userInfo
.fdCreator
== kHFSAliasCreator
) ||
653 (file
->userInfo
.fdFlags
& kIsAlias
)) &&
654 (keyp
->parentID
!= gp
->filelink_priv_dir_id
) &&
655 ((file
->hl_linkReference
>= kHFSFirstUserCatalogNodeID
) ||
656 (file
->flags
& kHFSHasLinkChainMask
))) {
657 flags
= rec
.hfsPlusFile
.flags
;
660 } else if (file
->userInfo
.fdType
== kHardLinkFileType
&&
661 file
->userInfo
.fdCreator
== kHFSPlusCreator
) {
662 flags
= rec
.hfsPlusFile
.flags
;
666 struct IndirectLinkInfo
*li
= NULL
;
667 struct HardLinkList
*tlist
= NULL
;
671 linkID
= file
->fileID
;
672 inodeID
= file
->bsdInfo
.special
.iNodeNum
;
674 /* Now that we are in repair, all hard links should
675 * have this bit set because we upgrade all pre-Leopard
676 * file hard links to Leopard hard links on any
677 * file hard link repairs.
679 if ((flags
& kHFSHasLinkChainMask
) == 0) {
680 record_link_badflags(gp
, linkID
, isdir
, flags
,
681 flags
| kHFSHasLinkChainMask
);
684 /* For directory hard links, check ownerFlags and
685 * finderInfo because we could have missed creating
686 * repair orders in verification. Verification could
687 * have stopped before we saw this record because it
688 * stops as soon as it determines that it needs full
689 * knowledge of hard links on the disk during repair.
692 /* Check if the directory hard link has UF_IMMUTABLE bit set */
693 if ((file
->bsdInfo
.ownerFlags
& UF_IMMUTABLE
) == 0) {
694 record_dirlink_badownerflags(gp
, file
->fileID
,
695 file
->bsdInfo
.ownerFlags
,
696 file
->bsdInfo
.ownerFlags
| UF_IMMUTABLE
, true);
699 /* Check Finder Info */
700 if ((file
->userInfo
.fdType
!= kHFSAliasType
) ||
701 (file
->userInfo
.fdCreator
!= kHFSAliasCreator
) ||
702 ((file
->userInfo
.fdFlags
& kIsAlias
) == 0)) {
703 record_link_badfinderinfo(gp
, file
->fileID
, true);
707 /* For directory hard links, hash using inodeID. For
708 * file hard links, hash using link reference number
709 * (which is same as inode ID for file hard links
710 * created post-Tiger). For each inodeID, add the
711 * <prev, id, next> triad.
713 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
718 /* hash_insert() initializes linkCount to 1 */
719 hash_insert(inodeID
, slots
, slotsUsed
++, linkInfo
);
720 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
724 * Either the hash passed in should have the entry, or
725 * the one we just created should (because we just added it);
726 * either way, if it's not here, we've got something weird
727 * going on, so let's just abort now.
733 count
= li
->linkCount
- 1;
734 /* Reallocate memory to store information about file/directory hard links */
735 if ((count
% 10) == 0) {
736 tlist
= realloc(li
->list
, (count
+ 10) * sizeof(struct HardLinkList
));
743 li
->list
= tlist
; // May be the same
744 for (i
= count
; i
< (count
+ 10); i
++) {
745 memset(&li
->list
[i
], 0, sizeof(li
->list
[i
]));
750 /* Store information about this hard link */
752 li
->list
[count
].fileID
= linkID
;
753 li
->list
[count
].prev
= file
->hl_prevLinkID
;
754 li
->list
[count
].next
= file
->hl_nextLinkID
;
759 if (result
== btNotFound
)
760 result
= 0; // If we hit the end of the catalog tree, that's okay
767 * Next, we iterate through the metadata directory, and check the linked list.
770 ClearMemory(&iterator
, sizeof(iterator
));
771 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
772 BuildCatalogKey(metadirid
, NULL
, true, (CatalogKey
*)keyp
);
773 btrec
.bufferAddress
= &rec
;
775 btrec
.itemSize
= sizeof(rec
);
777 for (result
= BTSearchRecord(fcb
, &iterator
, kInvalidMRUCacheKey
, &btrec
, &reclen
, &iterator
);
779 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
, &btrec
, &reclen
)) {
780 unsigned char filename
[64];
782 struct IndirectLinkInfo
*li
= NULL
;
784 if (rec
.recordType
== kHFSPlusFolderThreadRecord
||
785 rec
.recordType
== kHFSPlusFileThreadRecord
)
787 if (keyp
->parentID
!= metadirid
)
789 if ((isdir
&& rec
.recordType
!= kHFSPlusFolderRecord
) ||
790 (!isdir
&& rec
.recordType
!= kHFSPlusFileRecord
))
792 (void)utf_encodestr(keyp
->nodeName
.unicode
,
793 keyp
->nodeName
.length
* 2,
794 filename
, &len
, sizeof(filename
));
796 if (strstr((char*)filename
, prefixName
) != (char*)filename
)
800 inodeID
= rec
.hfsPlusFolder
.folderID
;
802 flags
= rec
.hfsPlusFolder
.flags
;
803 li
= hash_search(inodeID
, slots
, slotsUsed
, linkInfo
);
807 inodeID
= rec
.hfsPlusFile
.fileID
;
808 ref_num
= atol((char*)&filename
[prefixlen
]);
809 if (ref_num
< 0 || ref_num
> UINT32_MAX
) {
811 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
812 plog ("\tLink reference num=%ld is invalid for inode=%u result=%d\n", ref_num
, inodeID
, result
);
816 link_ref_num
= (UInt32
)ref_num
;
817 flags
= rec
.hfsPlusFile
.flags
;
818 li
= hash_search(link_ref_num
, slots
, slotsUsed
, linkInfo
);
821 /* file/directory inode should always have kHFSHasLinkChainBit set */
822 if ((flags
& kHFSHasLinkChainMask
) == 0) {
823 record_inode_badflags(gp
, inodeID
, isdir
, flags
,
824 flags
| kHFSHasLinkChainMask
, true);
828 UInt32 first_link_id
= 0;
829 uint32_t linkCount
= 0;
831 result
= get_first_link_id(gp
, &rec
, inodeID
, isdir
, &first_link_id
);
833 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
)
834 plog("\tError getting first link ID for inode = %u (result=%d)\n", inodeID
, result
);
837 /* Check and create repairs for doubly linked list */
838 result
= CheckHardLinkList(gp
, inodeID
, li
->list
, li
->linkCount
, first_link_id
);
840 linkCount
= isdir
? rec
.hfsPlusFolder
.bsdInfo
.special
.linkCount
: rec
.hfsPlusFile
.bsdInfo
.special
.linkCount
;
841 if (linkCount
!= li
->linkCount
) {
842 RecordBadLinkCount(gp
, inodeID
, linkCount
, li
->linkCount
);
845 li
->flags
|= LINKINFO_CHECK
;
848 /* Not found in hash, this is orphaned file/directory inode */
849 RecordOrphanInode(gp
, isdir
, inodeID
);
853 if (result
== btNotFound
) {
860 /* Check for orphan hard links */
863 for (i
= 0; i
< slots
; i
++) {
864 /* If node is initialized but never checked, record orphan link */
865 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) &&
866 ((linkInfo
[i
].flags
& LINKINFO_CHECK
) == 0)) {
867 for (j
= 0; j
< linkInfo
[i
].linkCount
; j
++) {
868 RecordOrphanLink(gp
, isdir
, linkInfo
[i
].list
[j
].fileID
);
877 for (i
= 0; i
< slots
; i
++) {
878 if (linkInfo
[i
].list
)
879 free(linkInfo
[i
].list
);
890 * Check indirect node link counts against the indirect
891 * links that were found. There are 4 possible problems
893 * 1. orphaned indirect node (i.e. no links found)
894 * 2. orphaned indirect link (i.e. indirect node missing)
895 * 3. incorrect link count
896 * 4. indirect link id was 0 (i.e. link id wasn't preserved)
/*
 * CheckHardLinks - verification pass over the private metadata directory
 * (see comment above for the four problem classes).  Iterates the
 * children of the private folder, reports orphaned open-unlinked files
 * in debug mode, runs inode_check() on each iNode entry into a local
 * prime bucket, then compares that bucket against the one accumulated by
 * CaptureHardLink(); a mismatch records a bad link chain.  Finally it
 * validates pre-Leopard link counts collected in the filelink hash and
 * destroys the hash.
 *
 * NOTE(review): heavily line-mangled fragment; declarations (gp, rec,
 * fcb, reclen, len, i, folderID, prefixlen), early exits, break/continue
 * statements and closing braces are missing from this view.
 */
899 CheckHardLinks(void *cookie
)
901 struct HardLinkInfo
*info
= (struct HardLinkInfo
*)cookie
;
906 HFSPlusCatalogKey
* keyp
;
907 BTreeIterator iterator
;
908 FSBufferDescriptor btrec
;
913 unsigned char filename
[64];
914 PrimeBuckets
*catBucket
;
916 /* All done if no hard links exist. */
921 fsckPrint(gp
->context
, hfsHardLinkCheck
);
923 folderID
= info
->privDirID
;
925 catBucket
= calloc(1, sizeof(PrimeBuckets
));
926 if (catBucket
== NULL
) {
927 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
928 plog("CheckHardLinks: calloc(1, %zu) failed\n", sizeof(PrimeBuckets
));
935 fcb
= gp
->calculatedCatalogFCB
;
936 prefixlen
= strlen(HFS_INODE_PREFIX
);
937 ClearMemory(&iterator
, sizeof(iterator
));
938 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
939 btrec
.bufferAddress
= &rec
;
941 btrec
.itemSize
= sizeof(rec
);
943 * position iterator at folder thread record
944 * (i.e. one record before first child)
946 ClearMemory(&iterator
, sizeof(iterator
));
947 BuildCatalogKey(folderID
, NULL
, true, (CatalogKey
*)keyp
);
948 result
= BTSearchRecord(fcb
, &iterator
, kInvalidMRUCacheKey
, &btrec
,
950 if ((result
!= 0) && (result
!= btNotFound
)) {
954 /* Visit all the children in private directory. */
956 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, &iterator
,
958 if (result
|| keyp
->parentID
!= folderID
)
961 if (rec
.recordType
!= kHFSPlusFileRecord
)
964 (void) utf_encodestr(keyp
->nodeName
.unicode
,
965 keyp
->nodeName
.length
* 2,
966 filename
, &len
, sizeof(filename
));
967 filename
[len
] = '\0';
969 /* Report Orphaned nodes only in debug mode */
970 if ((strstr((char *)filename
, HFS_DELETE_PREFIX
) == (char *)filename
) &&
971 (fsckGetVerbosity(gp
->context
) == kDebugLog
)) {
972 RecordOrphanOpenUnlink(gp
, folderID
, filename
);
976 if (strstr((char *)filename
, HFS_INODE_PREFIX
) != (char *)filename
)
979 result
= inode_check(gp
, catBucket
, (CatalogRecord
*)&rec
, (CatalogKey
*)keyp
, false);
986 if (result
== btNotFound
) {
991 * If we've reached this point, and result is clean,
992 * then we need to compare the two hard link
993 * buckets: if they don't match, then we have a hard link chain error, and
994 * need to either repair it, or just mark the error.
996 if ((result
== 0) && (info
->fileBucket
!= NULL
)) {
997 result
= compare_prime_buckets(catBucket
, info
->fileBucket
);
999 record_link_badchain(gp
, false);
1000 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
1001 plog("\tfilelink prime buckets do not match\n");
1007 /* If hard links created in pre-Leopard OS were detected, they were
1008 * added to the hash for checking link counts later. Check the
1009 * link counts from the hash. Note that the hard links created in
1010 * pre-Leopard OS do not have kHFSHasLinkChainBit set in the inode
1011 * and the hard links, and the first/prev/next ID is zero --- and
1012 * hence they were ignored from CRT check and added to hash.
1014 if (filelink_entry_count
) {
1016 struct filelink_hash
*cur
;
1018 /* Since pre-Leopard OS hard links were detected, they
1019 * should be updated to new version. This is however
1020 * an opportunistic repair and no corruption will be
1021 * reported. This will be performed only if any other
1022 * file hard link repairs are performed.
1024 if (fsckGetVerbosity(gp
->context
) >= kDebugLog
) {
1025 plog("\tCheckHardLinks: found %u pre-Leopard file inodes.\n", filelink_entry_count
);
1028 for (i
= 0; i
< FILELINK_HASH_SIZE
; i
++) {
1029 cur
= filelink_head
[i
];
1031 if ((cur
->found_link_count
== 0) ||
1032 (cur
->calc_link_count
== 0) ||
1033 (cur
->found_link_count
!= cur
->calc_link_count
)) {
1034 record_link_badchain(gp
, false);
1043 if (filelink_entry_count
) {
1044 filelink_hash_destroy();
1056 * Get catalog entry for the "HFS+ Private Data" directory.
1057 * The indirect nodes are stored in this directory.
/*
 * GetPrivateDir - fetches the catalog record of the "HFS+ Private Data"
 * metadata directory (see comment above) by copying the static
 * gMetaDataDirKey into a fresh iterator and doing a BTSearchRecord on
 * the calculated catalog FCB; the record lands in *rec.
 *
 * NOTE(review): line-mangled fragment; the isHFSPlus early-exit, reclen
 * declaration and return of result are missing from this view.
 */
1060 GetPrivateDir(SGlobPtr gp
, CatalogRecord
* rec
)
1062 HFSPlusCatalogKey
* keyp
;
1063 BTreeIterator iterator
;
1064 FSBufferDescriptor btrec
;
1069 isHFSPlus
= VolumeObjectIsHFSPlus( );
1073 ClearMemory(&iterator
, sizeof(iterator
));
1074 keyp
= (HFSPlusCatalogKey
*)&iterator
.key
;
1076 btrec
.bufferAddress
= rec
;
1077 btrec
.itemCount
= 1;
1078 btrec
.itemSize
= sizeof(CatalogRecord
);
1080 /* look up record for HFS+ private folder */
1081 ClearMemory(&iterator
, sizeof(iterator
));
1082 CopyMemory(&gMetaDataDirKey
, keyp
, sizeof(gMetaDataDirKey
));
1083 result
= BTSearchRecord(gp
->calculatedCatalogFCB
, &iterator
,
1084 kInvalidMRUCacheKey
, &btrec
, &reclen
, &iterator
);
1092 * Get catalog entry for the Root Folder.
/*
 * GetRootDir - fetches the catalog record for the volume's root folder
 * (kHFSRootFolderID) into *rec via GetCatalogRecordByID.
 *
 * NOTE(review): line-mangled fragment; declarations of key/recSize, the
 * isHFSPlus handling and the return statement are missing from view.
 */
1095 GetRootDir(SGlobPtr gp
, CatalogRecord
* rec
)
1102 isHFSPlus
= VolumeObjectIsHFSPlus( );
1106 result
= GetCatalogRecordByID(gp
, kHFSRootFolderID
, isHFSPlus
, &key
, rec
, &recSize
);
1114 * Record a repair to delete an orphaned hard links, i.e. hard links
1115 * that do not have any corresponding inode.
/*
 * RecordOrphanLink - reports an orphaned hard link (no matching inode;
 * see comment above), allocates a minor repair order of type
 * E_OrphanDirLink/E_OrphanFileLink, and flags S_LinkErrRepair in CatStat.
 *
 * NOTE(review): line-mangled fragment; declaration of p, the NULL check
 * after AllocMinorRepairOrder, p->parid assignment and the return are
 * missing from this view.
 */
1118 RecordOrphanLink(SGlobPtr gp
, Boolean isdir
, UInt32 linkID
)
1122 fsckPrint(gp
->context
, isdir
? E_OrphanDirLink
: E_OrphanFileLink
, linkID
);
1124 p
= AllocMinorRepairOrder(gp
, 0);
1128 p
->type
= isdir
? E_OrphanDirLink
: E_OrphanFileLink
;
1131 gp
->CatStat
|= S_LinkErrRepair
;
1139 * Record a repair for orphan inode i.e. inodes that do not have
1140 * any corresponding hard links.
/*
 * RecordOrphanInode - reports an orphaned inode (no corresponding hard
 * links; see comment above), allocates a minor repair order of type
 * E_OrphanDirInode/E_OrphanFileInode, and sets S_LinkErrRepair.
 *
 * NOTE(review): line-mangled fragment; declaration of p, its NULL check,
 * the p->parid assignment and the return are missing from this view.
 */
1143 RecordOrphanInode(SGlobPtr gp
, Boolean isdir
, UInt32 inodeID
)
1147 fsckPrint(gp
->context
, isdir
? E_OrphanDirInode
: E_OrphanFileInode
, inodeID
);
1149 p
= AllocMinorRepairOrder(gp
, 0);
1153 p
->type
= isdir
? E_OrphanDirInode
: E_OrphanFileInode
;
1156 gp
->CatStat
|= S_LinkErrRepair
;
1162 * RecordOrphanOpenUnlink
1164 * This is only called when debugging is turned on. Don't
1165 * record an actual error, just print out a message.
/*
 * RecordOrphanOpenUnlink - debug-only reporting of an open-but-unlinked
 * ("temp"-prefixed) file in the private directory; per the comment
 * above it only prints (E_UnlinkedFile) and records no repair order.
 *
 * NOTE(review): line-mangled fragment; the return statement is missing
 * from this view.
 */
1168 RecordOrphanOpenUnlink(SGlobPtr gp
, UInt32 parID
, unsigned char* filename
)
1170 fsckPrint(gp
->context
, E_UnlinkedFile
, filename
);
/*
 * RecordBadHardLinkChainFirst - reports that an inode's first-link ID
 * ("is") disagrees with the first entry of its sorted chain
 * ("shouldbe"), prints the good/bad values, and queues a minor repair
 * order of type E_InvalidLinkChainFirst with p->parid = fileID (the
 * inode, not a parent — see the inline comment), setting S_LinkErrRepair.
 *
 * NOTE(review): line-mangled fragment; declaration of p, its NULL check
 * and the return are missing from this view.
 */
1177 RecordBadHardLinkChainFirst(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
)
1180 char goodstr
[16], badstr
[16];
1182 fsckPrint(gp
->context
, E_InvalidLinkChainFirst
, fileID
);
1183 sprintf(goodstr
, "%u", shouldbe
);
1184 sprintf(badstr
, "%u", is
);
1185 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1187 p
= AllocMinorRepairOrder(gp
, 0);
1193 p
->type
= E_InvalidLinkChainFirst
;
1195 p
->correct
= shouldbe
;
1197 p
->parid
= fileID
; // *Not* the parent ID!
1198 gp
->CatStat
|= S_LinkErrRepair
;
/*
 * RecordBadHardLinkPrev - reports a hard link whose prev-link ID ("is")
 * does not match its predecessor in the chain ("shouldbe"), prints the
 * good/bad values, and queues a minor repair order of type
 * E_InvalidLinkChainPrev (p->parid = fileID, not the parent), setting
 * S_LinkCount in CatStat.
 *
 * NOTE(review): line-mangled fragment; declaration of p, its NULL check
 * and the return are missing from this view.
 */
1205 RecordBadHardLinkPrev(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
)
1208 char goodstr
[16], badstr
[16];
1210 fsckPrint(gp
->context
, E_InvalidLinkChainPrev
, fileID
);
1211 sprintf(goodstr
, "%u", shouldbe
);
1212 sprintf(badstr
, "%u", is
);
1213 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1215 p
= AllocMinorRepairOrder(gp
, 0);
1219 p
->type
= E_InvalidLinkChainPrev
;
1221 p
->correct
= shouldbe
;
1223 p
->parid
= fileID
; // *Not* the parent ID
1224 gp
->CatStat
|= S_LinkCount
;
/*
 * RecordBadHardLinkNext - reports a hard link whose next-link ID ("is")
 * does not match its successor in the chain ("shouldbe"), prints the
 * good/bad values, and queues a minor repair order of type
 * E_InvalidLinkChainNext (p->parid = fileID, not the parent), setting
 * S_LinkCount in CatStat.
 *
 * NOTE(review): line-mangled fragment; declaration of p, its NULL check
 * and the return are missing from this view.
 */
1229 RecordBadHardLinkNext(SGlobPtr gp
, UInt32 fileID
, UInt32 is
, UInt32 shouldbe
)
1232 char goodstr
[16], badstr
[16];
1234 fsckPrint(gp
->context
, E_InvalidLinkChainNext
, fileID
);
1236 sprintf(goodstr
, "%u", shouldbe
);
1237 sprintf(badstr
, "%u", is
);
1238 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1240 p
= AllocMinorRepairOrder(gp
, 0);
1244 p
->type
= E_InvalidLinkChainNext
;
1246 p
->correct
= shouldbe
;
1248 p
->parid
= fileID
; // *Not* the parent ID
1249 gp
->CatStat
|= S_LinkCount
;
/*
 * RecordBadLinkCount - reports an inode whose stored link count ("is")
 * differs from the number of links actually found ("shouldbe"), prints
 * the good/bad values, and queues a minor repair order of type
 * E_InvalidLinkCount with p->parid = inodeID (not a parent ID).
 *
 * NOTE(review): line-mangled fragment; declaration of p, its NULL
 * check, any CatStat update and the return are missing from this view.
 */
1254 RecordBadLinkCount(SGlobPtr gp
, UInt32 inodeID
, UInt32 is
, UInt32 shouldbe
)
1257 char goodstr
[16], badstr
[16];
1258 fsckPrint(gp
->context
, E_InvalidLinkCount
, inodeID
);
1260 sprintf(goodstr
, "%u", shouldbe
);
1261 sprintf(badstr
, "%u", is
);
1262 fsckPrint(gp
->context
, E_BadValue
, goodstr
, badstr
);
1264 p
= AllocMinorRepairOrder(gp
, 0);
1268 p
->type
= E_InvalidLinkCount
;
1270 p
->correct
= shouldbe
;
1272 p
->parid
= inodeID
; // *Not* the parent ID
/*
 * hash_insert - inserts linkID into the open-addressed IndirectLinkInfo
 * table (power-of-two totalSlots; initial probe is linkID masked by
 * totalSlots-1, then linear probing).  On an uninitialized slot it sets
 * LINKINFO_INIT, stores linkID and starts linkCount at 1; a duplicate
 * insert or a full table is only logged via plog().
 *
 * NOTE(review): line-mangled fragment; declarations of i/last, the
 * linkCount increment on the duplicate path (if any) and closing braces
 * are missing from this view.
 */
1278 hash_insert(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
)
1282 i
= linkID
& (totalSlots
- 1);
1284 last
= (i
+ (totalSlots
-1)) % totalSlots
;
1285 while ((i
!= last
) &&
1286 (linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1287 (linkInfo
[i
].linkID
!= linkID
)) {
1288 i
= (i
+ 1) % totalSlots
;
1291 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) == 0) {
1292 if (linkInfo
[i
].list
) {
1293 plog ("hash: overwriting data! (old:%u, new:%u)\n", linkInfo
[i
].linkID
, linkID
);
1296 linkInfo
[i
].flags
|= LINKINFO_INIT
;
1297 linkInfo
[i
].linkID
= linkID
;
1298 linkInfo
[i
].linkCount
= 1;
1299 } else if (linkInfo
[i
].linkID
== linkID
) {
1300 plog("hash: duplicate insert! (%d)\n", linkID
);
1303 plog("hash table full (%d entries) \n", slotsUsed
);
1309 static struct IndirectLinkInfo
*
1310 hash_search(UInt32 linkID
, int totalSlots
, int slotsUsed
, struct IndirectLinkInfo
*linkInfo
)
1316 i
= linkID
& (totalSlots
- 1);
1318 last
= (i
+ (slotsUsed
-1)) % totalSlots
;
1319 while ((i
!= last
) &&
1320 (linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1321 (linkInfo
[i
].linkID
!= linkID
)) {
1322 i
= (i
+ 1) % totalSlots
;
1326 if ((linkInfo
[i
].flags
& LINKINFO_INIT
) &&
1327 (linkInfo
[i
].linkID
== linkID
)) {
1328 return (&linkInfo
[i
]);