1 /*
2 * Copyright (c) 1999-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 File: SVerify1.c
25
26 Contains: xxx put contents here xxx
27
28 Version: xxx put version here xxx
29
30 Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved.
31
32 */
33
34 #include "Scavenger.h"
35 #include "../cache.h"
36 #include <stdlib.h>
37 #include <stddef.h>
38 #include <unistd.h>
39 #include <errno.h>
40 #include <fcntl.h>
41 #include <limits.h>
42
43 #include <libkern/OSByteOrder.h>
44 #define SW16(x) OSSwapBigToHostInt16(x)
45 #define SW32(x) OSSwapBigToHostInt32(x)
46 #define SW64(x) OSSwapBigToHostInt64(x)
47
48 extern int OpenDeviceByUUID(void *uuidp, char **nameptr);
49
50 // internal routine prototypes
51
52 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid );
53
54 static int RcdNameLockedErr( SGlobPtr GPtr, OSErr type, UInt32 incorrect );
55
56 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb );
57
58 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb );
59
60 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType );
61 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector );
62
63 /* overlapping extents verification functions prototype */
64 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrName, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType );
65
66 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo);
67
68 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType);
69
70 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType);
71
72 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType);
73
74 static int CompareExtentFileID(const void *first, const void *second);
75
76 /*
77 * Check if a volume is journaled.
78 *
79 * If journal_bit_only is true, the function only checks
80 * if kHFSVolumeJournaledBit is set or not. If the bit
81 * is set, function returns 1 otherwise 0.
82 *
83 * If journal_bit_only is false, in addition to checking
84 * kHFSVolumeJournaledBit, the function also checks if the
85 * last mounted version indicates failed journal replay,
86 * or runtime corruption was detected or simply the volume
87 * is not journaled and it was not unmounted cleanly.
88 * If all of the above conditions are false and the journal
89 * bit is set, the function returns 1 to indicate that the
90 * volume is truly journaled; otherwise it returns 0 to report
91 * that the volume is not journaled.
92 *
93 * returns: 0 not journaled or any of the above conditions are true
94 * 1 journaled
95 *
96 */
97 int
98 CheckIfJournaled(SGlobPtr GPtr, Boolean journal_bit_only)
99 {
100 #define kIDSector 2
101
102 OSErr err;
103 int result;
104 HFSMasterDirectoryBlock *mdbp;
105 HFSPlusVolumeHeader *vhp;
106 SVCB *vcb = GPtr->calculatedVCB;
107 ReleaseBlockOptions rbOptions;
108 BlockDescriptor block;
109
110 vhp = (HFSPlusVolumeHeader *) NULL;
111 rbOptions = kReleaseBlock;
112
113 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
114 if (err) return (0);
115
116 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
117
118 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
119 vhp = (HFSPlusVolumeHeader *) block.buffer;
120
121 } else if (mdbp->drSigWord == kHFSSigWord) {
122
123 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
124 UInt32 vhSector;
125 UInt32 blkSectors;
126
127 blkSectors = mdbp->drAlBlkSiz / 512;
128 vhSector = mdbp->drAlBlSt;
129 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
130 vhSector += kIDSector;
131
132 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
133 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
134 if (err) return (0);
135
136 vhp = (HFSPlusVolumeHeader *) block.buffer;
137 mdbp = (HFSMasterDirectoryBlock *) NULL;
138
139 }
140 }
141
142 if ((vhp != NULL) && (ValidVolumeHeader(vhp) == noErr)) {
143 result = ((vhp->attributes & kHFSVolumeJournaledMask) != 0);
144 if (journal_bit_only == true) {
145 goto out;
146 }
147
148 // even if journaling is enabled for this volume, we'll return
149 // false if it wasn't unmounted cleanly and it was previously
150 // mounted by someone that doesn't know about journaling.
151 // or if lastMountedVersion is kFSKMountVersion
152 if ( vhp->lastMountedVersion == kFSKMountVersion ||
153 (vhp->attributes & kHFSVolumeInconsistentMask) ||
154 ((vhp->lastMountedVersion != kHFSJMountVersion) &&
155 (vhp->attributes & kHFSVolumeUnmountedMask) == 0)) {
156 result = 0;
157 }
158 } else {
159 result = 0;
160 }
161
162 out:
163 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
164
165 return (result);
166 }
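/*
 * Editorial usage sketch (not part of the original source): how a caller
 * might use CheckIfJournaled() in its two modes.  GPtr is assumed to be an
 * already-initialized scavenger global.
 */
#if 0	// illustrative only
	int jnl_bit = CheckIfJournaled(GPtr, true);	/* just report kHFSVolumeJournaledBit */
	int jnl_ok  = CheckIfJournaled(GPtr, false);	/* also require a cleanly replayable journal */
	if (jnl_bit && !jnl_ok) {
		/* the journal bit is set, but the volume still needs a full check */
	}
#endif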
167
168 /*
169 * Get the JournalInfoBlock from a volume.
170 *
171 * It borrows code to get the volume header. Note that it
172 * uses the primary volume header, not the alternate one.
173 * It returns 0 on success, or an error value.
174 * If requested, it will also set the block size (as a 32-bit
175 * value), via bsizep -- this is useful because the journal code
176 * needs to know the volume blocksize, but it doesn't necessarily
177 * have the header.
178 *
179 * Note also that it does direct reads, rather than going through
180 * the cache code. This simplifies getting the JIB.
181 */
182
183 static OSErr
184 GetJournalInfoBlock(SGlobPtr GPtr, JournalInfoBlock *jibp, UInt32 *bsizep)
185 {
186 #define kIDSector 2
187
188 OSErr err;
189 int result = 0;
190 UInt32 jiBlk = 0;
191 HFSMasterDirectoryBlock *mdbp;
192 HFSPlusVolumeHeader *vhp;
193 SVCB *vcb = GPtr->calculatedVCB;
194 ReleaseBlockOptions rbOptions;
195 BlockDescriptor block;
196 size_t blockSize = 0;
197 off_t embeddedOffset = 0;
198
199 vhp = (HFSPlusVolumeHeader *) NULL;
200 rbOptions = kReleaseBlock;
201
202 if (jibp == NULL)
203 return paramErr;
204
205 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
206 if (err) return (err);
207
208 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
209
210 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
211 vhp = (HFSPlusVolumeHeader *) block.buffer;
212
213 } else if (mdbp->drSigWord == kHFSSigWord) {
214
215 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
216 UInt32 vhSector;
217 UInt32 blkSectors;
218
219 blkSectors = mdbp->drAlBlkSiz / 512;
220 vhSector = mdbp->drAlBlSt;
221 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
222 vhSector += kIDSector;
223
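/*
 * Worked example (editorial, hypothetical values): with drAlBlSt = 28,
 * drAlBlkSiz = 4096 (8 sectors) and drEmbedExtent.startBlock = 10, the
 * embedded volume starts at 10*4096 + 28*512 = 55296 bytes (sector 108),
 * so the vhSector computed above is 28 + 8*10 + 2 = 110, i.e. kIDSector
 * sectors past the start of the embedded volume.
 */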
224 embeddedOffset = (mdbp->drEmbedExtent.startBlock * mdbp->drAlBlkSiz) + (mdbp->drAlBlSt * Blk_Size);
225 if (debug)
226 plog("Embedded offset is %lld\n", embeddedOffset);
227
228 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
229 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
230 if (err) return (err);
231
232 vhp = (HFSPlusVolumeHeader *) block.buffer;
233 mdbp = (HFSMasterDirectoryBlock *) NULL;
234
235 }
236 }
237
238 if (vhp == NULL) {
239 result = paramErr;
240 goto out;
241 }
242 if ((err = ValidVolumeHeader(vhp)) != noErr) {
243 result = err;
244 goto out;
245 }
246
247 // journalInfoBlock is not automatically swapped
248 jiBlk = SW32(vhp->journalInfoBlock);
249 blockSize = vhp->blockSize;
250 (void)ReleaseVolumeBlock(vcb, &block, rbOptions);
251
252 if (jiBlk) {
253 int jfd = GPtr->DrvNum;
254 uint8_t block[blockSize];
255 ssize_t nread;
256
257 nread = pread(jfd, block, blockSize, (off_t)jiBlk * blockSize + embeddedOffset);
258 if (nread == blockSize) {
259 if (jibp)
260 memcpy(jibp, block, sizeof(JournalInfoBlock));
261 if (bsizep)
262 *bsizep = (UInt32)blockSize;
263 result = 0;
264 } else {
265 if (debug) {
266 plog("%s: Tried to read JIB, got %zd\n", __FUNCTION__, nread);
267 }
268 result = EINVAL;
269 }
270 }
271
272 out:
273 return (result);
274 }
275
276 /*
277 * Journal checksum calculation, taken directly from TN1150.
278 */
279 static int
280 calc_checksum(unsigned char *ptr, int len)
281 {
282 int i, cksum=0;
283
284 for(i=0; i < len; i++, ptr++) {
285 cksum = (cksum << 8) ^ (cksum + *ptr);
286 }
287
288 return (~cksum);
289 }
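/*
 * Worked example (editorial): applied to the two bytes { 0x01, 0x02 } the
 * loop evaluates
 *	cksum = (0x000 << 8) ^ (0x000 + 0x01) = 0x001
 *	cksum = (0x001 << 8) ^ (0x001 + 0x02) = 0x103
 * and the function returns ~0x103 = 0xFFFFFEFC.  The journal code below
 * zeroes the on-disk checksum field before recomputing the checksum over
 * the first JOURNAL_HEADER_CKSUM_SIZE bytes of the header.
 */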
290
291 /*
292 * The journal_header structure is not defined in <hfs/hfs_format.h>;
293 * it's described in TN1150. It is on disk in the endian mode that was
294 * used to write it, so we may or may not need to swap the fields.
295 */
296 typedef struct journal_header {
297 UInt32 magic;
298 UInt32 endian;
299 UInt64 start;
300 UInt64 end;
301 UInt64 size;
302 UInt32 blhdr_size;
303 UInt32 checksum;
304 UInt32 jhdr_size;
305 UInt32 sequence_num;
306 } journal_header;
307
308 #define JOURNAL_HEADER_MAGIC 0x4a4e4c78
309 #define ENDIAN_MAGIC 0x12345678
310 #define JOURNAL_HEADER_CKSUM_SIZE (offsetof(struct journal_header, sequence_num))
311
312 /*
313 * Determine if a journal is empty.
314 * This code can use an in-filesystem, or external, journal.
315 * In general, it returns 0 if the journal exists, and appears to
316 * be non-empty (that is, start and end in the journal header are
317 * not the same); it will return 1 if it exists and is empty, or if
318 * there was a problem getting the journal. (This behaviour was
319 * chosen because it mimics the existing behaviour of fsck_hfs,
320 * which has traditionally done nothing with the journal. Future
321 * versions may be more demanding.)
322 *
323 * <jp> is an OUT parameter: the contents of the structure it points
324 * to are filled in by this routine. (The reasoning for doing this
325 * is because this routine has to open the journal info block, and read
326 * from the journal device, so putting this in another function was
327 * duplicative and error-prone. By making it a structure instead of
328 * discrete arguments, it can also be extended in the future if necessary.)
329 */
330 int
331 IsJournalEmpty(SGlobPtr GPtr, fsckJournalInfo_t *jp)
332 {
333 int retval = 1;
334 OSErr result;
335 OSErr err = 0;
336 JournalInfoBlock jib;
337 UInt32 bsize;
338
339 result = GetJournalInfoBlock(GPtr, &jib, &bsize);
340 if (result == 0) {
341 /* jib is not byte swapped */
342 /* If the journal needs to be initialized, it's empty. */
343 if ((SW32(jib.flags) & kJIJournalNeedInitMask) == 0) {
344 off_t hdrOffset = SW64(jib.offset);
345 struct journal_header *jhdr;
346 uint8_t block[bsize];
347 ssize_t nread;
348 int jfd = -1;
349
350 /* If it's an external journal, kJIJournalInFSMask will not be set */
351 if (SW32(jib.flags) & kJIJournalInFSMask) {
352 jfd = dup(GPtr->DrvNum);
353 jp->name = strdup(GPtr->deviceNode);
354 } else {
355 char **namePtr = jp ? &jp->name : NULL;
356 if (debug)
357 plog("External Journal device\n");
358 jfd = OpenDeviceByUUID(&jib.ext_jnl_uuid, namePtr);
359 }
360 if (jfd == -1) {
361 if (debug) {
362 plog("Unable to get journal file descriptor, journal flags = %#x\n", SW32(jib.flags));
363 }
364 goto out;
365 }
366 if (jp) {
367 jp->jnlfd = jfd;
368 jp->jnlOffset = SW64(jib.offset);
369 jp->jnlSize = SW64(jib.size);
370 }
371
372 nread = pread(jfd, block, bsize, hdrOffset);
373 if (nread == -1) {
374 if (debug) {
375 plog("Could not read journal from descriptor %d: %s", jfd, strerror(errno));
376 }
377 err = errno;
378 } else if (nread != bsize) {
379 if (debug) {
380 plog("Only read %zd bytes from journal (expected %u)", nread, bsize);
381 }
382 err = EINVAL;
383 }
384 if (jp == NULL)
385 close(jfd);
386 /* We got the journal header, now we need to check it */
387 if (err == noErr) {
388 int swap = 0;
389 UInt32 cksum = 0;
390
391 jhdr = (struct journal_header*)block;
392
393 if (jhdr->magic == JOURNAL_HEADER_MAGIC ||
394 SW32(jhdr->magic) == JOURNAL_HEADER_MAGIC) {
395 if (jhdr->endian == ENDIAN_MAGIC)
396 swap = 0;
397 else if (SW32(jhdr->endian) == ENDIAN_MAGIC)
398 swap = 1;
399 else
400 swap = 2;
401
402 if (swap != 2) {
403 cksum = swap ? SW32(jhdr->checksum) : jhdr->checksum;
404 UInt32 calc_sum;
405 jhdr->checksum = 0;
406 /* Checksum calculation needs the checksum field to be zero. */
407 calc_sum = calc_checksum((unsigned char*)jhdr, JOURNAL_HEADER_CKSUM_SIZE);
408 /* But, for now, this is for debugging purposes only */
409 if (calc_sum != cksum) {
410 if (debug)
411 plog("Journal checksum doesn't match: orig %x != calc %x\n", cksum, calc_sum);
412 }
413 /* We have a journal, we got the header, now we check the start and end */
414 if (jhdr->start != jhdr->end) {
415 retval = 0;
416 if (debug)
417 plog("Non-empty journal: start = %lld, end = %lld\n",
418 swap ? SW64(jhdr->start) : jhdr->start,
419 swap ? SW64(jhdr->end) : jhdr->end);
420 }
421 }
422 }
423 }
424 }
425 }
426 out:
427 return retval;
428 }
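/*
 * Editorial usage sketch (not part of the original source): the caller owns
 * the descriptor and name returned through the fsckJournalInfo_t
 * out-parameter.  Field names match those assigned above; treating it as a
 * plain struct with a designated initializer is an assumption.
 */
#if 0	// illustrative only
	fsckJournalInfo_t jinfo = { .jnlfd = -1 };
	if (IsJournalEmpty(GPtr, &jinfo) == 0) {
		plog("journal at offset %lld (size %lld) on %s is not empty\n",
			(long long)jinfo.jnlOffset, (long long)jinfo.jnlSize, jinfo.name);
	}
	if (jinfo.jnlfd >= 0)
		close(jinfo.jnlfd);
	if (jinfo.name != NULL)
		free(jinfo.name);
#endif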
429
430 /*
431 * The function checks whether the volume is clean or dirty. It
432 * also marks the volume as clean/dirty depending on the type
433 * of operation specified. It modifies the volume header only
434 * if the old values are not the same as the new values. If the volume
435 * header is updated, it also sets the last mounted version for HFS+.
436 *
437 * Input:
438 * GPtr - Pointer to scavenger global area
439 * operation - Type of operation to perform
440 * kCheckVolume, // check if volume is clean/dirty
441 * kMarkVolumeDirty, // mark the volume dirty
442 * kMarkVolumeClean // mark the volume clean
443 *
444 * Output:
445 * modified - true if the VH/MDB was modified, otherwise false.
446 * Return Value -
447 * -1 - if the volume is not an HFS/HFS+ volume
448 * 0 - if the volume was dirty or marked dirty
449 * 1 - if the volume was clean or marked clean
450 * If the operation requested was to mark the volume clean/dirty,
451 * the return value is dependent on type of operation (described above).
452 */
453 int CheckForClean(SGlobPtr GPtr, UInt8 operation, Boolean *modified)
454 {
455 enum { unknownVolume = -1, cleanUnmount = 1, dirtyUnmount = 0};
456 int result = unknownVolume;
457 Boolean update = false;
458 HFSMasterDirectoryBlock *mdbp;
459 HFSPlusVolumeHeader *vhp;
460 BlockDescriptor block;
461 ReleaseBlockOptions rbOptions;
462 UInt64 blockNum;
463 SVCB *vcb;
464
465 *modified = false;
466 vcb = GPtr->calculatedVCB;
467 block.buffer = NULL;
468 rbOptions = kReleaseBlock;
469
470 /* Get the block number for VH/MDB */
471 GetVolumeObjectBlockNum(&blockNum);
472 if (blockNum == 0) {
473 if (fsckGetVerbosity(GPtr->context) >= kDebugLog)
474 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
475 goto ExitThisRoutine;
476 }
477
478 /* Get VH or MDB depending on the type of volume */
479 result = GetVolumeObjectPrimaryBlock(&block);
480 if (result) {
481 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
482 plog( "\t%s - could not get VHB/MDB at block %qd \n", __FUNCTION__, blockNum );
483 result = unknownVolume;
484 goto ExitThisRoutine;
485 }
486
487 result = cleanUnmount;
488
489 if (VolumeObjectIsHFSPlus()) {
490 vhp = (HFSPlusVolumeHeader *) block.buffer;
491
492 /* Check unmount bit and volume inconsistent bit */
493 if (((vhp->attributes & kHFSVolumeUnmountedMask) == 0) ||
494 (vhp->attributes & kHFSVolumeInconsistentMask))
495 result = dirtyUnmount;
496
497 /* Check last mounted version. If kFSKMountVersion, bad
498 * journal was encountered during mount. Force dirty volume.
499 */
500
501 if (vhp->lastMountedVersion == kFSKMountVersion) {
502 GPtr->JStat |= S_BadJournal;
503 RcdError (GPtr, E_BadJournal);
504 result = dirtyUnmount;
505 }
506
507 if (operation == kMarkVolumeDirty) {
508 /* Mark volume was not unmounted cleanly */
509 if (vhp->attributes & kHFSVolumeUnmountedMask) {
510 vhp->attributes &= ~kHFSVolumeUnmountedMask;
511 update = true;
512 }
513 /* Mark volume inconsistent */
514 if ((vhp->attributes & kHFSVolumeInconsistentMask) == 0) {
515 vhp->attributes |= kHFSVolumeInconsistentMask;
516 update = true;
517 }
518 } else if (operation == kMarkVolumeClean) {
519 /* Mark volume was unmounted cleanly */
520 if ((vhp->attributes & kHFSVolumeUnmountedMask) == 0) {
521 vhp->attributes |= kHFSVolumeUnmountedMask;
522 update = true;
523 }
524 /* Mark volume consistent */
525 if (vhp->attributes & kHFSVolumeInconsistentMask) {
526 vhp->attributes &= ~kHFSVolumeInconsistentMask;
527 update = true;
528 }
529 }
530
531 /* If any changes to VH, update the last mounted version */
532 if (update == true) {
533 vhp->lastMountedVersion = kFSCKMountVersion;
534 }
535 } else if (VolumeObjectIsHFS()) {
536 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
537
538 /* Check unmount bit and volume inconsistent bit */
539 if (((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) ||
540 (mdbp->drAtrb & kHFSVolumeInconsistentMask))
541 result = dirtyUnmount;
542
543 if (operation == kMarkVolumeDirty) {
544 /* Mark volume was not unmounted cleanly */
545 if (mdbp->drAtrb & kHFSVolumeUnmountedMask) {
546 mdbp->drAtrb &= ~kHFSVolumeUnmountedMask;
547 update = true;
548 }
549 /* Mark volume inconsistent */
550 if ((mdbp->drAtrb & kHFSVolumeInconsistentMask) == 0) {
551 mdbp->drAtrb |= kHFSVolumeInconsistentMask;
552 update = true;
553 }
554 } else if (operation == kMarkVolumeClean) {
555 /* Mark volume was unmounted cleanly */
556 if ((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) {
557 mdbp->drAtrb |= kHFSVolumeUnmountedMask;
558 update = true;
559 }
560 /* Mark volume consistent */
561 if (mdbp->drAtrb & kHFSVolumeInconsistentMask) {
562 mdbp->drAtrb &= ~kHFSVolumeInconsistentMask;
563 update = true;
564 }
565 }
566 }
567
568 ExitThisRoutine:
569 if (update == true) {
570 *modified = true;
571 rbOptions = kForceWriteBlock;
572 /* Set appropriate return value */
573 if (operation == kMarkVolumeDirty) {
574 result = dirtyUnmount;
575 } else if (operation == kMarkVolumeClean) {
576 result = cleanUnmount;
577 }
578 }
579 if (block.buffer != NULL)
580 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
581
582 return (result);
583 }
584
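/*
 * Editorial usage sketch (not part of the original source): a typical
 * check/repair sequence using the three operations described above.
 * Error handling is omitted.
 */
#if 0	// illustrative only
	Boolean modified;
	int clean = CheckForClean(GPtr, kCheckVolume, &modified);	/* -1, 0 or 1 */
	if (clean == 0) {
		(void) CheckForClean(GPtr, kMarkVolumeDirty, &modified);	/* keep it marked dirty while repairing */
		/* ... run the verify/repair passes ... */
		(void) CheckForClean(GPtr, kMarkVolumeClean, &modified);	/* mark clean once repairs succeed */
	}
#endif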
585 /*------------------------------------------------------------------------------
586
587 Function: IVChk - (Initial Volume Check)
588
589 Function: Performs an initial check of the volume to be scavenged to confirm
590 that the volume can be accessed and that it is a HFS/HFS+ volume.
591
592 Input: GPtr - pointer to scavenger global area
593
594 Output: IVChk - function result:
595 0 = no error
596 n = error code
597 ------------------------------------------------------------------------------*/
598 #define kBitsPerSector 4096
599
600 OSErr IVChk( SGlobPtr GPtr )
601 {
602 OSErr err;
603 HFSMasterDirectoryBlock * myMDBPtr;
604 HFSPlusVolumeHeader * myVHBPtr;
605 UInt64 numBlk;
606 UInt32 numABlks;
607 UInt32 minABlkSz;
608 UInt32 maxNumberOfAllocationBlocks;
609 UInt32 realAllocationBlockSize;
610 UInt32 realTotalBlocks;
611 UInt32 i;
612 BTreeControlBlock *btcb;
613 SVCB *vcb = GPtr->calculatedVCB;
614 VolumeObjectPtr myVOPtr;
615 UInt64 blockNum;
616 UInt64 totalSectors;
617 BlockDescriptor myBlockDescriptor;
618
619 // Set up
620 GPtr->TarID = AMDB_FNum; // target = alt MDB
621 GPtr->TarBlock = 0;
622 maxNumberOfAllocationBlocks = 0xFFFFFFFF;
623 realAllocationBlockSize = 0;
624 realTotalBlocks = 0;
625
626 myBlockDescriptor.buffer = NULL;
627 myVOPtr = GetVolumeObjectPtr( );
628
629 // check volume size
630 if ( myVOPtr->totalDeviceSectors < 3 ) {
631 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
632 plog("\tinvalid device information for volume - total sectors = %qd sector size = %d \n",
633 myVOPtr->totalDeviceSectors, myVOPtr->sectorSize);
634 return( 123 );
635 }
636
637 GetVolumeObjectBlockNum( &blockNum );
638 if ( blockNum == 0 || myVOPtr->volumeType == kUnknownVolumeType ) {
639 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
640 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
641 err = R_BadSig; /* doesn't bear the HFS signature */
642 goto ReleaseAndBail;
643 }
644
645 // get Volume Header (HFS+) or Master Directory (HFS) block
646 err = GetVolumeObjectVHBorMDB( &myBlockDescriptor );
647 if ( err != noErr ) {
648 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
649 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
650 goto ReleaseAndBail;
651 }
652 myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer;
653
654 // if this is an HFS (kHFSVolumeType) volume and the MDB indicates this
655 // might contain an embedded HFS+ volume then we need to scan
656 // for an embedded HFS+ volume. I'm told there were some old problems
657 // where we could lose track of the embedded volume.
658 if ( VolumeObjectIsHFS( ) &&
659 (myMDBPtr->drEmbedSigWord != 0 ||
660 myMDBPtr->drEmbedExtent.blockCount != 0 ||
661 myMDBPtr->drEmbedExtent.startBlock != 0) ) {
662
663 err = ScavengeVolumeType( GPtr, myMDBPtr, &myVOPtr->volumeType );
664 if ( err == E_InvalidMDBdrAlBlSt )
665 err = RcdMDBEmbededVolDescriptionErr( GPtr, E_InvalidMDBdrAlBlSt, myMDBPtr );
666
667 if ( VolumeObjectIsEmbeddedHFSPlus( ) ) {
668 // we changed volume types so let's get the VHB
669 (void) ReleaseVolumeBlock( vcb, &myBlockDescriptor, kReleaseBlock );
670 myBlockDescriptor.buffer = NULL;
671 myMDBPtr = NULL;
672 err = GetVolumeObjectVHB( &myBlockDescriptor );
673 if ( err != noErr ) {
674 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
675 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
676 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
677 err = E_InvalidVolumeHeader;
678 goto ReleaseAndBail;
679 }
680
681 GetVolumeObjectBlockNum( &blockNum ); // get the new Volume header block number
682 }
683 else {
684 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
685 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
686 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
687 err = E_InvalidVolumeHeader;
688 goto ReleaseAndBail;
689 }
690 }
691
692 totalSectors = ( VolumeObjectIsEmbeddedHFSPlus( ) ) ? myVOPtr->totalEmbeddedSectors : myVOPtr->totalDeviceSectors;
693
694 // indicate what type of volume we are dealing with
695 if ( VolumeObjectIsHFSPlus( ) ) {
696
697 myVHBPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer;
698 if (myVHBPtr->attributes & kHFSVolumeJournaledMask) {
699 fsckPrint(GPtr->context, hfsJournalVolCheck);
700 } else {
701 fsckPrint(GPtr->context, hfsCheckNoJnl);
702 }
703 GPtr->numExtents = kHFSPlusExtentDensity;
704 vcb->vcbSignature = kHFSPlusSigWord;
705
706 // Further populate the VCB with VolumeHeader info
707 vcb->vcbAlBlSt = myVOPtr->embeddedOffset / 512;
708 vcb->vcbEmbeddedOffset = myVOPtr->embeddedOffset;
709 realAllocationBlockSize = myVHBPtr->blockSize;
710 realTotalBlocks = myVHBPtr->totalBlocks;
711 vcb->vcbNextCatalogID = myVHBPtr->nextCatalogID;
712 vcb->vcbCreateDate = myVHBPtr->createDate;
713 vcb->vcbAttributes = myVHBPtr->attributes & kHFSCatalogNodeIDsReused;
714
715 if ( myVHBPtr->attributesFile.totalBlocks == 0 )
716 vcb->vcbAttributesFile = NULL; /* XXX memory leak ? */
717
718 // Make sure the Extents B-Tree is set to use 16-bit key lengths.
719 // We access it before completely setting up the control block.
720 btcb = (BTreeControlBlock *) vcb->vcbExtentsFile->fcbBtree;
721 btcb->attributes |= kBTBigKeysMask;
722
723 // catch the case where the volume allocation block count is greater than
724 // maximum number of device allocation blocks. - bug 2916021
725 numABlks = (UInt32)(myVOPtr->totalDeviceSectors / ( myVHBPtr->blockSize / Blk_Size ));
726 if ( myVHBPtr->totalBlocks > numABlks ) {
727 RcdError( GPtr, E_NABlks );
728 err = E_NABlks;
729 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) {
730 plog( "\t%s - volume header total allocation blocks is greater than device size \n", __FUNCTION__ );
731 plog( "\tvolume allocation block count %d device allocation block count %d \n",
732 myVHBPtr->totalBlocks, numABlks );
733 }
734 goto ReleaseAndBail;
735 }
736 }
737 else if ( VolumeObjectIsHFS( ) ) {
738
739 // fsckPrint(GPtr->context, fsckCheckingVolume);
740 fsckPrint(GPtr->context, hfsCheckHFS);
741
742 GPtr->numExtents = kHFSExtentDensity;
743 vcb->vcbSignature = myMDBPtr->drSigWord;
744 maxNumberOfAllocationBlocks = 0xFFFF;
745 // set up next file ID, CheckBTreeKey makes sure we are under this value
746 vcb->vcbNextCatalogID = myMDBPtr->drNxtCNID;
747 vcb->vcbCreateDate = myMDBPtr->drCrDate;
748
749 realAllocationBlockSize = myMDBPtr->drAlBlkSiz;
750 realTotalBlocks = myMDBPtr->drNmAlBlks;
751 }
752
753 GPtr->TarBlock = blockNum; // target block
754
755 // verify volume allocation info
756 // Note: i is the number of sectors per allocation block
757 numBlk = totalSectors;
758 minABlkSz = Blk_Size; // init minimum ablock size
759 // loop while #ablocks won't fit
760 for( i = 2; numBlk > maxNumberOfAllocationBlocks; i++ ) {
761 minABlkSz = i * Blk_Size; // jack up minimum
762 numBlk = totalSectors / i; // recompute #ablocks, assuming this size
763 }
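// Worked example (editorial, hypothetical numbers): classic HFS caps the volume at
// 0xFFFF allocation blocks, so for totalSectors = 1,000,000 (~488 MB) the loop
// runs until i = 16, leaving minABlkSz = 16 * 512 = 8192 bytes and numBlk = 62,500.
// For HFS+ the cap is 0xFFFFFFFF, so the loop normally never executes and
// minABlkSz stays at one 512-byte sector.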
764
765 numABlks = (UInt32)numBlk;
766 vcb->vcbBlockSize = realAllocationBlockSize;
767 numABlks = (UInt32)(totalSectors / ( realAllocationBlockSize / Blk_Size ));
768 if ( VolumeObjectIsHFSPlus( ) ) {
769 // HFS Plus allocation block size must be power of 2
770 if ( (realAllocationBlockSize < minABlkSz) ||
771 (realAllocationBlockSize & (realAllocationBlockSize - 1)) != 0 )
772 realAllocationBlockSize = 0;
773 }
774 else {
775 if ( (realAllocationBlockSize < minABlkSz) ||
776 (realAllocationBlockSize > Max_ABSiz) ||
777 ((realAllocationBlockSize % Blk_Size) != 0) )
778 realAllocationBlockSize = 0;
779 }
780
781 if ( realAllocationBlockSize == 0 ) {
782 RcdError( GPtr, E_ABlkSz );
783 err = E_ABlkSz; // bad allocation block size
784 goto ReleaseAndBail;
785 }
786
787 vcb->vcbTotalBlocks = realTotalBlocks;
788 vcb->vcbFreeBlocks = 0;
789
790 // Only do these tests on HFS volumes, since they either do not
791 // apply to HFS Plus or getting the VolumeHeader would have already failed.
792 if ( VolumeObjectIsHFS( ) ) {
793 UInt32 bitMapSizeInSectors;
794
795 // Calculate the volume bitmap size
796 bitMapSizeInSectors = ( numABlks + kBitsPerSector - 1 ) / kBitsPerSector; // VBM size in sectors
797
798 //•• Calculate the validity of HFS Allocation blocks, I think realTotalBlocks == numABlks
799 numABlks = (UInt32)((totalSectors - 3 - bitMapSizeInSectors) / (realAllocationBlockSize / Blk_Size)); // actual # of alloc blks
800
801 if ( realTotalBlocks > numABlks ) {
802 RcdError( GPtr, E_NABlks );
803 err = E_NABlks; // invalid number of allocation blocks
804 goto ReleaseAndBail;
805 }
806
807 if ( myMDBPtr->drVBMSt <= MDB_BlkN ) {
808 RcdError(GPtr,E_VBMSt);
809 err = E_VBMSt; // invalid VBM start block
810 goto ReleaseAndBail;
811 }
812 vcb->vcbVBMSt = myMDBPtr->drVBMSt;
813
814 if (myMDBPtr->drAlBlSt < (myMDBPtr->drVBMSt + bitMapSizeInSectors)) {
815 RcdError(GPtr,E_ABlkSt);
816 err = E_ABlkSt; // invalid starting alloc block
817 goto ReleaseAndBail;
818 }
819 vcb->vcbAlBlSt = myMDBPtr->drAlBlSt;
820 }
821
822 ReleaseAndBail:
823 if (myBlockDescriptor.buffer != NULL)
824 (void) ReleaseVolumeBlock(vcb, &myBlockDescriptor, kReleaseBlock);
825
826 return( err );
827 }
828
829
830 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType )
831 {
832 UInt64 vHSector;
833 UInt64 startSector;
834 UInt64 altVHSector;
835 UInt64 hfsPlusSectors = 0;
836 UInt32 sectorsPerBlock;
837 UInt32 numSectorsToSearch;
838 OSErr err;
839 HFSPlusVolumeHeader *volumeHeader;
840 HFSExtentDescriptor embededExtent;
841 SVCB *calculatedVCB = GPtr->calculatedVCB;
842 VolumeObjectPtr myVOPtr;
843 UInt16 embedSigWord = mdb->drEmbedSigWord;
844 BlockDescriptor block;
845
846 /*
847 * If all of the embedded volume information is zero, then assume
848 * this really is a plain HFS disk like it says. Otherwise, if
849 * you reinitialize a large HFS Plus volume as HFS, the original
850 * embedded volume's volume header and alternate volume header will
851 * still be there, and we'll try to repair the embedded volume.
852 */
853 if (embedSigWord == 0 &&
854 mdb->drEmbedExtent.blockCount == 0 &&
855 mdb->drEmbedExtent.startBlock == 0)
856 {
857 *volumeType = kHFSVolumeType;
858 return noErr;
859 }
860
861 myVOPtr = GetVolumeObjectPtr( );
862 *volumeType = kEmbededHFSPlusVolumeType; // Assume HFS+
863
864 //
865 // First see if it is an HFS+ volume and the relevant structures look OK
866 //
867 if ( embedSigWord == kHFSPlusSigWord )
868 {
869 /* look for primary volume header */
870 vHSector = (UInt64)mdb->drAlBlSt +
871 ((UInt64)(mdb->drAlBlkSiz / Blk_Size) * (UInt64)mdb->drEmbedExtent.startBlock) + 2;
872
873 err = GetVolumeBlock(calculatedVCB, vHSector, kGetBlock, &block);
874 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
875 if ( err != noErr ) goto AssumeHFS;
876
877 myVOPtr->primaryVHB = vHSector;
878 err = ValidVolumeHeader( volumeHeader );
879 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
880 if ( err == noErr ) {
881 myVOPtr->flags |= kVO_PriVHBOK;
882 return( noErr );
883 }
884 }
885
886 sectorsPerBlock = mdb->drAlBlkSiz / Blk_Size;
887
888 // Search the end of the disk to see if a Volume Header is present at all
889 if ( embedSigWord != kHFSPlusSigWord )
890 {
891 numSectorsToSearch = mdb->drAlBlkSiz / Blk_Size;
892 startSector = myVOPtr->totalDeviceSectors - 4 - numSectorsToSearch;
893
894 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &altVHSector );
895 if ( err != noErr ) goto AssumeHFS;
896
897 // We found the Alt VH, so this must be a damaged embedded HFS+ volume
898 // Now Scavenge for the Primary VolumeHeader
899 myVOPtr->alternateVHB = altVHSector;
900 myVOPtr->flags |= kVO_AltVHBOK;
901 startSector = mdb->drAlBlSt + (4 * sectorsPerBlock); // Start looking at 4th HFS allocation block
902 numSectorsToSearch = 10 * sectorsPerBlock; // search for VH in next 10 allocation blocks
903
904 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &vHSector );
905 if ( err != noErr ) goto AssumeHFS;
906
907 myVOPtr->primaryVHB = vHSector;
908 myVOPtr->flags |= kVO_PriVHBOK;
909 hfsPlusSectors = altVHSector - vHSector + 1 + 2 + 1; // numSectors + BB + end
910
911 // Fix the embedded extent
912 embededExtent.blockCount = hfsPlusSectors / sectorsPerBlock;
913 embededExtent.startBlock = (vHSector - 2 - mdb->drAlBlSt ) / sectorsPerBlock;
914 embedSigWord = kHFSPlusSigWord;
915
916 myVOPtr->embeddedOffset =
917 (embededExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
918 }
919 else
920 {
921 embedSigWord = mdb->drEmbedSigWord;
922 embededExtent.blockCount = mdb->drEmbedExtent.blockCount;
923 embededExtent.startBlock = mdb->drEmbedExtent.startBlock;
924 }
925
926 if ( embedSigWord == kHFSPlusSigWord )
927 {
928 startSector = 2 + mdb->drAlBlSt +
929 ((UInt64)embededExtent.startBlock * (mdb->drAlBlkSiz / Blk_Size));
930
931 err = SeekVolumeHeader( GPtr, startSector, mdb->drAlBlkSiz / Blk_Size, &vHSector );
932 if ( err != noErr ) goto AssumeHFS;
933
934 // Now replace the bad fields and mark the error
935 mdb->drEmbedExtent.blockCount = embededExtent.blockCount;
936 mdb->drEmbedExtent.startBlock = embededExtent.startBlock;
937 mdb->drEmbedSigWord = kHFSPlusSigWord;
938 mdb->drAlBlSt += vHSector - startSector; // Fix the bad field
939 myVOPtr->totalEmbeddedSectors = (mdb->drAlBlkSiz / Blk_Size) * mdb->drEmbedExtent.blockCount;
940 myVOPtr->embeddedOffset =
941 (mdb->drEmbedExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
942 myVOPtr->primaryVHB = vHSector;
943 myVOPtr->flags |= kVO_PriVHBOK;
944
945 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
946 return( E_InvalidMDBdrAlBlSt );
947 }
948
949 AssumeHFS:
950 *volumeType = kHFSVolumeType;
951 return( noErr );
952
953 } /* ScavengeVolumeType */
954
955
956 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector )
957 {
958 OSErr err;
959 HFSPlusVolumeHeader *volumeHeader;
960 SVCB *calculatedVCB = GPtr->calculatedVCB;
961 BlockDescriptor block;
962
963 for ( *vHSector = startSector ; *vHSector < startSector + numSectors ; (*vHSector)++ )
964 {
965 err = GetVolumeBlock(calculatedVCB, *vHSector, kGetBlock, &block);
966 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
967 if ( err != noErr ) return( err );
968
969 err = ValidVolumeHeader(volumeHeader);
970
971 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
972 if ( err == noErr )
973 return( noErr );
974 }
975
976 return( fnfErr );
977 }
978
979
980 #if 0 // not used at this time
981 static OSErr CheckWrapperExtents( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb )
982 {
983 OSErr err = noErr;
984
985 // See if Norton Disk Doctor 2.0 corrupted the catalog's first extent
986 if ( mdb->drCTExtRec[0].startBlock >= mdb->drEmbedExtent.startBlock)
987 {
988 // Fix the field in the in-memory copy, and record the error
989 mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + mdb->drXTExtRec[0].blockCount;
990 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
991 err = RcdInvalidWrapperExtents( GPtr, E_InvalidWrapperExtents );
992 }
993
994 return err;
995 }
996 #endif
997
998 /*------------------------------------------------------------------------------
999
1000 Function: CreateExtentsBTreeControlBlock
1001
1002 Function: Create the calculated ExtentsBTree Control Block
1003
1004 Input: GPtr - pointer to scavenger global area
1005
1006 Output: - 0 = no error
1007 n = error code
1008 ------------------------------------------------------------------------------*/
1009
1010 OSErr CreateExtentsBTreeControlBlock( SGlobPtr GPtr )
1011 {
1012 OSErr err;
1013 SInt32 size;
1014 UInt32 numABlks;
1015 BTHeaderRec header;
1016 BTreeControlBlock * btcb;
1017 SVCB * vcb;
1018 BlockDescriptor block;
1019 Boolean isHFSPlus;
1020
1021 // Set up
1022 isHFSPlus = VolumeObjectIsHFSPlus( );
1023 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1024 GPtr->TarBlock = kHeaderNodeNum; // target block = header node
1025 vcb = GPtr->calculatedVCB;
1026 btcb = GPtr->calculatedExtentsBTCB;
1027 block.buffer = NULL;
1028
1029 // get Volume Header (HFS+) or Master Directory (HFS) block
1030 err = GetVolumeObjectVHBorMDB( &block );
1031 if (err) goto exit;
1032 //
1033 // check out allocation info for the Extents File
1034 //
1035 if (isHFSPlus)
1036 {
1037 HFSPlusVolumeHeader *volumeHeader;
1038
1039 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1040
1041 CopyMemory(volumeHeader->extentsFile.extents, GPtr->calculatedExtentsFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1042
1043 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents32, &numABlks); // check out extent info
1044
1045 if (err) goto exit;
1046
1047 if ( volumeHeader->extentsFile.totalBlocks != numABlks ) // check out the PEOF
1048 {
1049 RcdError( GPtr, E_ExtPEOF );
1050 err = E_ExtPEOF;
1051 if (debug)
1052 plog("Extents File totalBlocks = %u, numABlks = %u\n", volumeHeader->extentsFile.totalBlocks, numABlks);
1053 goto exit;
1054 }
1055 else
1056 {
1057 GPtr->calculatedExtentsFCB->fcbLogicalSize = volumeHeader->extentsFile.logicalSize; // Set Extents tree's LEOF
1058 GPtr->calculatedExtentsFCB->fcbPhysicalSize = (UInt64)volumeHeader->extentsFile.totalBlocks *
1059 (UInt64)volumeHeader->blockSize; // Set Extents tree's PEOF
1060 }
1061
1062 //
1063 // Set up the minimal BTreeControlBlock structure
1064 //
1065
1066 // Read the BTreeHeader from disk & also validate its node size.
1067 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1068 if (err) goto exit;
1069
1070 btcb->maxKeyLength = kHFSPlusExtentKeyMaximumLength; // max key length
1071 btcb->keyCompareProc = (void *)CompareExtentKeysPlus;
1072 btcb->attributes |=kBTBigKeysMask; // HFS+ Extent files have 16-bit key length
1073 btcb->leafRecords = header.leafRecords;
1074 btcb->treeDepth = header.treeDepth;
1075 btcb->rootNode = header.rootNode;
1076 btcb->firstLeafNode = header.firstLeafNode;
1077 btcb->lastLeafNode = header.lastLeafNode;
1078
1079 btcb->nodeSize = header.nodeSize;
1080 btcb->totalNodes = (UInt32)( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1081 btcb->freeNodes = btcb->totalNodes; // start with everything free
1082
1083 // Make sure the header node's size field is correct by looking at the 1st record offset
1084 err = CheckNodesFirstOffset( GPtr, btcb );
1085 if ( (err != noErr) && (btcb->nodeSize != 1024) ) // default HFS+ Extents node size is 1024
1086 {
1087 btcb->nodeSize = 1024;
1088 btcb->totalNodes = (UInt32)( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1089 btcb->freeNodes = btcb->totalNodes; // start with everything free
1090
1091 err = CheckNodesFirstOffset( GPtr, btcb );
1092 if (err) goto exit;
1093
1094 GPtr->EBTStat |= S_BTH; // update the Btree header
1095 }
1096 }
1097 else // Classic HFS
1098 {
1099 HFSMasterDirectoryBlock *alternateMDB;
1100
1101 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1102
1103 CopyMemory(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1104 // ExtDataRecToExtents(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents);
1105
1106
1107 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents16, &numABlks); /* check out extent info */
1108 if (err) goto exit;
1109
1110 if (alternateMDB->drXTFlSize != ((UInt64)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize))// check out the PEOF
1111 {
1112 RcdError(GPtr,E_ExtPEOF);
1113 err = E_ExtPEOF;
1114 if (debug)
1115 plog("Alternate MDB drXTFlSize = %llu, should be %llu\n", (long long)alternateMDB->drXTFlSize, (long long)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize);
1116 goto exit;
1117 }
1118 else
1119 {
1120 GPtr->calculatedExtentsFCB->fcbPhysicalSize = alternateMDB->drXTFlSize; // set up PEOF and EOF in FCB
1121 GPtr->calculatedExtentsFCB->fcbLogicalSize = GPtr->calculatedExtentsFCB->fcbPhysicalSize;
1122 }
1123
1124 //
1125 // Set up the minimal BTreeControlBlock structure
1126 //
1127
1128 // Read the BTreeHeader from disk & also validate its node size.
1129 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1130 if (err) goto exit;
1131
1132 btcb->maxKeyLength = kHFSExtentKeyMaximumLength; // max key length
1133 btcb->keyCompareProc = (void *)CompareExtentKeys;
1134 btcb->leafRecords = header.leafRecords;
1135 btcb->treeDepth = header.treeDepth;
1136 btcb->rootNode = header.rootNode;
1137 btcb->firstLeafNode = header.firstLeafNode;
1138 btcb->lastLeafNode = header.lastLeafNode;
1139
1140 btcb->nodeSize = header.nodeSize;
1141 btcb->totalNodes = (UInt32)(GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1142 btcb->freeNodes = btcb->totalNodes; // start with everything free
1143
1144 // Make sure the header node's size field is correct by looking at the 1st record offset
1145 err = CheckNodesFirstOffset( GPtr, btcb );
1146 if (err) goto exit;
1147 }
1148
1149 if ( header.btreeType != kHFSBTreeType )
1150 {
1151 GPtr->EBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1152 }
1153
1154 //
1155 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes.
1156 //
1157 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1158 if ( btcb->refCon == nil ) {
1159 err = R_NoMem;
1160 goto exit;
1161 }
1162 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1163 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1164 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1165 {
1166 err = R_NoMem;
1167 goto exit;
1168 }
1169
1170 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1171 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes;// keep track of real free nodes for progress
1172 exit:
1173 if ( block.buffer != NULL )
1174 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1175
1176 return (err);
1177 }
1178
1179
1180
1181 /*------------------------------------------------------------------------------
1182
1183 Function: CheckNodesFirstOffset
1184
1185 Function: Minimal check verifies that the 1st offset is within bounds. If it's not,
1186 the nodeSize may be wrong. In the future this routine could be modified
1187 to try different size values until one fits.
1188
1189 ------------------------------------------------------------------------------*/
1190 #define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))
1191 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb )
1192 {
1193 NodeRec nodeRec;
1194 UInt16 offset;
1195 OSErr err;
1196
1197 (void) SetFileBlockSize(btcb->fcbPtr, btcb->nodeSize);
1198
1199 err = GetNode( btcb, kHeaderNodeNum, &nodeRec );
1200
1201 if ( err == noErr )
1202 {
1203 offset = GetRecordOffset( btcb, (NodeDescPtr)nodeRec.buffer, 0 );
1204 if ( (offset < sizeof (BTNodeDescriptor)) || // offset < minimum
1205 (offset & 1) || // offset is odd
1206 (offset >= btcb->nodeSize) ) // offset beyond end of node
1207 {
1208 if (debug) fprintf(stderr, "%s(%d): offset is wrong\n", __FUNCTION__, __LINE__);
1209 err = fsBTInvalidNodeErr;
1210 }
1211 }
1212
1213 if ( err != noErr )
1214 RcdError( GPtr, E_InvalidNodeSize );
1215
1216 (void) ReleaseNode(btcb, &nodeRec);
1217
1218 return( err );
1219 }
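/*
 * Editorial layout sketch: record offsets are stored at the end of each
 * B-tree node, growing backwards, which is what the GetRecordOffset()
 * macro above indexes.  For a hypothetical 512-byte node holding two
 * records:
 *
 *	0x000  BTNodeDescriptor (14 bytes)
 *	0x00E  record 0				<- offset[0] = 0x000E
 *	 ...   record 1				<- offset[1]
 *	0x1FA  offset to free space
 *	0x1FC  offset[1]
 *	0x1FE  offset[0] = 0x000E		(nodeSize - (0 << 1) - kOffsetSize)
 *
 * CheckNodesFirstOffset() only sanity-checks offset[0]: it must be even,
 * at least sizeof(BTNodeDescriptor), and less than nodeSize.
 */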
1220
1221
1222
1223 /*------------------------------------------------------------------------------
1224
1225 Function: ExtBTChk - (Extent BTree Check)
1226
1227 Function: Verifies the extent BTree structure.
1228
1229 Input: GPtr - pointer to scavenger global area
1230
1231 Output: ExtBTChk - function result:
1232 0 = no error
1233 n = error code
1234 ------------------------------------------------------------------------------*/
1235
1236 OSErr ExtBTChk( SGlobPtr GPtr )
1237 {
1238 OSErr err;
1239
1240 // Set up
1241 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1242 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1243
1244 //
1245 // check out the BTree structure
1246 //
1247
1248 err = BTCheck(GPtr, kCalculatedExtentRefNum, NULL);
1249 ReturnIfError( err ); // invalid extent file BTree
1250
1251 //
1252 // check out the allocation map structure
1253 //
1254
1255 err = BTMapChk( GPtr, kCalculatedExtentRefNum );
1256 ReturnIfError( err ); // Invalid extent BTree map
1257
1258 //
1259 // Make sure unused nodes in the B-tree are zero filled.
1260 //
1261 err = BTCheckUnusedNodes(GPtr, kCalculatedExtentRefNum, &GPtr->EBTStat);
1262 ReturnIfError( err );
1263
1264 //
1265 // compare BTree header record on disk with scavenger's BTree header record
1266 //
1267
1268 err = CmpBTH( GPtr, kCalculatedExtentRefNum );
1269 ReturnIfError( err );
1270
1271 //
1272 // compare BTree map on disk with scavenger's BTree map
1273 //
1274
1275 err = CmpBTM( GPtr, kCalculatedExtentRefNum );
1276
1277 return( err );
1278 }
1279
1280
1281
1282 /*------------------------------------------------------------------------------
1283
1284 Function: BadBlockFileExtentCheck - (Check extents of bad block file)
1285
1286 Function:
1287 Verifies the extents of bad block file (kHFSBadBlockFileID) that
1288 exist in extents Btree.
1289
1290 Note that the extents for other file IDs < kHFSFirstUserCatalogNodeID
1291 are being taken care of in the following functions:
1292
1293 kHFSExtentsFileID - CreateExtentsBTreeControlBlock
1294 kHFSCatalogFileID - CreateCatalogBTreeControlBlock
1295 kHFSAllocationFileID - CreateExtendedAllocationsFCB
1296 kHFSStartupFileID - CreateExtendedAllocationsFCB
1297 kHFSAttributesFileID - CreateAttributesBTreeControlBlock
1298
1299 Input: GPtr - pointer to scavenger global area
1300
1301 Output: BadBlockFileExtentCheck - function result:
1302 0 = no error
1303 +n = error code
1304 ------------------------------------------------------------------------------*/
1305
1306 OSErr BadBlockFileExtentCheck( SGlobPtr GPtr )
1307 {
1308 UInt32 attributes;
1309 void *p;
1310 OSErr result;
1311 SVCB *vcb;
1312 Boolean isHFSPlus;
1313 BlockDescriptor block;
1314
1315 isHFSPlus = VolumeObjectIsHFSPlus( );
1316 block.buffer = NULL;
1317
1318 //
1319 // process the bad block extents (created by the disk init pkg to hide badspots)
1320 //
1321 vcb = GPtr->calculatedVCB;
1322
1323 result = GetVolumeObjectVHBorMDB( &block );
1324 if ( result != noErr ) goto ExitThisRoutine; // error, couldn't get it
1325
1326 p = (void *) block.buffer;
1327 attributes = isHFSPlus == true ? ((HFSPlusVolumeHeader*)p)->attributes : ((HFSMasterDirectoryBlock*)p)->drAtrb;
1328
1329 //•• Does HFS+ honor the same mask?
1330 if ( attributes & kHFSVolumeSparedBlocksMask ) // if any badspots
1331 {
1332 HFSPlusExtentRecord zeroXdr; // dummy passed to 'CheckFileExtents'
1333 UInt32 numBadBlocks;
1334
1335 ClearMemory ( zeroXdr, sizeof( HFSPlusExtentRecord ) );
1336 result = CheckFileExtents( GPtr, kHFSBadBlockFileID, kDataFork, NULL, (void *)zeroXdr, &numBadBlocks); // check and mark bitmap
1337 }
1338
1339 ExitThisRoutine:
1340 if ( block.buffer != NULL )
1341 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1342
1343 return (result);
1344 }
1345
1346
1347 /*------------------------------------------------------------------------------
1348
1349 Function: CreateCatalogBTreeControlBlock
1350
1351 Function: Create the calculated CatalogBTree Control Block
1352
1353 Input: GPtr - pointer to scavenger global area
1354
1355 Output: - 0 = no error
1356 n = error code
1357 ------------------------------------------------------------------------------*/
1358 OSErr CreateCatalogBTreeControlBlock( SGlobPtr GPtr )
1359 {
1360 OSErr err;
1361 SInt32 size;
1362 UInt32 numABlks;
1363 BTHeaderRec header;
1364 BTreeControlBlock * btcb;
1365 SVCB * vcb;
1366 BlockDescriptor block;
1367 Boolean isHFSPlus;
1368
1369 // Set up
1370 isHFSPlus = VolumeObjectIsHFSPlus( );
1371 GPtr->TarID = kHFSCatalogFileID;
1372 GPtr->TarBlock = kHeaderNodeNum;
1373 vcb = GPtr->calculatedVCB;
1374 btcb = GPtr->calculatedCatalogBTCB;
1375 block.buffer = NULL;
1376
1377 err = GetVolumeObjectVHBorMDB( &block );
1378 if ( err != noErr ) goto ExitThisRoutine; // error, couldn't get it
1379 //
1380 // check out allocation info for the Catalog File
1381 //
1382 if (isHFSPlus)
1383 {
1384 HFSPlusVolumeHeader * volumeHeader;
1385
1386 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1387
1388 CopyMemory(volumeHeader->catalogFile.extents, GPtr->calculatedCatalogFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1389
1390 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents32, &numABlks);
1391 if (err) goto exit;
1392
1393 if ( volumeHeader->catalogFile.totalBlocks != numABlks )
1394 {
1395 RcdError( GPtr, E_CatPEOF );
1396 err = E_CatPEOF;
1397 goto exit;
1398 }
1399 else
1400 {
1401 GPtr->calculatedCatalogFCB->fcbLogicalSize = volumeHeader->catalogFile.logicalSize;
1402 GPtr->calculatedCatalogFCB->fcbPhysicalSize = (UInt64)volumeHeader->catalogFile.totalBlocks *
1403 (UInt64)volumeHeader->blockSize;
1404 }
1405
1406 //
1407 // Set up the minimal BTreeControlBlock structure
1408 //
1409
1410 // read the BTreeHeader from disk & also validate its node size.
1411 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1412 if (err) goto exit;
1413
1414 btcb->maxKeyLength = kHFSPlusCatalogKeyMaximumLength; // max key length
1415
1416 /*
1417 * Figure out the type of key string compare
1418 * (case-insensitive or case-sensitive)
1419 *
1420 * To do: should enforce that an "HX" volume is required for kHFSBinaryCompare.
1421 */
1422 if (header.keyCompareType == kHFSBinaryCompare)
1423 {
1424 btcb->keyCompareProc = (void *)CaseSensitiveCatalogKeyCompare;
1425 fsckPrint(GPtr->context, hfsCaseSensitive);
1426 }
1427 else
1428 {
1429 btcb->keyCompareProc = (void *)CompareExtendedCatalogKeys;
1430 }
1431 btcb->keyCompareType = header.keyCompareType;
1432 btcb->leafRecords = header.leafRecords;
1433 btcb->nodeSize = header.nodeSize;
1434 btcb->totalNodes = (UInt32)( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1435 btcb->freeNodes = btcb->totalNodes; // start with everything free
1436 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Catalog files have large, variable-sized keys
1437
1438 btcb->treeDepth = header.treeDepth;
1439 btcb->rootNode = header.rootNode;
1440 btcb->firstLeafNode = header.firstLeafNode;
1441 btcb->lastLeafNode = header.lastLeafNode;
1442
1443
1444 // Make sure the header node's size field is correct by looking at the 1st record offset
1445 err = CheckNodesFirstOffset( GPtr, btcb );
1446 if ( (err != noErr) && (btcb->nodeSize != 4096) ) // default HFS+ Catalog node size is 4096
1447 {
1448 btcb->nodeSize = 4096;
1449 btcb->totalNodes = (UInt32)( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1450 btcb->freeNodes = btcb->totalNodes; // start with everything free
1451
1452 err = CheckNodesFirstOffset( GPtr, btcb );
1453 if (err) goto exit;
1454
1455 GPtr->CBTStat |= S_BTH; // update the Btree header
1456 }
1457 }
1458 else // HFS
1459 {
1460 HFSMasterDirectoryBlock *alternateMDB;
1461
1462 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1463
1464 CopyMemory( alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1465 // ExtDataRecToExtents(alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents);
1466
1467 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents16, &numABlks); /* check out extent info */
1468 if (err) goto exit;
1469
1470 if (alternateMDB->drCTFlSize != ((UInt64)numABlks * (UInt64)vcb->vcbBlockSize)) // check out the PEOF
1471 {
1472 RcdError( GPtr, E_CatPEOF );
1473 err = E_CatPEOF;
1474 goto exit;
1475 }
1476 else
1477 {
1478 GPtr->calculatedCatalogFCB->fcbPhysicalSize = alternateMDB->drCTFlSize; // set up PEOF and EOF in FCB
1479 GPtr->calculatedCatalogFCB->fcbLogicalSize = GPtr->calculatedCatalogFCB->fcbPhysicalSize;
1480 }
1481
1482 //
1483 // Set up the minimal BTreeControlBlock structure
1484 //
1485
1486 // read the BTreeHeader from disk & also validate its node size.
1487 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1488 if (err) goto exit;
1489
1490 btcb->maxKeyLength = kHFSCatalogKeyMaximumLength; // max key length
1491 btcb->keyCompareProc = (void *) CompareCatalogKeys;
1492 btcb->leafRecords = header.leafRecords;
1493 btcb->nodeSize = header.nodeSize;
1494 btcb->totalNodes = (UInt32)(GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1495 btcb->freeNodes = btcb->totalNodes; // start with everything free
1496
1497 btcb->treeDepth = header.treeDepth;
1498 btcb->rootNode = header.rootNode;
1499 btcb->firstLeafNode = header.firstLeafNode;
1500 btcb->lastLeafNode = header.lastLeafNode;
1501
1502 // Make sure the header node's size field is correct by looking at the 1st record offset
1503 err = CheckNodesFirstOffset( GPtr, btcb );
1504 if (err) goto exit;
1505 }
1506 #if 0
1507 plog(" Catalog B-tree is %qd bytes\n", (UInt64)btcb->totalNodes * (UInt64) btcb->nodeSize);
1508 #endif
1509
1510 if ( header.btreeType != kHFSBTreeType )
1511 {
1512 GPtr->CBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1513 }
1514
1515 //
1516 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes.
1517 //
1518
1519 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1520 if ( btcb->refCon == nil ) {
1521 err = R_NoMem;
1522 goto exit;
1523 }
1524 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1525 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1526 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1527 {
1528 err = R_NoMem;
1529 goto exit;
1530 }
1531
1532 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1533 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
1534
1535 /* it should be OK at this point to get volume name and stuff it into our global */
1536 {
1537 OSErr result;
1538 UInt16 recSize;
1539 CatalogKey key;
1540 CatalogRecord record;
1541
1542 BuildCatalogKey( kHFSRootFolderID, NULL, isHFSPlus, &key );
1543 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &record, &recSize, NULL );
1544 if ( result == noErr ) {
1545 if ( isHFSPlus ) {
1546 size_t len;
1547 HFSPlusCatalogThread * recPtr = &record.hfsPlusThread;
1548 (void) utf_encodestr( recPtr->nodeName.unicode,
1549 recPtr->nodeName.length * 2,
1550 GPtr->volumeName, &len, sizeof(GPtr->volumeName) );
1551 GPtr->volumeName[len] = '\0';
1552 }
1553 else {
1554 HFSCatalogThread * recPtr = &record.hfsThread;
1555 bcopy( &recPtr->nodeName[1], GPtr->volumeName, recPtr->nodeName[0] );
1556 GPtr->volumeName[ recPtr->nodeName[0] ] = '\0';
1557 }
1558 fsckPrint(GPtr->context, fsckVolumeName, GPtr->volumeName);
1559 }
1560 }
1561
1562 exit:
1563 ExitThisRoutine:
1564 if ( block.buffer != NULL )
1565 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1566
1567 return (err);
1568 }
1569
1570
1571 /*------------------------------------------------------------------------------
1572
1573 Function: CreateExtendedAllocationsFCB
1574
1575 Function: Create the calculated FCBs for
1576 kHFSAllocationFileID and kHFSStartupFileID.
1577
1578 Input: GPtr - pointer to scavenger global area
1579
1580 Output: - 0 = no error
1581 n = error code
1582 ------------------------------------------------------------------------------*/
1583 OSErr CreateExtendedAllocationsFCB( SGlobPtr GPtr )
1584 {
1585 OSErr err = 0;
1586 UInt32 numABlks;
1587 SVCB * vcb;
1588 Boolean isHFSPlus;
1589 BlockDescriptor block;
1590
1591 // Set up
1592 isHFSPlus = VolumeObjectIsHFSPlus( );
1593 GPtr->TarID = kHFSAllocationFileID;
1594 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1595 vcb = GPtr->calculatedVCB;
1596 block.buffer = NULL;
1597
1598 //
1599 // check out allocation info for the allocation File
1600 //
1601
1602 if ( isHFSPlus )
1603 {
1604 SFCB * fcb;
1605 HFSPlusVolumeHeader *volumeHeader;
1606
1607 err = GetVolumeObjectVHB( &block );
1608 if ( err != noErr )
1609 goto exit;
1610 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1611
1612 fcb = GPtr->calculatedAllocationsFCB;
1613 CopyMemory( volumeHeader->allocationFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1614
1615 err = CheckFileExtents( GPtr, kHFSAllocationFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1616 if (err) goto exit;
1617
1618 //
1619 // The allocation file will get processed in whole allocation blocks, or
1620 // maximal-sized cache blocks, whichever is smaller. This means the cache
1621 // doesn't need to cope with buffers that are larger than a cache block.
1622 if (vcb->vcbBlockSize < fscache.BlockSize)
1623 (void) SetFileBlockSize (fcb, vcb->vcbBlockSize);
1624 else
1625 (void) SetFileBlockSize (fcb, fscache.BlockSize);
1626
1627 if ( volumeHeader->allocationFile.totalBlocks != numABlks )
1628 {
1629 RcdError( GPtr, E_CatPEOF );
1630 err = E_CatPEOF;
1631 goto exit;
1632 }
1633 else
1634 {
1635 fcb->fcbLogicalSize = volumeHeader->allocationFile.logicalSize;
1636 fcb->fcbPhysicalSize = (UInt64) volumeHeader->allocationFile.totalBlocks *
1637 (UInt64) volumeHeader->blockSize;
1638 }
1639
1640 /* while we're here, also get startup file extents... */
1641 fcb = GPtr->calculatedStartupFCB;
1642 CopyMemory( volumeHeader->startupFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1643
1644 err = CheckFileExtents( GPtr, kHFSStartupFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1645 if (err) goto exit;
1646
1647 fcb->fcbLogicalSize = volumeHeader->startupFile.logicalSize;
1648 fcb->fcbPhysicalSize = (UInt64) volumeHeader->startupFile.totalBlocks *
1649 (UInt64) volumeHeader->blockSize;
1650 }
1651
1652 exit:
1653 if (block.buffer)
1654 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1655
1656 return (err);
1657
1658 }
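/*
 * Worked example for the size fields set above (hypothetical numbers): an
 * allocation file of 1526 allocation blocks on a volume with a 4 KiB block
 * size gets fcbPhysicalSize = 1526 * 4096 = 6,250,496 bytes, while
 * fcbLogicalSize is taken verbatim from the volume header.  The cache I/O
 * size chosen by SetFileBlockSize is the smaller of the allocation block
 * size and the cache block size, so the cache never has to handle a buffer
 * larger than one of its own blocks.
 */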
1659
1660
1661 /*------------------------------------------------------------------------------
1662
1663 Function: CatHChk - (Catalog Hierarchy Check)
1664
1665 Function: Verifies the catalog hierarchy.
1666
1667 Input: GPtr - pointer to scavenger global area
1668
1669 Output: CatHChk - function result:
1670 0 = no error
1671 n = error code
1672 ------------------------------------------------------------------------------*/
1673
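/*
 * Implementation note: CatHChk walks the catalog leaf records in key order.
 * GPtr->DirPTPtr acts as a stack with one SDPR entry per directory on the
 * path currently being descended (grown in CMMaxDepth increments as needed).
 * While records share the parent ID on top of the stack they are counted and
 * cross-checked; when the enumeration moves past the current directory's
 * records, that directory's folder record is re-fetched and its stored
 * valence is compared with the number of children just enumerated
 * (offspringIndex - 1) before popping a level.
 */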
1674 OSErr CatHChk( SGlobPtr GPtr )
1675 {
1676 SInt16 i;
1677 OSErr result;
1678 UInt16 recSize;
1679 SInt16 selCode;
1680 UInt32 hint;
1681 UInt32 dirCnt;
1682 UInt32 filCnt;
1683 SInt16 rtdirCnt;
1684 SInt16 rtfilCnt;
1685 SVCB *calculatedVCB;
1686 SDPR *dprP;
1687 SDPR *dprP1;
1688 CatalogKey foundKey;
1689 Boolean validKeyFound;
1690 CatalogKey key;
1691 CatalogRecord record;
1692 CatalogRecord record2;
1693 HFSPlusCatalogFolder *largeCatalogFolderP;
1694 HFSPlusCatalogFile *largeCatalogFileP;
1695 HFSCatalogFile *smallCatalogFileP;
1696 HFSCatalogFolder *smallCatalogFolderP;
1697 CatalogName catalogName;
1698 UInt32 valence;
1699 CatalogRecord threadRecord;
1700 HFSCatalogNodeID parID;
1701 Boolean isHFSPlus;
1702
1703 // set up
1704 isHFSPlus = VolumeObjectIsHFSPlus( );
1705 calculatedVCB = GPtr->calculatedVCB;
1706 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
1707 GPtr->TarBlock = 0; /* no target block yet */
1708
1709 //
1710 // position to the beginning of catalog
1711 //
1712
1713 //•• Can we ignore this part by just taking advantage of setting the selCode = 0x8001;
1714 {
1715 BuildCatalogKey( 1, (const CatalogName *)nil, isHFSPlus, &key );
1716 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1717
1718 GPtr->TarBlock = hint; /* set target block */
1719 if ( result != btNotFound )
1720 {
1721 RcdError( GPtr, E_CatRec );
1722 return( E_CatRec );
1723 }
1724 }
1725
1726 GPtr->DirLevel = 1;
1727 dprP = &(GPtr->DirPTPtr)[0];
1728 dprP->directoryID = 1;
1729
1730 dirCnt = filCnt = rtdirCnt = rtfilCnt = 0;
1731
1732 result = noErr;
1733 selCode = 0x8001; /* start with root directory */
1734
1735 //
1736 // enumerate the entire catalog
1737 //
1738 while ( (GPtr->DirLevel > 0) && (result == noErr) )
1739 {
1740 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1741
1742 validKeyFound = true;
1743 record.recordType = 0;
1744
1745 // get the next record
1746 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recSize, &hint );
1747
1748 GPtr->TarBlock = hint; /* set target block */
1749 if ( result != noErr )
1750 {
1751 if ( result == btNotFound )
1752 {
1753 result = noErr;
1754 validKeyFound = false;
1755 }
1756 else
1757 {
1758 result = IntError( GPtr, result ); /* error from BTGetRecord */
1759 return( result );
1760 }
1761 }
1762 selCode = 1; /* get next rec from now on */
1763
1764 GPtr->itemsProcessed++;
1765
1766 //
1767 // if same ParID ...
1768 //
1769 parID = isHFSPlus == true ? foundKey.hfsPlus.parentID : foundKey.hfs.parentID;
1770 if ( (validKeyFound == true) && (parID == dprP->directoryID) )
1771 {
1772 dprP->offspringIndex++; /* increment offspring index */
1773
1774 // if new directory ...
1775
1776 if ( record.recordType == kHFSPlusFolderRecord )
1777 {
1778 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1779
1780 largeCatalogFolderP = (HFSPlusCatalogFolder *) &record;
1781 GPtr->TarID = largeCatalogFolderP->folderID; // target ID = directory ID
1782 GPtr->CNType = record.recordType; // target CNode type = folder record
1783 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1784
1785 if ( dprP->directoryID > 1 )
1786 {
1787 GPtr->DirLevel++; // we have a new directory level
1788 dirCnt++;
1789 }
1790 if ( dprP->directoryID == kHFSRootFolderID ) // bump root dir count
1791 rtdirCnt++;
1792
1793 if ( GPtr->DirLevel > GPtr->dirPathCount )
1794 {
1795 void *ptr;
1796
1797 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1798 if (ptr == nil)
1799 {
1800 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1801 return noErr; /* abort this check, but let other checks proceed */
1802 }
1803 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1804 GPtr->dirPathCount += CMMaxDepth;
1805 GPtr->DirPTPtr = ptr;
1806 }
1807
1808 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1809 dprP->directoryID = largeCatalogFolderP->folderID;
1810 dprP->offspringIndex = 1;
1811 dprP->directoryHint = hint;
1812 dprP->parentDirID = foundKey.hfsPlus.parentID;
1813 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &dprP->directoryName, isHFSPlus );
1814
1815 for ( i = 1; i < GPtr->DirLevel; i++ )
1816 {
1817 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1818 if (dprP->directoryID == dprP1->directoryID)
1819 {
1820 RcdError( GPtr,E_DirLoop ); // loop in directory hierarchy
1821 return( E_DirLoop );
1822 }
1823 }
1824
1825 /*
1826 * Find thread record
1827 */
1828 BuildCatalogKey( dprP->directoryID, (const CatalogName *) nil, isHFSPlus, &key );
1829 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1830 if ( result != noErr ) {
1831 struct MissingThread *mtp;
1832
1833 /* Report the error */
1834 fsckPrint(GPtr->context, E_NoThd, dprP->directoryID);
1835
1836 /* HFS will exit here */
1837 if ( !isHFSPlus )
1838 return (E_NoThd);
1839 /*
1840 * A directory thread is missing. If we can find this
1841 * ID on the missing-thread list then we know where the
1842 * child entries reside and can resume our enumeration.
1843 */
1844 for (mtp = GPtr->missingThreadList; mtp != NULL; mtp = mtp->link) {
1845 if (mtp->threadID == dprP->directoryID) {
1846 mtp->thread.recordType = kHFSPlusFolderThreadRecord;
1847 mtp->thread.parentID = dprP->parentDirID;
1848 CopyCatalogName(&dprP->directoryName, (CatalogName *)&mtp->thread.nodeName, isHFSPlus);
1849
1850 /* Reposition to the first child of target directory */
1851 result = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &mtp->nextKey,
1852 kNoHint, &foundKey, &threadRecord, &recSize, &hint);
1853 if (result) {
1854 return (E_NoThd);
1855 }
1856 selCode = 0; /* use current record instead of next */
1857 break;
1858 }
1859 }
1860 if (selCode != 0) {
1861 /*
1862 * A directory thread is missing but we know this
1863 * directory has no children (since we didn't find
1864 * its ID on the missing-thread list above).
1865 *
1866 * At this point we can resume the enumeration at
1867 * our previous position in our parent directory.
1868 */
1869 goto resumeAtParent;
1870 }
1871 }
1872 dprP->threadHint = hint;
1873 GPtr->TarBlock = hint;
1874 }
1875
1876 // LargeCatalogFile
1877 else if ( record.recordType == kHFSPlusFileRecord )
1878 {
1879 largeCatalogFileP = (HFSPlusCatalogFile *) &record;
1880 GPtr->TarID = largeCatalogFileP->fileID; // target ID = file number
1881 GPtr->CNType = record.recordType; // target CNode type = file record
1882 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1883 filCnt++;
1884 if (dprP->directoryID == kHFSRootFolderID)
1885 rtfilCnt++;
1886 }
1887
1888 else if ( record.recordType == kHFSFolderRecord )
1889 {
1890 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1891
1892 smallCatalogFolderP = (HFSCatalogFolder *) &record;
1893 GPtr->TarID = smallCatalogFolderP->folderID; /* target ID = directory ID */
1894 GPtr->CNType = record.recordType; /* target CNode type = folder record */
1895 CopyCatalogName( (const CatalogName *) &key.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */
1896
1897 if (dprP->directoryID > 1)
1898 {
1899 GPtr->DirLevel++; /* we have a new directory level */
1900 dirCnt++;
1901 }
1902 if (dprP->directoryID == kHFSRootFolderID) /* bump root dir count */
1903 rtdirCnt++;
1904
1905 if ( GPtr->DirLevel > GPtr->dirPathCount )
1906 {
1907 void *ptr;
1908
1909 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1910 if (ptr == nil)
1911 {
1912 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1913 return noErr; /* abort this check, but let other checks proceed */
1914 }
1915 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1916 GPtr->dirPathCount += CMMaxDepth;
1917 GPtr->DirPTPtr = ptr;
1918 }
1919
1920 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1921 dprP->directoryID = smallCatalogFolderP->folderID;
1922 dprP->offspringIndex = 1;
1923 dprP->directoryHint = hint;
1924 dprP->parentDirID = foundKey.hfs.parentID;
1925
1926 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &dprP->directoryName, isHFSPlus );
1927
1928 for (i = 1; i < GPtr->DirLevel; i++)
1929 {
1930 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1931 if (dprP->directoryID == dprP1->directoryID)
1932 {
1933 RcdError( GPtr,E_DirLoop ); /* loop in directory hierarchy */
1934 return( E_DirLoop );
1935 }
1936 }
1937
1938 BuildCatalogKey( dprP->directoryID, (const CatalogName *)0, isHFSPlus, &key );
1939 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1940 if (result != noErr )
1941 {
1942 result = IntError(GPtr,result); /* error from BTSearch */
1943 return(result);
1944 }
1945 dprP->threadHint = hint; /* save hint for thread */
1946 GPtr->TarBlock = hint; /* set target block */
1947 }
1948
1949 // HFSCatalogFile...
1950 else if ( record.recordType == kHFSFileRecord )
1951 {
1952 smallCatalogFileP = (HFSCatalogFile *) &record;
1953 GPtr->TarID = smallCatalogFileP->fileID; /* target ID = file number */
1954 GPtr->CNType = record.recordType; /* target CNode type = file record */
1955 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = file name */
1956 filCnt++;
1957 if (dprP->directoryID == kHFSRootFolderID)
1958 rtfilCnt++;
1959 }
1960
1961 // Unknown/Bad record type
1962 else
1963 {
1964 M_DebugStr("\p Unknown-Bad record type");
1965 return( 123 );
1966 }
1967 }
1968
1969 //
1970 // if not same ParID or no record
1971 //
1972 else if ( (record.recordType == kHFSFileThreadRecord) || (record.recordType == kHFSPlusFileThreadRecord) ) /* it's a file thread, skip past it */
1973 {
1974 GPtr->TarID = parID; // target ID = file number
1975 GPtr->CNType = record.recordType; // target CNode type = thread
1976 GPtr->CName.ustr.length = 0; // no target CName
1977 }
1978
1979 else
1980 {
1981 resumeAtParent:
1982 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
1983 GPtr->CNType = record.recordType; /* target CNode type = directory */
1984 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus ); // copy the string name
1985
1986 // re-locate current directory
1987 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &catalogName, isHFSPlus );
1988 BuildCatalogKey( dprP->parentDirID, (const CatalogName *)&catalogName, isHFSPlus, &key );
1989 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, dprP->directoryHint, &foundKey, &record2, &recSize, &hint );
1990
1991 if ( result != noErr )
1992 {
1993 result = IntError(GPtr,result); /* error from BTSearch */
1994 return(result);
1995 }
1996 GPtr->TarBlock = hint; /* set target block */
1997
1998
1999 valence = isHFSPlus == true ? record2.hfsPlusFolder.valence : (UInt32)record2.hfsFolder.valence;
2000
2001 if ( valence != dprP->offspringIndex -1 ) /* check its valence */
2002 if ( ( result = RcdValErr( GPtr, E_DirVal, dprP->offspringIndex -1, valence, dprP->parentDirID ) ) )
2003 return( result );
2004
2005 GPtr->DirLevel--; /* move up a level */
2006
2007 if(GPtr->DirLevel > 0)
2008 {
2009 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
2010 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
2011 GPtr->CNType = record.recordType; /* target CNode type = directory */
2012 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus );
2013 }
2014 }
2015 } // end while
2016
2017 //
2018 // verify directory and file counts (all nonfatal, repairable errors)
2019 //
2020 if (!isHFSPlus && (rtdirCnt != calculatedVCB->vcbNmRtDirs)) /* check count of dirs in root */
2021 if ( ( result = RcdValErr(GPtr,E_RtDirCnt,rtdirCnt,calculatedVCB->vcbNmRtDirs,0) ) )
2022 return( result );
2023
2024 if (!isHFSPlus && (rtfilCnt != calculatedVCB->vcbNmFls)) /* check count of files in root */
2025 if ( ( result = RcdValErr(GPtr,E_RtFilCnt,rtfilCnt,calculatedVCB->vcbNmFls,0) ) )
2026 return( result );
2027
2028 if (dirCnt != calculatedVCB->vcbFolderCount) /* check count of dirs in volume */
2029 if ( ( result = RcdValErr(GPtr,E_DirCnt,dirCnt,calculatedVCB->vcbFolderCount,0) ) )
2030 return( result );
2031
2032 if (filCnt != calculatedVCB->vcbFileCount) /* check count of files in volume */
2033 if ( ( result = RcdValErr(GPtr,E_FilCnt,filCnt,calculatedVCB->vcbFileCount,0) ) )
2034 return( result );
2035
2036 return( noErr );
2037
2038 } /* end of CatHChk */
2039
2040
2041
2042 /*------------------------------------------------------------------------------
2043
2044 Function: CreateAttributesBTreeControlBlock
2045
2046 Function: Create the calculated AttributesBTree Control Block
2047
2048 Input: GPtr - pointer to scavenger global area
2049
2050 Output: - 0 = no error
2051 n = error code
2052 ------------------------------------------------------------------------------*/
2053 OSErr CreateAttributesBTreeControlBlock( SGlobPtr GPtr )
2054 {
2055 OSErr err = 0;
2056 SInt32 size;
2057 UInt32 numABlks;
2058 BTreeControlBlock * btcb;
2059 SVCB * vcb;
2060 Boolean isHFSPlus;
2061 BTHeaderRec header;
2062 BlockDescriptor block;
2063
2064 // Set up
2065 isHFSPlus = VolumeObjectIsHFSPlus( );
2066 GPtr->TarID = kHFSAttributesFileID;
2067 GPtr->TarBlock = kHeaderNodeNum;
2068 block.buffer = NULL;
2069 btcb = GPtr->calculatedAttributesBTCB;
2070 vcb = GPtr->calculatedVCB;
2071
2072 //
2073 // check out allocation info for the Attributes File
2074 //
2075
2076 if (isHFSPlus)
2077 {
2078 HFSPlusVolumeHeader *volumeHeader;
2079
2080 err = GetVolumeObjectVHB( &block );
2081 if ( err != noErr )
2082 goto exit;
2083 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
2084
2085 CopyMemory( volumeHeader->attributesFile.extents, GPtr->calculatedAttributesFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
2086
2087 err = CheckFileExtents( GPtr, kHFSAttributesFileID, kDataFork, NULL, (void *)GPtr->calculatedAttributesFCB->fcbExtents32, &numABlks);
2088 if (err) goto exit;
2089
2090 if ( volumeHeader->attributesFile.totalBlocks != numABlks ) // check out the PEOF
2091 {
2092 RcdError( GPtr, E_CatPEOF );
2093 err = E_CatPEOF;
2094 goto exit;
2095 }
2096 else
2097 {
2098 GPtr->calculatedAttributesFCB->fcbLogicalSize = (UInt64) volumeHeader->attributesFile.logicalSize; // Set Attributes tree's LEOF
2099 GPtr->calculatedAttributesFCB->fcbPhysicalSize = (UInt64) volumeHeader->attributesFile.totalBlocks *
2100 (UInt64) volumeHeader->blockSize; // Set Attributes tree's PEOF
2101 }
2102
2103 //
2104 // See if we actually have an attributes BTree
2105 //
2106 if (numABlks == 0)
2107 {
2108 btcb->maxKeyLength = 0;
2109 btcb->keyCompareProc = 0;
2110 btcb->leafRecords = 0;
2111 btcb->nodeSize = 0;
2112 btcb->totalNodes = 0;
2113 btcb->freeNodes = 0;
2114 btcb->attributes = 0;
2115
2116 btcb->treeDepth = 0;
2117 btcb->rootNode = 0;
2118 btcb->firstLeafNode = 0;
2119 btcb->lastLeafNode = 0;
2120
2121 // GPtr->calculatedVCB->attributesRefNum = 0;
2122 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2123 }
2124 else
2125 {
2126 // read the BTreeHeader from disk & also validate its node size.
2127 err = GetBTreeHeader(GPtr, GPtr->calculatedAttributesFCB, &header);
2128 if (err) goto exit;
2129
2130 btcb->maxKeyLength = kAttributeKeyMaximumLength; // max key length
2131 btcb->keyCompareProc = (void *)CompareAttributeKeys;
2132 btcb->leafRecords = header.leafRecords;
2133 btcb->nodeSize = header.nodeSize;
2134 btcb->totalNodes = (UInt32)( GPtr->calculatedAttributesFCB->fcbPhysicalSize / btcb->nodeSize );
2135 btcb->freeNodes = btcb->totalNodes; // start with everything free
2136 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Attributes files have large, variable-sized keys
2137
2138 btcb->treeDepth = header.treeDepth;
2139 btcb->rootNode = header.rootNode;
2140 btcb->firstLeafNode = header.firstLeafNode;
2141 btcb->lastLeafNode = header.lastLeafNode;
2142
2143 //
2144 // Make sure the header's node size field is correct by looking at the 1st record offset
2145 //
2146 err = CheckNodesFirstOffset( GPtr, btcb );
2147 if (err) goto exit;
2148 }
2149 }
2150 else
2151 {
2152 btcb->maxKeyLength = 0;
2153 btcb->keyCompareProc = 0;
2154 btcb->leafRecords = 0;
2155 btcb->nodeSize = 0;
2156 btcb->totalNodes = 0;
2157 btcb->freeNodes = 0;
2158 btcb->attributes = 0;
2159
2160 btcb->treeDepth = 0;
2161 btcb->rootNode = 0;
2162 btcb->firstLeafNode = 0;
2163 btcb->lastLeafNode = 0;
2164
2165 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2166 }
2167
2168 //
2169 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes?
2170 //
2171 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
2172 if ( btcb->refCon == nil ) {
2173 err = R_NoMem;
2174 goto exit;
2175 }
2176
2177 if (btcb->totalNodes == 0)
2178 {
2179 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = nil;
2180 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = 0;
2181 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = 0;
2182 }
2183 else
2184 {
2185 if ( btcb->refCon == nil ) {
2186 err = R_NoMem;
2187 goto exit;
2188 }
2189 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
2190 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
2191 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
2192 {
2193 err = R_NoMem;
2194 goto exit;
2195 }
2196
2197 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
2198 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
2199 }
2200
2201 exit:
2202 if (block.buffer)
2203 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
2204
2205 return (err);
2206 }
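/*
 * Example of the sizing above (hypothetical numbers): an attributes file
 * with an fcbPhysicalSize of 8 MiB and an 8 KiB node size yields
 * totalNodes = 8388608 / 8192 = 1024 nodes, so the scavenger's node bitmap
 * hung off the BTreeExtensionsRec needs (1024 + 7) / 8 = 128 bytes.
 */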
2207
2208 /*
2209 * Function: RecordLastAttrBits
2210 *
2211 * Description:
2212 * Updates the Chinese Remainder Theorem buckets with extended attribute
2213 * information for the previous fileID stored in the global structure.
2214 *
2215 * Input:
2216 * GPtr - pointer to scavenger global area
2217 * * GPtr->lastAttrInfo.fileID - fileID of last attribute seen
2218 *
2219 * Output: Nothing
2220 */
2221 static void RecordLastAttrBits(SGlobPtr GPtr)
2222 {
2223 /* The lastAttrInfo structure is initialized to zero, so we skip
2224 * recording information for fileID = 0. fileIDs < 16 (except for
2225 * fileID = 2) can have extended attributes but do not have a
2226 * corresponding entry in the catalog Btree, so we also skip recording
2227 * those fileIDs in the Chinese Remainder Theorem buckets. Currently we
2228 * only set extended attributes for fileID = 1 among these fileIDs,
2229 * and this can change in the future (see 3984119).
2230 */
2231 if ((GPtr->lastAttrInfo.fileID == 0) ||
2232 ((GPtr->lastAttrInfo.fileID < kHFSFirstUserCatalogNodeID) &&
2233 (GPtr->lastAttrInfo.fileID != kHFSRootFolderID))) {
2234 return;
2235 }
2236
2237 if (GPtr->lastAttrInfo.hasSecurity == true) {
2238 /* fileID has both extended attribute and ACL */
2239 RecordXAttrBits(GPtr, kHFSHasAttributesMask | kHFSHasSecurityMask,
2240 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2241 GPtr->lastAttrInfo.hasSecurity = false;
2242 } else {
2243 /* fileID only has extended attribute */
2244 RecordXAttrBits(GPtr, kHFSHasAttributesMask,
2245 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2246 }
2247 }
2248
2249 /*
2250 * Function: setLastAttrAllocInfo
2251 *
2252 * Description:
2253 * Record the allocation block information for the last extended
2254 * attribute in the global structure. Also set isValid to true
2255 * to indicate that the data is valid and should be used to verify
2256 * allocation blocks.
2257 *
2258 * Input:
2259 * GPtr - pointer to scavenger global area
2260 * totalBlocks - total blocks allocated by the attribute
2261 * logicalSize - logical size of the attribute
2262 * calculatedTotalBlocks - blocks accounted for by the attribute in the current extent
2263 *
2264 * Output: Nothing
2265 */
2266 static void setLastAttrAllocInfo(SGlobPtr GPtr, u_int32_t totalBlocks,
2267 u_int64_t logicalSize, u_int32_t calculatedTotalBlocks)
2268 {
2269 GPtr->lastAttrInfo.totalBlocks = totalBlocks;
2270 GPtr->lastAttrInfo.logicalSize = logicalSize;
2271 GPtr->lastAttrInfo.calculatedTotalBlocks = calculatedTotalBlocks;
2272 GPtr->lastAttrInfo.isValid = true;
2273 }
2274
2275 /*
2276 * Function: CheckLastAttrAllocation
2277 *
2278 * Description:
2279 * Checks the allocation block information stored for the last
2280 * extended attribute seen during extended attribute BTree traversal.
2281 * Always resets the information stored for last EA allocation.
2282 *
2283 * Input: GPtr - pointer to scavenger global area
2284 *
2285 * Output: int - function result:
2286 * zero - no error
2287 * non-zero - error
2288 */
2289 static int CheckLastAttrAllocation(SGlobPtr GPtr)
2290 {
2291 int result = 0;
2292 u_int64_t bytes;
2293
2294 if (GPtr->lastAttrInfo.isValid == true) {
2295 if (GPtr->lastAttrInfo.totalBlocks !=
2296 GPtr->lastAttrInfo.calculatedTotalBlocks) {
2297 result = RecordBadAllocation(GPtr->lastAttrInfo.fileID,
2298 GPtr->lastAttrInfo.attrname, kEAData,
2299 GPtr->lastAttrInfo.totalBlocks,
2300 GPtr->lastAttrInfo.calculatedTotalBlocks);
2301 } else {
2302 bytes = (u_int64_t)GPtr->lastAttrInfo.calculatedTotalBlocks *
2303 (u_int64_t)GPtr->calculatedVCB->vcbBlockSize;
2304 if (GPtr->lastAttrInfo.logicalSize > bytes) {
2305 result = RecordTruncation(GPtr->lastAttrInfo.fileID,
2306 GPtr->lastAttrInfo.attrname, kEAData,
2307 GPtr->lastAttrInfo.logicalSize, bytes);
2308 }
2309 }
2310
2311 /* Invalidate information in the global structure */
2312 GPtr->lastAttrInfo.isValid = false;
2313 }
2314
2315 return (result);
2316 }
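/*
 * Example of the two checks above (hypothetical values): an attribute whose
 * fork data claims totalBlocks = 10 while its extents only account for 8
 * blocks is recorded via RecordBadAllocation; an attribute whose extents do
 * cover 8 blocks of 4096 bytes (32768 bytes) but whose logicalSize claims
 * 40000 bytes is recorded via RecordTruncation.
 */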
2317
2318 /*------------------------------------------------------------------------------
2319 Function: CheckAttributeRecord
2320
2321 Description:
2322 This is the callback function invoked for every leaf record in the
2323 Attributes BTree during the verify and repair stages. The basic
2324 functionality of the function is the same in both stages, except
2325 that whenever it finds corruption, the verify stage prints a message
2326 and the repair stage repairs it. In the verify stage, this function
2327 accounts for allocation blocks used by extent-based extended
2328 attributes and also updates the Chinese Remainder Theorem buckets
2329 corresponding to the extended attribute
2330 and security bits.
2331
2332 1. Only in the verify stage, if the fileID or attribute name of the current
2333 extended attribute is not the same as the previous attribute's, check the
2334 allocation block counts for the previous attribute.
2335
2336 2. Only in the verify stage, if the fileID of the current attribute is not
2337 the same as the previous attribute's, record the previous fileID information
2338 for the Chinese Remainder Theorem buckets.
2339
2340 3. For attribute type,
2341 kHFSPlusAttrForkData:
2342 ---------------------
2343 Do all of the following during verify stage and nothing in repair
2344 stage -
2345
2346 Check the start block for the extended attribute from the key. If it is
2347 not zero, print an error.
2348
2349 Account for blocks occupied by this extent and store the allocation
2350 information for this extent to check in future. Also update the
2351 last attribute information in the global structure.
2352
2353 kHFSPlusAttrExtents:
2354 --------------------
2355 If the current attribute's fileID is not the same as the previous fileID, or
2356 if the previous recordType is not a valid forkData or overflow extent
2357 record, report an error in the verify stage or mark it for deletion in
2358 the repair stage.
2359
2360 Do all of the following during verify stage and nothing in repair
2361 stage -
2362
2363 Check the start block for the extended attribute from the key. If it is not
2364 equal to the total blocks seen up to the last attribute, print an error.
2365
2366 Account for blocks occupied by this extent. Update previous
2367 attribute allocation information with blocks seen in current
2368 extent. Also update last attribute block information in the global
2369 structure.
2370
2371 kHFSPlusAttrInlineData:
2372 -----------------------
2373 Only in the verify stage, check if the start block in the key is
2374 equal to zero. If not, print an error.
2375
2376 Unknown type:
2377 -------------
2378 In the verify stage, report an error. In the repair stage, mark the
2379 record for deletion.
2380
2381 4. If a record is marked for deletion, delete the record.
2382
2383 5. Before exiting from the function, always do the following -
2384 a. Indicate if the extended attribute was an ACL
2385 b. Update previous fileID and recordType with current information.
2386 c. Update previous attribute name with current attribute name.
2387
2388 Input: GPtr - pointer to scavenger global area
2389 key - key for current attribute
2390 rec - attribute record
2391 reclen - length of the record
2392
2393 Output: int - function result:
2394 0 = no error
2395 n = error code
2396 ------------------------------------------------------------------------------*/
2397 int
2398 CheckAttributeRecord(SGlobPtr GPtr, const HFSPlusAttrKey *key, const HFSPlusAttrRecord *rec, UInt16 reclen)
2399 {
2400 int result = 0;
2401 unsigned char attrname[XATTR_MAXNAMELEN+1];
2402 size_t attrlen;
2403 u_int32_t blocks;
2404 u_int32_t fileID;
2405 struct attributeInfo *prevAttr;
2406 Boolean isSameAttr = true;
2407 Boolean doDelete = false;
2408 u_int16_t dfaStage = GetDFAStage();
2409
2410 /* Assert if volume is not HFS Plus */
2411 assert(VolumeObjectIsHFSPlus() == true);
2412
2413 prevAttr = &(GPtr->lastAttrInfo);
2414 fileID = key->fileID;
2415 /* Convert unicode attribute name to UTF-8 string */
2416 (void) utf_encodestr(key->attrName, key->attrNameLen * 2, attrname, &attrlen, sizeof(attrname));
2417 attrname[attrlen] = '\0';
2418
2419 /* Compare the current attribute to last attribute seen */
2420 if ((fileID != prevAttr->fileID) ||
2421 (strcmp((char *)attrname, (char *)prevAttr->attrname) != 0)) {
2422 isSameAttr = false;
2423 }
2424
2425 /* We check allocation block information and record EA information for
2426 * the CRT buckets in the verify stage, hence there is no need to do it
2427 * again in the repair stage.
2428 */
2429 if (dfaStage == kVerifyStage) {
2430 /* Different attribute - check allocation block information */
2431 if (isSameAttr == false) {
2432 result = CheckLastAttrAllocation(GPtr);
2433 if (result) {
2434 goto update_out;
2435 }
2436 }
2437
2438 /* Different fileID - record information in CRT bucket */
2439 if (fileID != prevAttr->fileID) {
2440 RecordLastAttrBits(GPtr);
2441 }
2442 }
2443
2444 switch (rec->recordType) {
2445 case kHFSPlusAttrForkData: {
2446 /* Check the start block only in the verify stage to avoid printing a
2447 * message in the repair stage. Note that this corruption is not
2448 * repairable currently. Also check extents only in the verify stage to
2449 * avoid a false overlapping extents error.
2450 */
2451
2452 if (dfaStage == kVerifyStage) {
2453 /* Start block in the key should be zero */
2454 if (key->startBlock != 0) {
2455 RcdError(GPtr, E_ABlkSt);
2456 result = E_ABlkSt;
2457 goto err_out;
2458 }
2459
2460 HFSPlusForkData forkData;
2461 memcpy((void*)(&forkData), (void*)(&rec->forkData.theFork), sizeof(HFSPlusForkData));
2462 /* Check the extent information and record overlapping extents, if any */
2463 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2464 &forkData.extents, &blocks);
2465 if (result) {
2466 goto update_out;
2467 }
2468
2469 /* Store allocation information to check in future */
2470 (void) setLastAttrAllocInfo(GPtr, rec->forkData.theFork.totalBlocks,
2471 rec->forkData.theFork.logicalSize, blocks);
2472 }
2473 break;
2474 }
2475
2476 case kHFSPlusAttrExtents: {
2477 /* Different attribute/fileID or incorrect previous record type */
2478 if ((isSameAttr == false) ||
2479 ((prevAttr->recordType != kHFSPlusAttrExtents) &&
2480 (prevAttr->recordType != kHFSPlusAttrForkData))) {
2481 if (dfaStage == kRepairStage) {
2482 /* Delete record in repair stage */
2483 doDelete = true;
2484 } else {
2485 /* Report error in verify stage */
2486 RcdError(GPtr, E_AttrRec);
2487 GPtr->ABTStat |= S_AttrRec;
2488 goto err_out;
2489 }
2490 }
2491
2492 /* Check the start block only in the verify stage to avoid printing a
2493 * message in the repair stage. Note that this corruption is not
2494 * repairable currently. Also check extents only in the verify stage to
2495 * avoid a false overlapping extents error.
2496 */
2497 if (dfaStage == kVerifyStage) {
2498 /* startBlock in the key should be equal to the total blocks
2499 * seen up to the last attribute.
2500 */
2501 if (key->startBlock != prevAttr->calculatedTotalBlocks) {
2502 RcdError(GPtr, E_ABlkSt);
2503 result = E_ABlkSt;
2504 goto err_out;
2505 }
2506
2507 /* Check the extent information and record overlapping extents, if any */
2508 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2509 rec->overflowExtents.extents, &blocks);
2510 if (result) {
2511 goto update_out;
2512 }
2513
2514 /* Increment the blocks seen so far for this attribute */
2515 prevAttr->calculatedTotalBlocks += blocks;
2516 }
2517 break;
2518 }
2519
2520 case kHFSPlusAttrInlineData: {
2521 /* Check the start block only in the verify stage to avoid printing a
2522 * message in the repair stage.
2523 */
2524 if (dfaStage == kVerifyStage) {
2525 /* Start block in the key should be zero */
2526 if (key->startBlock != 0) {
2527 RcdError(GPtr, E_ABlkSt);
2528 result = E_ABlkSt;
2529 goto err_out;
2530 }
2531 }
2532 break;
2533 }
2534
2535 default: {
2536 /* Unknown attribute record */
2537 if (dfaStage == kRepairStage) {
2538 /* Delete record in repair stage */
2539 doDelete = true;
2540 } else {
2541 /* Report error in verify stage */
2542 RcdError(GPtr, E_AttrRec);
2543 GPtr->ABTStat |= S_AttrRec;
2544 goto err_out;
2545 }
2546 break;
2547 }
2548 };
2549
2550 if (doDelete == true) {
2551 result = DeleteBTreeRecord(GPtr->calculatedAttributesFCB, key);
2552 DPRINTF (d_info|d_xattr, "%s: Deleting attribute %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2553 if (result) {
2554 DPRINTF (d_error|d_xattr, "%s: Error in deleting record for %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2555 }
2556
2557 /* Set flags to mark header and map dirty */
2558 GPtr->ABTStat |= S_BTH + S_BTM;
2559 goto err_out;
2560 }
2561
2562 update_out:
2563 /* Note that an ACL exists for this fileID */
2564 if (strcmp((char *)attrname, KAUTH_FILESEC_XATTR) == 0) {
2565 prevAttr->hasSecurity = true;
2566 }
2567
2568 /* Always update the last recordType, fileID and attribute name before exiting */
2569 prevAttr->recordType = rec->recordType;
2570 prevAttr->fileID = fileID;
2571 (void) strlcpy((char *)prevAttr->attrname, (char *)attrname, sizeof(prevAttr->attrname));
2572
2573 goto out;
2574
2575 err_out:
2576 /* If the current record is invalid/bogus, decide whether to update
2577 * fileID stored in global structure for future comparison based on the
2578 * previous fileID.
2579 * If the current bogus record's fileID is different from fileID of the
2580 * previous good record, we do not want to account for bogus fileID in
2581 * the Chinese Remainder Theorem when we see next good record.
2582 * Hence reset the fileID in global structure to dummy value. Example,
2583 * if the fileIDs are 10 15 20 and record with ID=15 is bogus, we do not
2584 * want to account for record with ID=15.
2585 * If the current bogus record's fileID is same as the fileID of the
2586 * previous good record, we want to account for this fileID in the
2587 * next good record we see after this bogus record. Hence do not
2588 * reset the fileID to dummy value. Example, if the records have fileID
2589 * 10 10 30 and the second record with ID=10 is bogus, we want to
2590 * account for ID=10 when we see record with ID=30.
2591 */
2592 if (prevAttr->fileID != fileID) {
2593 prevAttr->fileID = 0;
2594 }
2595
2596 out:
2597 return(result);
2598 }
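/*
 * Illustrative record sequence for one extent-based extended attribute
 * (hypothetical fileID and name): for fileID 123, attribute "com.example.big",
 * the attributes BTree would hold a kHFSPlusAttrForkData record whose key has
 * startBlock 0, followed by zero or more kHFSPlusAttrExtents records whose
 * key startBlock equals the number of allocation blocks already covered by
 * the earlier records of that same attribute; the checks above enforce
 * exactly that continuity.
 */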
2599
2600 /* Function: RecordXAttrBits
2601 *
2602 * Description:
2603 * This function increments the prime number buckets for the associated
2604 * prime bucket set based on the flags and btreetype to determine
2605 * the discrepancy between the attribute btree and catalog btree for
2606 * extended attribute data consistency. This function is based on
2607 * Chinese Remainder Theorem.
2608 *
2609 * Algorithm:
2610 * 1. If neither kHFSHasAttributesMask nor kHFSHasSecurityMask is set,
2611 * return.
2612 * 2. Based on btreetype and the flags, determine which prime number
2613 * bucket should be updated. Initialize pointers accordingly.
2614 * 3. Divide the fileID with pre-defined prime numbers. Store the
2615 * remainder.
2616 * 4. Increment each prime number bucket at an offset of the
2617 * corresponding remainder with one.
2618 *
2619 * Input: 1. GPtr - pointer to global scavenger area
2620 * 2. flags - can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2621 * 3. fileid - fileID for which particular extended attribute is seen
2622 * 4. btreetype - can be kCalculatedCatalogRefNum or kCalculatedAttributesRefNum;
2623 * indicates which btree's prime number buckets should be incremented
2624 *
2625 * Output: nil
2626 */
2627 void RecordXAttrBits(SGlobPtr GPtr, UInt16 flags, HFSCatalogNodeID fileid, UInt16 btreetype)
2628 {
2629 PrimeBuckets *cur_attr = NULL;
2630 PrimeBuckets *cur_sec = NULL;
2631
2632 if ( ((flags & kHFSHasAttributesMask) == 0) &&
2633 ((flags & kHFSHasSecurityMask) == 0) ) {
2634 /* No attributes exist for this fileID */
2635 goto out;
2636 }
2637
2638 /* Determine which buckets we are updating */
2639 if (btreetype == kCalculatedCatalogRefNum) {
2640 /* Catalog BTree buckets */
2641 if (flags & kHFSHasAttributesMask) {
2642 cur_attr = &(GPtr->CBTAttrBucket);
2643 GPtr->cat_ea_count++;
2644 }
2645 if (flags & kHFSHasSecurityMask) {
2646 cur_sec = &(GPtr->CBTSecurityBucket);
2647 GPtr->cat_acl_count++;
2648 }
2649 } else if (btreetype == kCalculatedAttributesRefNum) {
2650 /* Attribute BTree buckets */
2651 if (flags & kHFSHasAttributesMask) {
2652 cur_attr = &(GPtr->ABTAttrBucket);
2653 GPtr->attr_ea_count++;
2654 }
2655 if (flags & kHFSHasSecurityMask) {
2656 cur_sec = &(GPtr->ABTSecurityBucket);
2657 GPtr->attr_acl_count++;
2658 }
2659 } else {
2660 /* Incorrect btreetype found */
2661 goto out;
2662 }
2663
2664 if (cur_attr) {
2665 add_prime_bucket_uint32(cur_attr, fileid);
2666 }
2667
2668 if (cur_sec) {
2669 add_prime_bucket_uint32(cur_sec, fileid);
2670 }
2671
2672 out:
2673 return;
2674 }
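/*
 * Conceptual sketch of the bucket update (the actual moduli are private to
 * the PrimeBuckets implementation; two small primes are used here purely
 * for illustration):
 */
#if 0	/* illustrative only, never compiled */
	/* Recording fileID 37 in a bucket set built on the primes 7 and 11: */
	bucket7 [37 % 7]++;	/* increments slot 2 */
	bucket11[37 % 11]++;	/* increments slot 4 */
	/* After both BTrees are scanned, the catalog-side and attribute-side
	 * histograms are compared slot by slot in CompareXattrPrimeBuckets. */
#endif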
2675
2676 /* Function: CompareXattrPrimeBuckets
2677 *
2678 * Description:
2679 * This function compares the prime number buckets for catalog btree
2680 * and attribute btree for the given attribute type (normal attribute
2681 * bit or security bit).
2682 *
2683 * Input: 1. GPtr - pointer to global scavenger area
2684 * 2. BitMask - indicate which attribute type should be compared.
2685 * can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2686 * Output: zero - buckets were compared successfully
2687 * non-zero - buckets were not compared
2688 */
2689 static int CompareXattrPrimeBuckets(SGlobPtr GPtr, UInt16 BitMask)
2690 {
2691 int result = 1;
2692 PrimeBuckets *cat; /* Catalog BTree */
2693 PrimeBuckets *attr; /* Attribute BTree */
2694
2695 /* Find the correct PrimeBuckets to compare */
2696 if (BitMask & kHFSHasAttributesMask) {
2697 /* Compare buckets for attribute bit */
2698 cat = &(GPtr->CBTAttrBucket);
2699 attr = &(GPtr->ABTAttrBucket);
2700 } else if (BitMask & kHFSHasSecurityMask) {
2701 /* Compare buckets for security bit */
2702 cat = &(GPtr->CBTSecurityBucket);
2703 attr = &(GPtr->ABTSecurityBucket);
2704 } else {
2705 plog ("%s: Incorrect BitMask found.\n", __FUNCTION__);
2706 goto out;
2707 }
2708
2709 result = compare_prime_buckets(cat, attr);
2710 if (result) {
2711 char catbtree[32], attrbtree[32];
2712 /* Unequal values found, set the error bit in ABTStat */
2713 if (BitMask & kHFSHasAttributesMask) {
2714 fsckPrint(GPtr->context, E_IncorrectAttrCount);
2715 sprintf (catbtree, "%u", GPtr->cat_ea_count);
2716 sprintf (attrbtree, "%u", GPtr->attr_ea_count);
2717 fsckPrint(GPtr->context, E_BadValue, attrbtree, catbtree);
2718 GPtr->ABTStat |= S_AttributeCount;
2719 } else {
2720 fsckPrint(GPtr->context, E_IncorrectSecurityCount);
2721 sprintf (catbtree, "%u", GPtr->cat_acl_count);
2722 sprintf (attrbtree, "%u", GPtr->attr_acl_count);
2723 fsckPrint (GPtr->context, E_BadValue, attrbtree, catbtree);
2724 GPtr->ABTStat |= S_SecurityCount;
2725 }
2726 }
2727
2728 result = 0;
2729
2730 out:
2731 return result;
2732 }
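/*
 * Why the comparison works: every fileID carrying the attribute (or ACL)
 * bit contributes one count to a fixed slot of each prime bucket on both
 * the catalog side and the attributes side, so a single missing or extra
 * entry always perturbs at least one slot, and larger discrepancies are
 * extremely unlikely to cancel out across every modulus; matching
 * histograms for all of the moduli is therefore treated as agreement
 * between the two BTrees.
 */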
2733
2734 /*------------------------------------------------------------------------------
2735
2736 Function: AttrBTChk - (Attributes BTree Check)
2737
2738 Function: Verifies the attributes BTree structure.
2739
2740 Input: GPtr - pointer to scavenger global area
2741
2742 Output: AttrBTChk - function result:
2743 0 = no error
2744 n = error code
2745 ------------------------------------------------------------------------------*/
2746
2747 OSErr AttrBTChk( SGlobPtr GPtr )
2748 {
2749 OSErr err;
2750
2751 //
2752 // If this volume has no attributes BTree, then skip this check
2753 //
2754 if (GPtr->calculatedVCB->vcbAttributesFile == NULL)
2755 return noErr;
2756
2757 // Write the status message here to avoid potential confusion to the user.
2758 fsckPrint(GPtr->context, hfsExtAttrBTCheck);
2759
2760 // Set up
2761 GPtr->TarID = kHFSAttributesFileID; // target = attributes file
2762 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
2763
2764 //
2765 // check out the BTree structure
2766 //
2767
2768 err = BTCheck( GPtr, kCalculatedAttributesRefNum, (CheckLeafRecordProcPtr)CheckAttributeRecord);
2769 ReturnIfError( err ); // invalid attributes file BTree
2770
2771 // check the allocation block information about the last attribute
2772 err = CheckLastAttrAllocation(GPtr);
2773 ReturnIfError(err);
2774
2775 // record the last fileID for Chinese Remainder Theorem comparison
2776 RecordLastAttrBits(GPtr);
2777
2778 // compare the attributes prime buckets calculated from catalog btree and attribute btree
2779 err = CompareXattrPrimeBuckets(GPtr, kHFSHasAttributesMask);
2780 ReturnIfError( err );
2781
2782 // compare the security prime buckets calculated from catalog btree and attribute btree
2783 err = CompareXattrPrimeBuckets(GPtr, kHFSHasSecurityMask);
2784 ReturnIfError( err );
2785
2786 //
2787 // check out the allocation map structure
2788 //
2789
2790 err = BTMapChk( GPtr, kCalculatedAttributesRefNum );
2791 ReturnIfError( err ); // Invalid attributes BTree map
2792
2793 //
2794 // Make sure unused nodes in the B-tree are zero filled.
2795 //
2796 err = BTCheckUnusedNodes(GPtr, kCalculatedAttributesRefNum, &GPtr->ABTStat);
2797 ReturnIfError( err );
2798
2799 //
2800 // compare BTree header record on disk with scavenger's BTree header record
2801 //
2802
2803 err = CmpBTH( GPtr, kCalculatedAttributesRefNum );
2804 ReturnIfError( err );
2805
2806 //
2807 // compare BTree map on disk with scavenger's BTree map
2808 //
2809
2810 err = CmpBTM( GPtr, kCalculatedAttributesRefNum );
2811
2812 return( err );
2813 }
2814
2815
2816 /*------------------------------------------------------------------------------
2817
2818 Name: RcdValErr - (Record Valence Error)
2819
2820 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2821 list, to describe an incorrect valence count for possible repair.
2822
2823 Input: GPtr - ptr to scavenger global data
2824 type - error code (E_xxx), which should be >0
2825 correct - the correct valence, as computed here
2826 incorrect - the incorrect valence as found in volume
2827 parid - the parent id, if S_Valence error
2828
2829 Output: 0 - no error
2830 R_NoMem - not enough mem to allocate record
2831 ------------------------------------------------------------------------------*/
2832
2833 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid ) /* the ParID, if needed */
2834 {
2835 RepairOrderPtr p; /* the new node we compile */
2836 SInt16 n; /* size of node we allocate */
2837 Boolean isHFSPlus;
2838 char goodStr[32], badStr[32];
2839
2840 isHFSPlus = VolumeObjectIsHFSPlus( );
2841 fsckPrint(GPtr->context, type);
2842 sprintf(goodStr, "%u", correct);
2843 sprintf(badStr, "%u", incorrect);
2844 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2845
2846 if (type == E_DirVal) /* if normal directory valence error */
2847 n = CatalogNameSize( &GPtr->CName, isHFSPlus);
2848 else
2849 n = 0; /* other errors don't need the name */
2850
2851 p = AllocMinorRepairOrder( GPtr,n ); /* get the node */
2852 if (p==NULL) /* quit if out of room */
2853 return (R_NoMem);
2854
2855 p->type = type; /* save error info */
2856 p->correct = correct;
2857 p->incorrect = incorrect;
2858 p->parid = parid;
2859
2860 if ( n != 0 ) /* if name needed */
2861 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
2862
2863 GPtr->CatStat |= S_Valence; /* set flag to trigger repair */
2864
2865 return( noErr ); /* successful return */
2866 }
2867
2868 /*------------------------------------------------------------------------------
2869
2870 Name: RcdHsFldCntErr - (Record HasFolderCount)
2871
2872 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2873 list, to describe a folder flags field missing the HasFolderCount bit
2874
2875 Input: GPtr - ptr to scavenger global data
2876 type - error code (E_xxx), which should be >0
2877 correct - the folder mask, as computed here
2878 incorrect - the folder mask, as found in volume
2879 fid - the folder id
2880
2881 Output: 0 - no error
2882 R_NoMem - not enough mem to allocate record
2883 ------------------------------------------------------------------------------*/
2884
2885 int RcdHsFldCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2886 {
2887 RepairOrderPtr p; /* the new node we compile */
2888 char goodStr[32], badStr[32];
2889 fsckPrint(GPtr->context, type, fid);
2890 sprintf(goodStr, "%#x", correct);
2891 sprintf(badStr, "%#x", incorrect);
2892 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2893
2894 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2895 if (p==NULL) /* quit if out of room */
2896 return (R_NoMem);
2897
2898 p->type = type; /* save error info */
2899 p->correct = correct;
2900 p->incorrect = incorrect;
2901 p->parid = fid;
2902
2903 return( noErr ); /* successful return */
2904 }
2905 /*------------------------------------------------------------------------------
2906
2907 Name: RcdFCntErr - (Record Folder Count)
2908
2909 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2910 list, to describe an incorrect folder count for possible repair.
2911
2912 Input: GPtr - ptr to scavenger global data
2913 type - error code (E_xxx), which should be >0
2914 correct - the correct folder count, as computed here
2915 incorrect - the incorrect folder count as found in volume
2916 fid - the folder id
2917
2918 Output: 0 - no error
2919 R_NoMem - not enough mem to allocate record
2920 ------------------------------------------------------------------------------*/
2921
2922 int RcdFCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2923 {
2924 RepairOrderPtr p; /* the new node we compile */
2925 char goodStr[32], badStr[32];
2926
2927 fsckPrint(GPtr->context, type, fid);
2928 sprintf(goodStr, "%u", correct);
2929 sprintf(badStr, "%u", incorrect);
2930 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2931
2932 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2933 if (p==NULL) /* quit if out of room */
2934 return (R_NoMem);
2935
2936 p->type = type; /* save error info */
2937 p->correct = correct;
2938 p->incorrect = incorrect;
2939 p->parid = fid;
2940
2941 return( noErr ); /* successful return */
2942 }
2943
2944 /*------------------------------------------------------------------------------
2945
2946 Name: RcdMDBEmbededVolDescriptionErr - (Record MDB Embedded Volume Description Error)
2947
2948 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2949 list, to describe the error for possible repair.
2950
2951 Input: GPtr - ptr to scavenger global data
2952 type - error code (E_xxx), which should be >0
2953 mdb - pointer to the master directory block whose embedded
2954 volume description is recorded for repair
2955
2956 Output: 0 - no error
2957 R_NoMem - not enough mem to allocate record
2958 ------------------------------------------------------------------------------*/
2959
2960 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb )
2961 {
2962 RepairOrderPtr p; // the new node we compile
2963 EmbededVolDescription *desc;
2964
2965 RcdError( GPtr, type ); // first, record the error
2966
2967 p = AllocMinorRepairOrder( GPtr, sizeof(EmbededVolDescription) ); // get the node
2968 if ( p == nil ) return( R_NoMem );
2969
2970 p->type = type; // save error info
2971 desc = (EmbededVolDescription *) &(p->name);
2972 desc->drAlBlSt = mdb->drAlBlSt;
2973 desc->drEmbedSigWord = mdb->drEmbedSigWord;
2974 desc->drEmbedExtent.startBlock = mdb->drEmbedExtent.startBlock;
2975 desc->drEmbedExtent.blockCount = mdb->drEmbedExtent.blockCount;
2976
2977 GPtr->VIStat |= S_InvalidWrapperExtents; // set flag to trigger repair
2978
2979 return( noErr ); // successful return
2980 }
2981
2982
2983 #if 0 // not used at this time
2984 /*------------------------------------------------------------------------------
2985
2986 Name: RcdInvalidWrapperExtents - (Record Invalid Wrapper Extents)
2987
2988 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2989 list, to describe the error for possible repair.
2990
2991 Input: GPtr - ptr to scavenger global data
2992 type - error code (E_xxx), which should be >0
2993 correct - the correct valence, as computed here
2994 incorrect - the incorrect valence as found in volume
2995
2996 Output: 0 - no error
2997 R_NoMem - not enough mem to allocate record
2998 ------------------------------------------------------------------------------*/
2999
3000 static OSErr RcdInvalidWrapperExtents( SGlobPtr GPtr, OSErr type )
3001 {
3002 RepairOrderPtr p; // the new node we compile
3003
3004 RcdError( GPtr, type ); // first, record the error
3005
3006 p = AllocMinorRepairOrder( GPtr, 0 ); // get the node
3007 if ( p == nil ) return( R_NoMem );
3008
3009 p->type = type; // save error info
3010
3011 GPtr->VIStat |= S_BadMDBdrAlBlSt; // set flag to trigger repair
3012
3013 return( noErr ); // successful return
3014 }
3015 #endif
3016
3017
3018 #if 0 // We just check and fix them in SRepair.c
3019 /*------------------------------------------------------------------------------
3020
3021 Name: RcdOrphanedExtentErr
3022
3023 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3024 list, to describe an orphaned extent record for possible repair.
3025
3026 Input: GPtr - ptr to scavenger global data
3027 type - error code (E_xxx), which should be >0
3028 theKey - the extent key identifying the orphaned extent
3029
3030 Output: 0 - no error
3031 R_NoMem - not enough mem to allocate record
3032 ------------------------------------------------------------------------------*/
3033
3034 static OSErr RcdOrphanedExtentErr ( SGlobPtr GPtr, SInt16 type, void *theKey )
3035 {
3036 RepairOrderPtr p; /* the new node we compile */
3037 SInt16 n; /* size of node we allocate */
3038 Boolean isHFSPlus;
3039
3040 isHFSPlus = VolumeObjectIsHFSPlus( );
3041 RcdError( GPtr,type ); /* first, record the error */
3042
3043 if ( isHFSPlus )
3044 n = sizeof( HFSPlusExtentKey );
3045 else
3046 n = sizeof( HFSExtentKey );
3047
3048 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3049 if ( p == NULL ) /* quit if out of room */
3050 return( R_NoMem );
3051
3052 CopyMemory( theKey, p->name, n ); /* copy in the key */
3053
3054 p->type = type; /* save error info */
3055
3056 GPtr->EBTStat |= S_OrphanedExtent; /* set flag to trigger repair */
3057
3058 return( noErr ); /* successful return */
3059 }
3060 #endif
3061
3062
3063 /*------------------------------------------------------------------------------
3064
3065 Function: VInfoChk - (Volume Info Check)
3066
3067 Function: Verifies volume level information.
3068
3069 Input: GPtr - pointer to scavenger global area
3070
3071 Output: VInfoChk - function result:
3072 0 = no error
3073 n = error code
3074 ------------------------------------------------------------------------------*/
3075
3076 OSErr VInfoChk( SGlobPtr GPtr )
3077 {
3078 OSErr result;
3079 UInt16 recSize;
3080 Boolean isHFSPlus;
3081 UInt32 hint;
3082 UInt64 maxClump;
3083 SVCB *vcb;
3084 VolumeObjectPtr myVOPtr;
3085 CatalogRecord record;
3086 CatalogKey foundKey;
3087 BlockDescriptor altBlock;
3088 BlockDescriptor priBlock;
3089
3090 vcb = GPtr->calculatedVCB;
3091 altBlock.buffer = priBlock.buffer = NULL;
3092 isHFSPlus = VolumeObjectIsHFSPlus( );
3093 myVOPtr = GetVolumeObjectPtr( );
3094
3095 // locate the catalog record for the root directory…
3096 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3097 GPtr->TarID = kHFSCatalogFileID; /* target = catalog */
3098 GPtr->TarBlock = hint; /* target block = returned hint */
3099 if ( result != noErr )
3100 {
3101 result = IntError( GPtr, result );
3102 return( result );
3103 }
3104
3105 GPtr->TarID = AMDB_FNum; // target = alternate MDB or VHB
3106 GetVolumeObjectAlternateBlockNum( &GPtr->TarBlock );
3107 result = GetVolumeObjectAlternateBlock( &altBlock );
3108
3109 // invalidate if we have not marked the alternate as OK
3110 if ( isHFSPlus ) {
3111 if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 )
3112 result = badMDBErr;
3113 }
3114 else if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) {
3115 result = badMDBErr;
3116 }
3117 if ( result != noErr ) {
3118 GPtr->VIStat = GPtr->VIStat | S_MDB;
3119 if ( VolumeObjectIsHFS( ) ) {
3120 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3121 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3122 plog("\tinvalid alternate MDB at %qd result %d \n", GPtr->TarBlock, result);
3123 }
3124 else {
3125 WriteError( GPtr, E_VolumeHeaderDamaged, 0, 0 );
3126 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3127 plog("\tinvalid alternate VHB at %qd result %d \n", GPtr->TarBlock, result);
3128 }
3129 result = noErr;
3130 goto exit;
3131 }
3132
3133 GPtr->TarID = MDB_FNum; // target = primary MDB or VHB
3134 GetVolumeObjectPrimaryBlockNum( &GPtr->TarBlock );
3135 result = GetVolumeObjectPrimaryBlock( &priBlock );
3136
3137 // invalidate if we have not marked the primary as OK
3138 if ( isHFSPlus ) {
3139 if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 )
3140 result = badMDBErr;
3141 }
3142 else if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) {
3143 result = badMDBErr;
3144 }
3145 if ( result != noErr ) {
3146 GPtr->VIStat = GPtr->VIStat | S_MDB;
3147 if ( VolumeObjectIsHFS( ) ) {
3148 WriteError( GPtr, E_MDBDamaged, 1, 0 );
3149 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3150 plog("\tinvalid primary MDB at %qd result %d \n", GPtr->TarBlock, result);
3151 }
3152 else {
3153 WriteError( GPtr, E_VolumeHeaderDamaged, 1, 0 );
3154 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3155 plog("\tinvalid primary VHB at %qd result %d \n", GPtr->TarBlock, result);
3156 }
3157 result = noErr;
3158 goto exit;
3159 }
3160
3161 // check to see that embedded HFS plus volumes still have both (alternate and primary) MDBs
3162 if ( VolumeObjectIsEmbeddedHFSPlus( ) &&
3163 ( (myVOPtr->flags & kVO_PriMDBOK) == 0 || (myVOPtr->flags & kVO_AltMDBOK) == 0 ) )
3164 {
3165 GPtr->VIStat |= S_WMDB;
3166 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3167 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3168 plog("\tinvalid wrapper MDB \n");
3169 }
3170
3171 if ( isHFSPlus )
3172 {
3173 HFSPlusVolumeHeader * volumeHeader;
3174 HFSPlusVolumeHeader * alternateVolumeHeader;
3175
3176 alternateVolumeHeader = (HFSPlusVolumeHeader *) altBlock.buffer;
3177 volumeHeader = (HFSPlusVolumeHeader *) priBlock.buffer;
3178
3179 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3180
3181 // check out creation and last mod dates
3182 vcb->vcbCreateDate = alternateVolumeHeader->createDate; // use creation date in alt MDB
3183 vcb->vcbModifyDate = volumeHeader->modifyDate; // don't change last mod date
3184 vcb->vcbCheckedDate = volumeHeader->checkedDate; // don't change checked date
3185
3186 // 3882639: Removed check for volume attributes in HFS Plus
3187 vcb->vcbAttributes = volumeHeader->attributes;
3188
3189 // verify allocation map ptr
3190 if ( volumeHeader->nextAllocation < vcb->vcbTotalBlocks )
3191 vcb->vcbNextAllocation = volumeHeader->nextAllocation;
3192 else
3193 vcb->vcbNextAllocation = 0;
3194
3195 // verify default clump sizes
3196 if ( (volumeHeader->rsrcClumpSize > 0) &&
3197 (volumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3198 ((volumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3199 vcb->vcbRsrcClumpSize = volumeHeader->rsrcClumpSize;
3200 else if ( (alternateVolumeHeader->rsrcClumpSize > 0) &&
3201 (alternateVolumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3202 ((alternateVolumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3203 vcb->vcbRsrcClumpSize = alternateVolumeHeader->rsrcClumpSize;
3204 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3205 vcb->vcbRsrcClumpSize = 4 * vcb->vcbBlockSize;
3206 else
3207 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3208
3209 if ( vcb->vcbRsrcClumpSize > kMaxClumpSize )
3210 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3211
3212 if ( (volumeHeader->dataClumpSize > 0) && (volumeHeader->dataClumpSize <= kMaxClumpSize) &&
3213 ((volumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3214 vcb->vcbDataClumpSize = volumeHeader->dataClumpSize;
3215 else if ( (alternateVolumeHeader->dataClumpSize > 0) &&
3216 (alternateVolumeHeader->dataClumpSize <= kMaxClumpSize) &&
3217 ((alternateVolumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3218 vcb->vcbDataClumpSize = alternateVolumeHeader->dataClumpSize;
3219 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3220 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3221 else
3222 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3223
3224 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3225 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
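/* Summary of the fallback chain above (same rule for both clump sizes): a
 * clump size is accepted only if it is nonzero, no larger than kMaxClumpSize,
 * and a multiple of the allocation block size; the primary header is tried
 * first, then the alternate, then a default of 4 allocation blocks, and
 * finally a single allocation block when even four blocks would exceed
 * kMaxClumpSize.  With a hypothetical 4 KiB allocation block the default
 * works out to 16 KiB.
 */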
3226
3227 /* Verify next CNode ID.
3228 * If volumeHeader->nextCatalogID < vcb->vcbNextCatalogID, probably
3229 * nextCatalogID has wrapped around.
3230 * If volumeHeader->nextCatalogID > vcb->vcbNextCatalogID, probably
3231 * many files were created and deleted, followed by no new file
3232 * creation.
3233 */
3234 if ( (volumeHeader->nextCatalogID > vcb->vcbNextCatalogID) )
3235 vcb->vcbNextCatalogID = volumeHeader->nextCatalogID;
3236
3237 //••TBD location and unicode? volumename
3238 // verify the volume name
3239 result = ChkCName( GPtr, (const CatalogName*) &foundKey.hfsPlus.nodeName, isHFSPlus );
3240
3241 // verify last backup date and backup sequence number
3242 vcb->vcbBackupDate = volumeHeader->backupDate; /* don't change last backup date */
3243
3244 // verify write count
3245 vcb->vcbWriteCount = volumeHeader->writeCount; /* don't change write count */
3246
3247 // check out extent file clump size
3248 if ( ((volumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3249 (volumeHeader->extentsFile.clumpSize <= maxClump) )
3250 vcb->vcbExtentsFile->fcbClumpSize = volumeHeader->extentsFile.clumpSize;
3251 else if ( ((alternateVolumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3252 (alternateVolumeHeader->extentsFile.clumpSize <= maxClump) )
3253 vcb->vcbExtentsFile->fcbClumpSize = alternateVolumeHeader->extentsFile.clumpSize;
3254 else
3255 vcb->vcbExtentsFile->fcbClumpSize =
3256 (alternateVolumeHeader->extentsFile.extents[0].blockCount * vcb->vcbBlockSize);
3257
3258 // check out catalog file clump size
3259 if ( ((volumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3260 (volumeHeader->catalogFile.clumpSize <= maxClump) )
3261 vcb->vcbCatalogFile->fcbClumpSize = volumeHeader->catalogFile.clumpSize;
3262 else if ( ((alternateVolumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3263 (alternateVolumeHeader->catalogFile.clumpSize <= maxClump) )
3264 vcb->vcbCatalogFile->fcbClumpSize = alternateVolumeHeader->catalogFile.clumpSize;
3265 else
3266 vcb->vcbCatalogFile->fcbClumpSize =
3267 (alternateVolumeHeader->catalogFile.extents[0].blockCount * vcb->vcbBlockSize);
3268
3269 // check out allocations file clump size
3270 if ( ((volumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3271 (volumeHeader->allocationFile.clumpSize <= maxClump) )
3272 vcb->vcbAllocationFile->fcbClumpSize = volumeHeader->allocationFile.clumpSize;
3273 else if ( ((alternateVolumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3274 (alternateVolumeHeader->allocationFile.clumpSize <= maxClump) )
3275 vcb->vcbAllocationFile->fcbClumpSize = alternateVolumeHeader->allocationFile.clumpSize;
3276 else
3277 vcb->vcbAllocationFile->fcbClumpSize =
3278 (alternateVolumeHeader->allocationFile.extents[0].blockCount * vcb->vcbBlockSize);
3279
3280 // check out attribute file clump size
3281 if (vcb->vcbAttributesFile) {
3282 if ( ((volumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3283 (volumeHeader->attributesFile.clumpSize <= maxClump) &&
3284 (volumeHeader->attributesFile.clumpSize != 0))
3285 vcb->vcbAttributesFile->fcbClumpSize = volumeHeader->attributesFile.clumpSize;
3286 else if ( ((alternateVolumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3287 (alternateVolumeHeader->attributesFile.clumpSize <= maxClump) &&
3288 (alternateVolumeHeader->attributesFile.clumpSize != 0))
3289 vcb->vcbAttributesFile->fcbClumpSize = alternateVolumeHeader->attributesFile.clumpSize;
3290 else if (vcb->vcbCatalogFile->fcbClumpSize != 0)
3291 // The original attribute clump may be too small, use catalog's
3292 vcb->vcbAttributesFile->fcbClumpSize = vcb->vcbCatalogFile->fcbClumpSize;
3293 else
3294 vcb->vcbAttributesFile->fcbClumpSize =
3295 alternateVolumeHeader->attributesFile.extents[0].blockCount * vcb->vcbBlockSize;
3296 }
3297
3298 CopyMemory( volumeHeader->finderInfo, vcb->vcbFinderInfo, sizeof(vcb->vcbFinderInfo) );
3299
3300 // Now compare verified Volume Header info (in the form of a vcb) with Volume Header info on disk
3301 result = CompareVolumeHeader( GPtr, volumeHeader );
3302
3303 // check to see that embedded volume info is correct in both wrapper MDBs
3304 CheckEmbeddedVolInfoInMDBs( GPtr );
3305
3306 }
3307 else // HFS
3308 {
3309 HFSMasterDirectoryBlock *mdbP;
3310 HFSMasterDirectoryBlock *alternateMDB;
3311
3312 //
3313 // get volume name from BTree Key
3314 //
3315
3316 alternateMDB = (HFSMasterDirectoryBlock *) altBlock.buffer;
3317 mdbP = (HFSMasterDirectoryBlock *) priBlock.buffer;
3318
3319 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3320
3321 // check out creation and last mod dates
3322 vcb->vcbCreateDate = alternateMDB->drCrDate; /* use creation date in alt MDB */
3323 vcb->vcbModifyDate = mdbP->drLsMod; /* don't change last mod date */
3324
3325 // verify volume attribute flags
3326 if ( (mdbP->drAtrb & VAtrb_Msk) == 0 )
3327 vcb->vcbAttributes = mdbP->drAtrb;
3328 else
3329 vcb->vcbAttributes = VAtrb_DFlt;
3330
3331 // verify allocation map ptr
3332 if ( mdbP->drAllocPtr < vcb->vcbTotalBlocks )
3333 vcb->vcbNextAllocation = mdbP->drAllocPtr;
3334 else
3335 vcb->vcbNextAllocation = 0;
3336
3337 // verify default clump size
3338 if ( (mdbP->drClpSiz > 0) &&
3339 (mdbP->drClpSiz <= maxClump) &&
3340 ((mdbP->drClpSiz % vcb->vcbBlockSize) == 0) )
3341 vcb->vcbDataClumpSize = mdbP->drClpSiz;
3342 else if ( (alternateMDB->drClpSiz > 0) &&
3343 (alternateMDB->drClpSiz <= maxClump) &&
3344 ((alternateMDB->drClpSiz % vcb->vcbBlockSize) == 0) )
3345 vcb->vcbDataClumpSize = alternateMDB->drClpSiz;
3346 else
3347 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3348
3349 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3350 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3351
3352 // verify next CNode ID
3353 if ( (mdbP->drNxtCNID > vcb->vcbNextCatalogID) && (mdbP->drNxtCNID <= (vcb->vcbNextCatalogID + 4096)) )
3354 vcb->vcbNextCatalogID = mdbP->drNxtCNID;
3355
3356 // verify the volume name
3357 result = ChkCName( GPtr, (const CatalogName*) &vcb->vcbVN, isHFSPlus );
3358 if ( result == noErr )
3359 if ( CmpBlock( mdbP->drVN, vcb->vcbVN, vcb->vcbVN[0] + 1 ) == 0 )
3360 CopyMemory( mdbP->drVN, vcb->vcbVN, kHFSMaxVolumeNameChars + 1 ); /* ...we have a good one */
3361
3362 // verify last backup date and backup sequence number
3363 vcb->vcbBackupDate = mdbP->drVolBkUp; /* don't change last backup date */
3364 vcb->vcbVSeqNum = mdbP->drVSeqNum; /* don't change last backup sequence # */
3365
3366 // verify write count
3367 vcb->vcbWriteCount = mdbP->drWrCnt; /* don't change write count */
3368
3369 // check out extent file and catalog clump sizes
3370 if ( ((mdbP->drXTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drXTClpSiz <= maxClump) )
3371 vcb->vcbExtentsFile->fcbClumpSize = mdbP->drXTClpSiz;
3372 else if ( ((alternateMDB->drXTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drXTClpSiz <= maxClump) )
3373 vcb->vcbExtentsFile->fcbClumpSize = alternateMDB->drXTClpSiz;
3374 else
3375 vcb->vcbExtentsFile->fcbClumpSize = (alternateMDB->drXTExtRec[0].blockCount * vcb->vcbBlockSize);
3376
3377 if ( ((mdbP->drCTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drCTClpSiz <= maxClump) )
3378 vcb->vcbCatalogFile->fcbClumpSize = mdbP->drCTClpSiz;
3379 else if ( ((alternateMDB->drCTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drCTClpSiz <= maxClump) )
3380 vcb->vcbCatalogFile->fcbClumpSize = alternateMDB->drCTClpSiz;
3381 else
3382 vcb->vcbCatalogFile->fcbClumpSize = (alternateMDB->drCTExtRec[0].blockCount * vcb->vcbBlockSize);
3383
3384 // just copy Finder info for now
3385 CopyMemory(mdbP->drFndrInfo, vcb->vcbFinderInfo, sizeof(mdbP->drFndrInfo));
3386
3387 // now compare verified MDB info with MDB info on disk
3388 result = CmpMDB( GPtr, mdbP);
3389 }
3390
3391 exit:
3392 if (priBlock.buffer)
3393 (void) ReleaseVolumeBlock(vcb, &priBlock, kReleaseBlock);
3394 if (altBlock.buffer)
3395 (void) ReleaseVolumeBlock(vcb, &altBlock, kReleaseBlock);
3396
3397 return (result);
3398
3399 } /* end of VInfoChk */
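
/*
 * Illustrative sketch (not part of the original source): every clump size
 * check in VInfoChk above applies the same rule -- a candidate clump size is
 * accepted only if it is non-zero, no larger than the computed maximum, and
 * a whole multiple of the allocation block size.  A helper capturing that
 * predicate might look like this (helper name is hypothetical):
 */
__unused static Boolean ClumpSizeIsValid( UInt32 clumpSize, UInt32 blockSize, UInt64 maxClump )
{
	return ( (clumpSize > 0) &&
	         ((UInt64) clumpSize <= maxClump) &&
	         ((clumpSize % blockSize) == 0) );
}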
3400
3401
3402 /*------------------------------------------------------------------------------
3403
3404 Function: VLockedChk - (Volume Name Locked Check)
3405
3406 Function: Makes sure the volume name isn't locked. If it is locked, generate a repair order.
3407
3408 This function is not called if file sharing is operating.
3409
3410 Input: GPtr - pointer to scavenger global area
3411
3412 Output: VLockedChk - function result:
3413 0 = no error
3414 n = error code
3415 ------------------------------------------------------------------------------*/
3416
3417 OSErr VLockedChk( SGlobPtr GPtr )
3418 {
3419 UInt32 hint;
3420 CatalogKey foundKey;
3421 CatalogRecord record;
3422 UInt16 recSize;
3423 OSErr result;
3424 UInt16 frFlags;
3425 Boolean isHFSPlus;
3426 SVCB *calculatedVCB = GPtr->calculatedVCB;
3427 VolumeObjectPtr myVOPtr;
3428
3429 myVOPtr = GetVolumeObjectPtr( );
3430 isHFSPlus = VolumeObjectIsHFSPlus( );
3431 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
3432 GPtr->TarBlock = 0; /* no target block yet */
3433
3434 //
3435 // locate the catalog record for the root directory
3436 //
3437 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3438
3439 if ( result)
3440 {
3441 RcdError( GPtr, E_EntryNotFound );
3442 return( E_EntryNotFound );
3443 }
3444
3445 // put the volume name in the VCB
3446 if ( isHFSPlus == false )
3447 {
3448 CopyMemory( foundKey.hfs.nodeName, calculatedVCB->vcbVN, sizeof(calculatedVCB->vcbVN) );
3449 }
3450 else if ( myVOPtr->volumeType != kPureHFSPlusVolumeType )
3451 {
3452 HFSMasterDirectoryBlock *mdbP;
3453 BlockDescriptor block;
3454
3455 block.buffer = NULL;
3456 if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 )
3457 result = GetVolumeObjectPrimaryMDB( &block );
3458 else
3459 result = GetVolumeObjectAlternateMDB( &block );
3460 if ( result == noErr ) {
3461 mdbP = (HFSMasterDirectoryBlock *) block.buffer;
3462 CopyMemory( mdbP->drVN, calculatedVCB->vcbVN, sizeof(mdbP->drVN) );
3463 }
3464 if ( block.buffer != NULL )
3465 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock );
3466 ReturnIfError(result);
3467 }
3468 else // Because we don't have the unicode converters, just fill it with a dummy name.
3469 {
3470 CopyMemory( "\x0dPure HFS Plus", calculatedVCB->vcbVN, sizeof(Str27) );
3471 }
3472
3473 GPtr->TarBlock = hint;
3474 if ( isHFSPlus )
3475 CopyCatalogName( (const CatalogName *)&foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
3476 else
3477 CopyCatalogName( (const CatalogName *)&foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus );
3478
3479 if ( (record.recordType == kHFSPlusFolderRecord) || (record.recordType == kHFSFolderRecord) )
3480 {
3481 frFlags = record.recordType == kHFSPlusFolderRecord ?
3482 record.hfsPlusFolder.userInfo.frFlags :
3483 record.hfsFolder.userInfo.frFlags;
3484
3485 if ( frFlags & fNameLocked ) // name locked bit set?
3486 RcdNameLockedErr( GPtr, E_LockedDirName, frFlags );
3487 }
3488
3489 return( noErr );
3490 }
3491
3492
3493 /*------------------------------------------------------------------------------
3494
3495 Name: RcdNameLockedErr
3496
3497 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3498 list, to describe a locked volume name for possible repair.
3499
3500 Input: GPtr - ptr to scavenger global data
3501 type - error code (E_xxx), which should be >0
3502 incorrect - the incorrect file flags as found in file record
3503
3504 Output: 0 - no error
3505 R_NoMem - not enough mem to allocate record
3506 ------------------------------------------------------------------------------*/
3507
3508 static int RcdNameLockedErr( SGlobPtr GPtr, SInt16 type, UInt32 incorrect ) /* for a consistency check */
3509 {
3510 RepairOrderPtr p; /* the new node we compile */
3511 int n; /* size of node we allocate */
3512 Boolean isHFSPlus;
3513
3514 isHFSPlus = VolumeObjectIsHFSPlus( );
3515 RcdError( GPtr, type ); /* first, record the error */
3516
3517 n = CatalogNameSize( &GPtr->CName, isHFSPlus );
3518
3519 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3520 if ( p==NULL ) /* quit if out of room */
3521 return ( R_NoMem );
3522
3523 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
3524
3525 p->type = type; /* save error info */
3526 p->correct = incorrect & ~fNameLocked; /* mask off the name locked bit */
3527 p->incorrect = incorrect;
3528 p->maskBit = (UInt16)fNameLocked;
3529 p->parid = 1;
3530
3531 GPtr->CatStat |= S_LockedDirName; /* set flag to trigger repair */
3532
3533 return( noErr ); /* successful return */
3534 }
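
/*
 * Illustrative sketch (not part of the original source): the repair proposed
 * by RcdNameLockedErr is simply the original Finder flags with the
 * fNameLocked bit cleared, which is the value stored in p->correct above.
 * The helper name below is hypothetical.
 */
__unused static UInt32 ExampleProposedFinderFlags( UInt32 incorrectFlags )
{
	return ( incorrectFlags & ~fNameLocked );	/* same mask used for p->correct */
}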
3535
3536 /*------------------------------------------------------------------------------
3537
3538 Name: RecordBadExtent
3539
3540 Function: Allocates a RepairOrder for repairing a bad extent.
3541
3542 Input: GPtr - ptr to scavenger global data
3543 fileID - fileID of the file with bad extent
3544 forkType - bad extent's fork type
3545 startBlock - start block of the bad extent record
3546 badExtentIndex - index of bad extent entry in the extent record
3547
3548 Output: 0 - no error
3549 R_NoMem - not enough mem to allocate record
3550 ------------------------------------------------------------------------------*/
3551
3552 static int RecordBadExtent(SGlobPtr GPtr, UInt32 fileID, UInt8 forkType,
3553 UInt32 startBlock, UInt32 badExtentIndex)
3554 {
3555 RepairOrderPtr p;
3556 Boolean isHFSPlus;
3557
3558 isHFSPlus = VolumeObjectIsHFSPlus();
3559
3560 p = AllocMinorRepairOrder(GPtr, 0);
3561 if (p == NULL) {
3562 return(R_NoMem);
3563 }
3564
3565 p->type = E_ExtEnt;
3566 p->forkType = forkType;
3567 p->correct = badExtentIndex;
3568 p->hint = startBlock;
3569 p->parid = fileID;
3570
3571 GPtr->CatStat |= S_BadExtent;
3572 return (0);
3573 }
3574
3575 /*
3576 * Build a catalog node thread key.
3577 */
3578 __unused static void
3579 buildthreadkey(UInt32 parentID, int std_hfs, CatalogKey *key)
3580 {
3581 if (std_hfs) {
3582 key->hfs.keyLength = kHFSCatalogKeyMinimumLength;
3583 key->hfs.reserved = 0;
3584 key->hfs.parentID = parentID;
3585 key->hfs.nodeName[0] = 0;
3586 } else {
3587 key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength;
3588 key->hfsPlus.parentID = parentID;
3589 key->hfsPlus.nodeName.length = 0;
3590 }
3591 }
3592
3593
3594 static void
3595 printpath(SGlobPtr GPtr, UInt32 fileID)
3596 {
3597 int result;
3598 char path[PATH_MAX * 4];
3599 unsigned int pathlen = PATH_MAX * 4;
3600
3601 if (fileID < kHFSFirstUserCatalogNodeID) {
3602 switch(fileID) {
3603 case kHFSExtentsFileID:
3604 printf("$Extents_Overflow_File\n");
3605 return;
3606 case kHFSCatalogFileID:
3607 printf("$Catalog_File\n");
3608 return;
3609 case kHFSAllocationFileID:
3610 printf("$Allocation_Bitmap_File\n");
3611 return;
3612 case kHFSAttributesFileID:
3613 printf("$Attributes_File\n");
3614 return;
3615 default:
3616 printf("$File_ID_%d\n", fileID);
3617 return;
3618 }
3619 }
3620
3621 result = GetFileNamePathByID(GPtr, fileID, path, &pathlen, NULL, NULL, NULL);
3622 if (result) {
3623 printf ("error %d getting path for id=%u\n", result, fileID);
3624 }
3625
3626 printf("\"ROOT_OF_VOLUME%s\" (file id=%u)\n", path, fileID);
3627 }
3628
3629 void
3630 CheckPhysicalMatch(SVCB *vcb, UInt32 startblk, UInt32 blkcount, UInt32 fileNumber, UInt8 forkType)
3631 {
3632 int i;
3633 u_int64_t blk, blk1, blk2;
3634 u_int64_t offset;
3635
3636 offset = (u_int64_t) startblk * (u_int64_t) vcb->vcbBlockSize;
3637
3638 if (vcb->vcbSignature == kHFSPlusSigWord)
3639 offset += vcb->vcbEmbeddedOffset; // offset into the wrapper
3640 else
3641 offset += vcb->vcbAlBlSt * 512ULL; // offset to start of volume
3642
3643 blk1 = offset / gBlockSize;
3644 blk2 = blk1 + ((blkcount * vcb->vcbBlockSize) / gBlockSize);
3645
3646 for (i = 0; i < gBlkListEntries; ++i) {
3647 blk = gBlockList[i];
3648
3649 if (blk >= blk1 && blk < blk2) {
3650 // printf("block %d is in file %d\n", blk, fileNumber);
3651 /* Do we need to grow the found blocks list? */
3652 if (gFoundBlockEntries % FOUND_BLOCKS_QUANTUM == 0) {
3653 struct found_blocks *new_blocks;
3654 new_blocks = realloc(gFoundBlocksList, (gFoundBlockEntries + FOUND_BLOCKS_QUANTUM) * sizeof(struct found_blocks));
3655 if (new_blocks == NULL) {
3656 fprintf(stderr, "CheckPhysicalMatch: Out of memory!\n");
3657 return;
3658 }
3659 gFoundBlocksList = new_blocks;
3660 }
3661 gFoundBlocksList[gFoundBlockEntries].block = blk;
3662 gFoundBlocksList[gFoundBlockEntries].fileID = fileNumber;
3663 ++gFoundBlockEntries;
3664 }
3665 }
3666 }
3667
3668 static int compare_found_blocks(const void *x1_arg, const void *x2_arg)
3669 {
3670 const struct found_blocks *x1 = x1_arg;
3671 const struct found_blocks *x2 = x2_arg;
3672
3673 if (x1->block < x2->block)
3674 return -1;
3675 else if (x1->block > x2->block)
3676 return 1;
3677 else {
3678 if (x1->fileID < x2->fileID)
3679 return -1;
3680 else if (x1->fileID > x2->fileID)
3681 return 1;
3682 }
3683
3684 return 0;
3685 }
3686
3687 void
3688 dumpblocklist(SGlobPtr GPtr)
3689 {
3690 int i, j;
3691 u_int64_t block;
3692
3693 /* Sort the found blocks */
3694 qsort(gFoundBlocksList, gFoundBlockEntries, sizeof(struct found_blocks), compare_found_blocks);
3695
3696 /*
3697 * Print out the blocks with matching files. In the case of overlapped
3698 * extents, the same block number will be printed multiple times, with
3699 * each file containing an overlapping extent. If overlapping extents
3700 * come from the same file, then that path will be printed multiple times.
3701 */
3702 for (i = 0; i < gFoundBlockEntries; ++i) {
3703 block = gFoundBlocksList[i].block;
3704
3705 printf("block %llu:\t", (unsigned long long) block);
3706 printpath(GPtr, gFoundBlocksList[i].fileID);
3707
3708 /* Remove block from the gBlockList */
3709 for (j = 0; j < gBlkListEntries; ++j) {
3710 if (gBlockList[j] == block) {
3711 gBlockList[j] = gBlockList[--gBlkListEntries];
3712 break;
3713 }
3714 }
3715 }
3716
3717 /* Print out the blocks without matching files */
3718 for (j = 0; j < gBlkListEntries; ++j) {
3719 printf("block %llu:\t*** NO MATCH ***\n", (unsigned long long) gBlockList[j]);
3720 }
3721 }
3722
3723 /*------------------------------------------------------------------------------
3724
3725 Function: CheckFileExtents - (Check File Extents)
3726
3727 Description:
3728 Verifies the extent info for file data or extended attribute data. It
3729 checks the correctness of the extent data. If the extent information is
3730 correct/valid, it updates the in-memory volume bitmap and the total number
3731 of valid blocks for the given file, and if overlapping extents exist, adds
3732 them to the overlap extents list. If the extent information is not
3733 correct, it considers the file truncated beyond the bad extent entry,
3734 reports only the total number of good blocks seen, and records the bad
3735 extent in a repair order so the truncation can be detected and repaired.
3736 It does not include the invalid extent, or any extents after it, when
3737 checking the volume bitmap, and hence overlapping extents. Note that
3738 currently the function returns an error if an invalid extent is found for
3739 system files or for extended attributes.
3740
3741 For data fork and resource fork of file - This function checks extent
3742 record present in catalog record as well as extent overflow records, if
3743 any, for given fileID.
3744
3745 For extended attribute data - This function only checks the extent record
3746 passed as parameter. If any extended attribute has overflow extents in
3747 the attribute btree, this function does not look them up. It is left
3748 to the caller to check the remaining extents for the given file's extended attribute.
3749
3750 Input:
3751 GPtr - pointer to scavenger global area
3752 fileNumber - file number for fork/extended attribute
3753 forkType - fork type
3754 00 - kDataFork - data fork
3755 01 - kEAData - extended attribute data extent
3756 ff - kRsrcFork - resource fork
3757 attrname - if fork type is kEAData, attrname contains pointer to the
3758 name of extended attribute whose extent is being checked; else
3759 it should be NULL. Note that the function assumes that this is a
3760 NULL-terminated string.
3761 extents - ptr to 1st extent record for the file
3762
3763 Output:
3764 CheckFileExtents - function result:
3765 noErr = no error
3766 n = error code
3767 blocksUsed - number of allocation blocks allocated to the file
3768 ------------------------------------------------------------------------------*/
3769
3770 OSErr CheckFileExtents( SGlobPtr GPtr, UInt32 fileNumber, UInt8 forkType,
3771 const unsigned char *attrname, const void *extents,
3772 UInt32 *blocksUsed)
3773 {
3774 UInt32 blockCount = 0;
3775 UInt32 extentBlockCount;
3776 UInt32 extentStartBlock;
3777 UInt32 hint;
3778 HFSPlusExtentKey key;
3779 HFSPlusExtentKey extentKey;
3780 HFSPlusExtentRecord extentRecord;
3781 UInt16 recSize;
3782 OSErr err = noErr;
3783 SInt16 i;
3784 Boolean firstRecord;
3785 Boolean isHFSPlus;
3786 unsigned int lastExtentIndex;
3787 Boolean foundBadExtent;
3788
3789 /* For all extended attribute extents, the attrname should not be NULL */
3790 if (forkType == kEAData) {
3791 assert(attrname != NULL);
3792 }
3793
3794 isHFSPlus = VolumeObjectIsHFSPlus( );
3795 firstRecord = true;
3796 foundBadExtent = false;
3797 lastExtentIndex = GPtr->numExtents;
3798
3799 while ( (extents != nil) && (err == noErr) )
3800 {
3801 // checkout the extent record first
3802 err = ChkExtRec( GPtr, fileNumber, extents, &lastExtentIndex );
3803 if (err != noErr) {
3804 DPRINTF (d_info, "%s: Bad extent for fileID %u in extent %u for startblock %u\n", __FUNCTION__, fileNumber, lastExtentIndex, blockCount);
3805 if (cur_debug_level & d_dump_record)
3806 {
3807 plog("Extents:\n");
3808 HexDump(extents, sizeof(HFSPlusExtentRecord), FALSE);
3809 plog("\n");
3810 }
3811
3812 /* Stop verification if bad extent is found for system file or EA */
3813 if ((fileNumber < kHFSFirstUserCatalogNodeID) ||
3814 (forkType == kEAData)) {
3815 break;
3816 }
3817
3818 /* store information about bad extent in repair order */
3819 (void) RecordBadExtent(GPtr, fileNumber, forkType, blockCount, lastExtentIndex);
3820 foundBadExtent = true;
3821 err = noErr;
3822 }
3823
3824 /* Check only till the last valid extent entry reported by ChkExtRec */
3825 for ( i=0 ; i<lastExtentIndex ; i++ ) // now checkout the extents
3826 {
3827 // HFS+/HFS moving extent fields into local variables for evaluation
3828 if ( isHFSPlus == true )
3829 {
3830 extentBlockCount = ((HFSPlusExtentDescriptor *)extents)[i].blockCount;
3831 extentStartBlock = ((HFSPlusExtentDescriptor *)extents)[i].startBlock;
3832 }
3833 else
3834 {
3835 extentBlockCount = ((HFSExtentDescriptor *)extents)[i].blockCount;
3836 extentStartBlock = ((HFSExtentDescriptor *)extents)[i].startBlock;
3837 }
3838
3839 if ( extentBlockCount == 0 )
3840 break;
3841
3842 if (gBlkListEntries != 0)
3843 CheckPhysicalMatch(GPtr->calculatedVCB, extentStartBlock, extentBlockCount, fileNumber, forkType);
3844
3845 err = CaptureBitmapBits(extentStartBlock, extentBlockCount);
3846 if (err == E_OvlExt) {
3847 err = AddExtentToOverlapList(GPtr, fileNumber, (char *)attrname, extentStartBlock, extentBlockCount, forkType);
3848 }
3849
3850 blockCount += extentBlockCount;
3851 }
3852
3853 if ( fileNumber == kHFSExtentsFileID ) // Extents file has no overflow extents
3854 break;
3855
3856 /* A bad extent was found for this file; do not look up any extents
3857 * after the current extent. We assume that the file is truncated at
3858 * the bad extent entry.
3859 */
3860 if (foundBadExtent == true) {
3861 break;
3862 }
3863
3864 /* For extended attributes, only check the extent passed as parameter. The
3865 * caller will take care of checking other extents, if any, for given
3866 * extended attribute.
3867 */
3868 if (forkType == kEAData) {
3869 break;
3870 }
3871
3872 if ( firstRecord == true )
3873 {
3874 firstRecord = false;
3875
3876 // Set up the extent key
3877 BuildExtentKey( isHFSPlus, forkType, fileNumber, blockCount, (void *)&key );
3878
3879 err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, &key, kNoHint, (void *) &extentKey, (void *) &extentRecord, &recSize, &hint );
3880
3881 if ( err == btNotFound )
3882 {
3883 err = noErr; // no more extent records
3884 extents = nil;
3885 break;
3886 }
3887 else if ( err != noErr )
3888 {
3889 err = IntError( GPtr, err ); // error from SearchBTreeRecord
3890 return( err );
3891 }
3892 }
3893 else
3894 {
3895 err = GetBTreeRecord( GPtr->calculatedExtentsFCB, 1, &extentKey, extentRecord, &recSize, &hint );
3896
3897 if ( err == btNotFound )
3898 {
3899 err = noErr; // no more extent records
3900 extents = nil;
3901 break;
3902 }
3903 else if ( err != noErr )
3904 {
3905 err = IntError( GPtr, err ); /* error from BTGetRecord */
3906 return( err );
3907 }
3908
3909 // Check same file and fork
3910 if ( isHFSPlus )
3911 {
3912 if ( (extentKey.fileID != fileNumber) || (extentKey.forkType != forkType) )
3913 break;
3914 }
3915 else
3916 {
3917 if ( (((HFSExtentKey *) &extentKey)->fileID != fileNumber) || (((HFSExtentKey *) &extentKey)->forkType != forkType) )
3918 break;
3919 }
3920 }
3921
3922 extents = (void *) &extentRecord;
3923 }
3924
3925 *blocksUsed = blockCount;
3926
3927 return( err );
3928 }
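
/*
 * Illustrative usage sketch (not part of the original source): checking the
 * data-fork extents of an HFS Plus catalog file record.  attrname is NULL
 * because a fork, not an extended attribute, is being checked.  The helper
 * name is hypothetical.
 */
__unused static OSErr ExampleCheckDataForkExtents( SGlobPtr GPtr, const CatalogRecord *catRecord )
{
	UInt32 blocksUsed = 0;

	return CheckFileExtents( GPtr, catRecord->hfsPlusFile.fileID, kDataFork, NULL,
	                         catRecord->hfsPlusFile.dataFork.extents, &blocksUsed );
}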
3929
3930
3931 void BuildExtentKey( Boolean isHFSPlus, UInt8 forkType, HFSCatalogNodeID fileNumber, UInt32 blockNumber, void * key )
3932 {
3933 if ( isHFSPlus )
3934 {
3935 HFSPlusExtentKey *hfsPlusKey = (HFSPlusExtentKey*) key;
3936
3937 hfsPlusKey->keyLength = kHFSPlusExtentKeyMaximumLength;
3938 hfsPlusKey->forkType = forkType;
3939 hfsPlusKey->pad = 0;
3940 hfsPlusKey->fileID = fileNumber;
3941 hfsPlusKey->startBlock = blockNumber;
3942 }
3943 else
3944 {
3945 HFSExtentKey *hfsKey = (HFSExtentKey*) key;
3946
3947 hfsKey->keyLength = kHFSExtentKeyMaximumLength;
3948 hfsKey->forkType = forkType;
3949 hfsKey->fileID = fileNumber;
3950 hfsKey->startBlock = (UInt16) blockNumber;
3951 }
3952 }
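
/*
 * Illustrative sketch (not part of the original source): building the key
 * that CheckFileExtents uses to locate a file's first overflow extent record
 * in the extents B-tree, starting at the number of blocks already accounted
 * for by the catalog-resident extents.  The helper name is hypothetical.
 */
__unused static void ExampleBuildOverflowExtentKey( UInt32 fileID, UInt32 blocksSoFar, HFSPlusExtentKey *key )
{
	BuildExtentKey( true, kDataFork, fileID, blocksSoFar, (void *) key );
}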
3953
3954
3955
3956 //
3957 // Adds this extent to our OverlappedExtentList for later repair.
3958 //
3959 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrname, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType )
3960 {
3961 size_t newHandleSize;
3962 ExtentInfo extentInfo;
3963 ExtentsTable **extentsTableH;
3964 size_t attrlen;
3965
3966 ClearMemory(&extentInfo, sizeof(extentInfo));
3967 extentInfo.fileID = fileNumber;
3968 extentInfo.startBlock = extentStartBlock;
3969 extentInfo.blockCount = extentBlockCount;
3970 extentInfo.forkType = forkType;
3971 /* store the name of extended attribute */
3972 if (forkType == kEAData) {
3973 assert(attrname != NULL);
3974
3975 attrlen = strlen(attrname) + 1;
3976 extentInfo.attrname = malloc(attrlen);
3977 if (extentInfo.attrname == NULL) {
3978 return(memFullErr);
3979 }
3980 strlcpy(extentInfo.attrname, attrname, attrlen);
3981 }
3982
3983 // If it's uninitialized
3984 if ( GPtr->overlappedExtents == nil )
3985 {
3986 GPtr->overlappedExtents = (ExtentsTable **) NewHandleClear( sizeof(ExtentsTable) );
3987 extentsTableH = GPtr->overlappedExtents;
3988 }
3989 else
3990 {
3991 extentsTableH = GPtr->overlappedExtents;
3992
3993 if ( ExtentInfoExists( extentsTableH, &extentInfo) == true )
3994 return( noErr );
3995
3996 // Grow the Extents table for a new entry.
3997 newHandleSize = ( sizeof(ExtentInfo) ) + ( GetHandleSize( (Handle)extentsTableH ) );
3998 SetHandleSize( (Handle)extentsTableH, newHandleSize );
3999 }
4000
4001 // Copy the new extents into the end of the table
4002 CopyMemory( &extentInfo, &((**extentsTableH).extentInfo[(**extentsTableH).count]), sizeof(ExtentInfo) );
4003
4004 // Update the overlap extent bit
4005 GPtr->VIStat |= S_OverlappingExtents;
4006
4007 // Update the extent table count
4008 (**extentsTableH).count++;
4009
4010 return( noErr );
4011 }
4012
4013
4014 /* Check if the given extentInfo exists in the extents table */
4015 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo)
4016 {
4017 UInt32 i;
4018 ExtentInfo *aryExtentInfo;
4019
4020
4021 for ( i = 0 ; i < (**extentsTableH).count ; i++ )
4022 {
4023 aryExtentInfo = &((**extentsTableH).extentInfo[i]);
4024
4025 if ( extentInfo->fileID == aryExtentInfo->fileID )
4026 {
4027 if ( (extentInfo->startBlock == aryExtentInfo->startBlock) &&
4028 (extentInfo->blockCount == aryExtentInfo->blockCount) &&
4029 (extentInfo->forkType == aryExtentInfo->forkType) )
4030 {
4031 /* startBlock, blockCount, forkType are same.
4032 * Compare the extended attribute names, if they exist.
4033 */
4034
4035 /* If no attribute name exists, the two extents are same */
4036 if ((extentInfo->attrname == NULL) &&
4037 (aryExtentInfo->attrname == NULL)) {
4038 return(true);
4039 }
4040
4041 /* If only one attribute name exists, the two extents are not same */
4042 if (((extentInfo->attrname != NULL) && (aryExtentInfo->attrname == NULL)) ||
4043 ((extentInfo->attrname == NULL) && (aryExtentInfo->attrname != NULL))) {
4044 return(false);
4045 }
4046
4047 /* Both attribute names exist. Compare the names */
4048 if (!strcmp(extentInfo->attrname, aryExtentInfo->attrname)) {
4049 return (true);
4050 } else {
4051 return (false);
4052 }
4053
4054 }
4055 }
4056 }
4057
4058 return( false );
4059 }
4060
4061 /* Function : DoesOverlap
4062 *
4063 * Description:
4064 * This function takes a start block and the count of blocks in a
4065 * given extent and compares it against the list of overlapped
4066 * extents in the global structure.
4067 * This is useful in finding the original files that overlap with
4068 * the files found in catalog btree check. If a file is found
4069 * overlapping, it is added to the overlap list.
4070 *
4071 * Input:
4072 * 1. GPtr - global scavenger pointer.
4073 * 2. fileID - file ID being checked.
4074 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4075 * 4. startBlock - start block in extent.
4076 * 5. blockCount - total number of blocks in extent.
4077 * 6. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
4078 *
4079 * Output: isOverlapped - Boolean value of true or false.
4080 */
4081 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType)
4082 {
4083 int i;
4084 Boolean isOverlapped = false;
4085 ExtentInfo *curExtentInfo;
4086 ExtentsTable **extentsTableH = GPtr->overlappedExtents;
4087
4088 for (i = 0; i < (**extentsTableH).count; i++) {
4089 curExtentInfo = &((**extentsTableH).extentInfo[i]);
4090 /* Check extents */
4091 if (curExtentInfo->startBlock < startBlock) {
4092 if ((curExtentInfo->startBlock + curExtentInfo->blockCount) > startBlock) {
4093 isOverlapped = true;
4094 break;
4095 }
4096 } else { /* curExtentInfo->startBlock >= startBlock */
4097 if (curExtentInfo->startBlock < (startBlock + blockCount)) {
4098 isOverlapped = true;
4099 break;
4100 }
4101 }
4102 } /* for loop Extents Table */
4103
4104 /* Add this extent to overlap list */
4105 if (isOverlapped) {
4106 AddExtentToOverlapList(GPtr, fileID, attrname, startBlock, blockCount, forkType);
4107 }
4108
4109 return isOverlapped;
4110 } /* DoesOverlap */
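
/*
 * Illustrative sketch (not part of the original source): the interval test
 * performed by DoesOverlap above.  Two extents [aStart, aStart + aCount) and
 * [bStart, bStart + bCount) overlap exactly when each one starts before the
 * other one ends.  The helper name is hypothetical.
 */
__unused static Boolean ExampleExtentsOverlap( UInt32 aStart, UInt32 aCount, UInt32 bStart, UInt32 bCount )
{
	return ( (aStart < bStart + bCount) && (bStart < aStart + aCount) );
}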
4111
4112 /* Function : CheckHFSPlusExtentRecords
4113 *
4114 * Description:
4115 * For all valid extents, this function calls DoesOverlap to find
4116 * if a given extent is overlapping with another extent existing
4117 * in the overlap list.
4118 *
4119 * Input:
4120 * 1. GPtr - global scavenger pointer.
4121 * 2. fileID - file ID being checked.
4122 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4123 * 4. extent - extent information to check.
4124 * 5. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
4125 *
4126 * Output: None.
4127 */
4128 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType)
4129 {
4130 int i;
4131
4132 /* Check for overlapping extents for all extents in given extent data */
4133 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4134 if (extent[i].startBlock == 0) {
4135 break;
4136 }
4137 DoesOverlap(GPtr, fileID, attrname, extent[i].startBlock, extent[i].blockCount, forkType);
4138 }
4139 return;
4140 } /* CheckHFSPlusExtentRecords */
4141
4142 /* Function : CheckHFSExtentRecords
4143 *
4144 * Description:
4145 * For all valid extents, this function calls DoesOverlap to find
4146 * if a given extent is overlapping with another extent existing
4147 * in the overlap list.
4148 *
4149 * Input:
4150 * 1. GPtr - global scavenger pointer.
4151 * 2. fileID - file ID being checked.
4152 * 3. extent - extent information to check.
4153 * 4. forkType - type of fork being checked (kDataFork, kRsrcFork).
4154 *
4155 * Output: None.
4156 */
4157 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType)
4158 {
4159 int i;
4160
4161 /* Check for overlapping extents for all extents in given extents */
4162 for (i = 0; i < kHFSExtentDensity; i++) {
4163 if (extent[i].startBlock == 0) {
4164 break;
4165 }
4166 DoesOverlap(GPtr, fileID, NULL, extent[i].startBlock, extent[i].blockCount, forkType);
4167 }
4168 return;
4169 } /* CheckHFSExtentRecords */
4170
4171 /* Function: FindOrigOverlapFiles
4172 *
4173 * Description:
4174 * This function is called only if the btree check results in
4175 * overlapped extent errors. The btree checks do not find
4176 * out the original files whose extents are overlapping with the one
4177 * being reported in its check. This function finds out all the
4178 * original files whose extents are being overlapped.
4179 *
4180 * This function relies on comparison of extents with Overlap list
4181 * created in verify stage. The list is also updated with the
4182 * overlapped extents found in this function.
4183 *
4184 * 1. Compare extents for all the files located in volume header.
4185 * 2. Traverse catalog btree and compare extents of all files.
4186 * 3. Traverse extents btree and compare extents for all entries.
4187 *
4188 * Input: GPtr - pointer to global scavenger area.
4189 *
4190 * Output: err - function result
4191 * zero means success
4192 * non-zero means failure
4193 */
4194 int FindOrigOverlapFiles(SGlobPtr GPtr)
4195 {
4196 OSErr err = noErr;
4197 Boolean isHFSPlus;
4198
4199 UInt16 selCode; /* select access pattern for BTree */
4200 UInt16 recordSize;
4201 UInt32 hint;
4202
4203 CatalogRecord catRecord;
4204 CatalogKey catKey;
4205
4206 ExtentRecord extentRecord;
4207 ExtentKey extentKey;
4208
4209 HFSPlusAttrRecord attrRecord;
4210 HFSPlusAttrKey attrKey;
4211 char attrName[XATTR_MAXNAMELEN];
4212 size_t len;
4213
4214 SVCB *calculatedVCB = GPtr->calculatedVCB;
4215
4216 isHFSPlus = VolumeObjectIsHFSPlus();
4217
4218 /* Check file extents from volume header */
4219 if (isHFSPlus) {
4220 /* allocation file */
4221 if (calculatedVCB->vcbAllocationFile) {
4222 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAllocationFile->fcbFileID, NULL,
4223 calculatedVCB->vcbAllocationFile->fcbExtents32, kDataFork);
4224 }
4225
4226 /* extents file */
4227 if (calculatedVCB->vcbExtentsFile) {
4228 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID, NULL,
4229 calculatedVCB->vcbExtentsFile->fcbExtents32, kDataFork);
4230 }
4231
4232 /* catalog file */
4233 if (calculatedVCB->vcbCatalogFile) {
4234 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID, NULL,
4235 calculatedVCB->vcbCatalogFile->fcbExtents32, kDataFork);
4236 }
4237
4238 /* attributes file */
4239 if (calculatedVCB->vcbAttributesFile) {
4240 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAttributesFile->fcbFileID, NULL,
4241 calculatedVCB->vcbAttributesFile->fcbExtents32, kDataFork);
4242 }
4243
4244 /* startup file */
4245 if (calculatedVCB->vcbStartupFile) {
4246 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbStartupFile->fcbFileID, NULL,
4247 calculatedVCB->vcbStartupFile->fcbExtents32, kDataFork);
4248 }
4249 } else {
4250 /* extents file */
4251 if (calculatedVCB->vcbExtentsFile) {
4252 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID,
4253 calculatedVCB->vcbExtentsFile->fcbExtents16, kDataFork);
4254 }
4255
4256 /* catalog file */
4257 if (calculatedVCB->vcbCatalogFile) {
4258 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID,
4259 calculatedVCB->vcbCatalogFile->fcbExtents16, kDataFork);
4260 }
4261 }
4262
4263 /* Traverse the catalog btree */
4264 selCode = 0x8001; /* Get first record from BTree */
4265 err = GetBTreeRecord(GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint);
4266 if (err != noErr) {
4267 goto traverseExtents;
4268 }
4269 selCode = 1; /* Get next record */
4270 do {
4271 if ((catRecord.recordType == kHFSPlusFileRecord) ||
4272 (catRecord.recordType == kHFSFileRecord)) {
4273
4274 if (isHFSPlus) {
4275 /* HFSPlus data fork */
4276 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4277 catRecord.hfsPlusFile.dataFork.extents, kDataFork);
4278
4279 /* HFSPlus resource fork */
4280 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4281 catRecord.hfsPlusFile.resourceFork.extents, kRsrcFork);
4282 } else {
4283 /* HFS data extent */
4284 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4285 catRecord.hfsFile.dataExtents, kDataFork);
4286
4287 /* HFS resource extent */
4288 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4289 catRecord.hfsFile.rsrcExtents, kRsrcFork);
4290 }
4291 }
4292
4293 /* Access the next record */
4294 err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint );
4295 } while (err == noErr);
4296
4297 traverseExtents:
4298 /* Traverse the extents btree */
4299 selCode = 0x8001; /* Get first record from BTree */
4300 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4301 if (err != noErr) {
4302 goto traverseAttribute;
4303 }
4304 selCode = 1; /* Get next record */
4305 do {
4306 if (isHFSPlus) {
4307 CheckHFSPlusExtentRecords(GPtr, extentKey.hfsPlus.fileID, NULL,
4308 extentRecord.hfsPlus, extentKey.hfsPlus.forkType);
4309 } else {
4310 CheckHFSExtentRecords(GPtr, extentKey.hfs.fileID, extentRecord.hfs,
4311 extentKey.hfs.forkType);
4312 }
4313
4314 /* Access the next record */
4315 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4316 } while (err == noErr);
4317
4318 traverseAttribute:
4319 /* Extended attributes are only supported in HFS Plus */
4320 if (!isHFSPlus) {
4321 goto out;
4322 }
4323
4324 /* Traverse the attribute btree */
4325 selCode = 0x8001; /* Get first record from BTree */
4326 /* Warning: Attribute record of type kHFSPlusAttrInlineData may be
4327 * truncated on read! (4425232). This function only uses recordType
4328 * field from inline attribute record.
4329 */
4330 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4331 if (err != noErr) {
4332 goto out;
4333 }
4334 selCode = 1; /* Get next record */
4335 do {
4336 if (attrRecord.recordType == kHFSPlusAttrForkData) {
4337 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4338 attrName[len] = '\0';
4339
4340 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.forkData.theFork.extents, kEAData);
4341 } else if (attrRecord.recordType == kHFSPlusAttrExtents) {
4342 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4343 attrName[len] = '\0';
4344
4345 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.overflowExtents.extents, kEAData);
4346 }
4347
4348 /* Access the next record
4349 * Warning: Attribute record of type kHFSPlusAttrInlineData may be
4350 * truncated on read! (4425232). This function only uses recordType
4351 * field from inline attribute record.
4352 */
4353 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4354 } while (err == noErr);
4355
4356 out:
4357 if (err == btNotFound) {
4358 err = noErr;
4359 }
4360 return err;
4361 } /* FindOrigOverlapFiles */
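
/*
 * Illustrative sketch (not part of the original source): the B-tree
 * traversal idiom used by FindOrigOverlapFiles above.  A selCode of 0x8001
 * asks GetBTreeRecord for the first leaf record; a selCode of 1 then steps
 * forward until btNotFound is returned.  The helper name is hypothetical.
 */
__unused static int ExampleWalkCatalogLeafRecords( SGlobPtr GPtr )
{
	CatalogKey	key;
	CatalogRecord	record;
	UInt16		recordSize;
	UInt32		hint;
	OSErr		err;
	int		count = 0;

	err = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &key, &record, &recordSize, &hint );
	while ( err == noErr ) {
		++count;	/* a real caller would examine record.recordType here */
		err = GetBTreeRecord( GPtr->calculatedCatalogFCB, 1, &key, &record, &recordSize, &hint );
	}

	return ( err == btNotFound ) ? count : -1;
}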
4362
4363 /* Function: PrintOverlapFiles
4364 *
4365 * Description: Print the information about all unique overlapping files.
4366 * 1. Sort the overlap extent in increasing order of fileID
4367 * 2. For every unique fileID, prefix the string with fileID and find the
4368 * filename/path based on fileID.
4369 * If fileID > kHFSFirstUserCatalogNodeID, find path to file
4370 * Else, find name of the system file.
4371 * 3. Print the new string.
4372 * Note that the path is printed only for HFS Plus volumes and not for
4373 * plain HFS volumes. This is done by not allocating buffer for finding
4374 * file path.
4375 *
4376 * Input:
4377 * GPtr - Global scavenger structure pointer.
4378 *
4379 * Output:
4380 * nothing (void)
4381 */
4382 void PrintOverlapFiles (SGlobPtr GPtr)
4383 {
4384 OSErr err;
4385 ExtentsTable **extentsTableH;
4386 ExtentInfo *extentInfo;
4387 unsigned int numOverlapExtents;
4388 unsigned int buflen, filepathlen;
4389 char *filepath = NULL;
4390 UInt32 lastID = 0;
4391 Boolean printMsg;
4392 Boolean isHFSPlus;
4393 int i;
4394
4395 isHFSPlus = VolumeObjectIsHFSPlus();
4396
4397 extentsTableH = GPtr->overlappedExtents;
4398 numOverlapExtents = (**extentsTableH).count;
4399
4400 /* Sort the list according to file ID */
4401 qsort((**extentsTableH).extentInfo, numOverlapExtents, sizeof(ExtentInfo),
4402 CompareExtentFileID);
4403
4404 buflen = PATH_MAX * 4;
4405 /* Allocate buffer to read data */
4406 if (isHFSPlus) {
4407 filepath = malloc (buflen);
4408 }
4409
4410 for (i = 0; i < numOverlapExtents; i++) {
4411 extentInfo = &((**extentsTableH).extentInfo[i]);
4412
4413 /* Skip the same fileID */
4414 if (lastID == extentInfo->fileID) {
4415 continue;
4416 }
4417
4418 lastID = extentInfo->fileID;
4419 printMsg = false;
4420
4421 if (filepath) {
4422 filepathlen = buflen;
4423 if (extentInfo->fileID >= kHFSFirstUserCatalogNodeID) {
4424 /* Lookup the file path */
4425 err = GetFileNamePathByID (GPtr, extentInfo->fileID, filepath, &filepathlen, NULL, NULL, NULL);
4426 } else {
4427 /* Get system filename */
4428 err = GetSystemFileName (extentInfo->fileID, filepath, &filepathlen);
4429 }
4430
4431 if (err == noErr) {
4432 /* print fileID, filepath */
4433 fsckPrint(GPtr->context, E_OvlExt, extentInfo->fileID, filepath);
4434 printMsg = true;
4435 }
4436
4437 if (fsckGetVerbosity(GPtr->context) >= kDebugLog) {
4438 plog ("\textentType=0x%x, startBlock=0x%x, blockCount=0x%x, attrName=%s\n",
4439 extentInfo->forkType, extentInfo->startBlock, extentInfo->blockCount, extentInfo->attrname);
4440 }
4441 }
4442
4443 if (printMsg == false) {
4444 /* print only fileID */
4445 fsckPrint(GPtr->context, E_OvlExtID, extentInfo->fileID);
4446 }
4447 }
4448
4449 if (filepath) {
4450 free (filepath);
4451 }
4452
4453 return;
4454 } /* PrintOverlapFiles */
4455
4456 /* Function: CompareExtentFileID
4457 *
4458 * Description: Compares the fileIDs from two ExtentInfo structures and returns
4459 * the comparison result (since we have to arrange them in ascending order).
4460 *
4461 * Input:
4462 * first and second - void pointers to ExtentInfo structure.
4463 *
4464 * Output:
4465 * >0 if first > second
4466 * =0 if first == second
4467 * <0 if first < second
4468 */
4469 static int CompareExtentFileID(const void *first, const void *second)
4470 {
4471 return (((ExtentInfo *)first)->fileID -
4472 ((ExtentInfo *)second)->fileID);
4473 } /* CompareExtentFileID */
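
/*
 * Illustrative sketch (not part of the original source): an overflow-safe
 * variant of the comparator above.  Subtracting two UInt32 file IDs can wrap
 * when they differ by more than INT_MAX, so an explicit comparison avoids
 * relying on that.  The helper name is hypothetical.
 */
__unused static int ExampleCompareExtentFileIDSafe(const void *first, const void *second)
{
	UInt32 a = ((const ExtentInfo *)first)->fileID;
	UInt32 b = ((const ExtentInfo *)second)->fileID;

	return (a > b) - (a < b);
}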
4474
4475 /* Function: journal_replay
4476 *
4477 * Description: Replay journal on a journaled HFS+ volume. This function
4478 * returns success if the volume is not journaled or the journal was not
4479 * dirty. If there was any error in replaying the journal, a non-zero value
4480 * is returned.
4481 *
4482 * Output:
4483 * 0 - success, non-zero - failure.
4484 */
4485 //int journal_replay(SGlobPtr gptr)
4486 int journal_replay(const char *block_device)
4487 {
4488 int retval = 0;
4489 struct vfsconf vfc;
4490 int mib[4];
4491 int jfd;
4492
4493 jfd = open(block_device, O_RDWR);
4494 if (jfd == -1) {
4495 retval = errno;
4496 if (debug)
4497 fplog(stderr, "Unable to open block device %s: %s", block_device, strerror(errno));
4498 goto out;
4499 }
4500
4501 retval = getvfsbyname("hfs", &vfc);
4502 if (retval) {
4503 close(jfd);
4504 goto out;
4505 }
4506
4507 mib[0] = CTL_VFS;
4508 mib[1] = vfc.vfc_typenum;
4509 mib[2] = HFS_REPLAY_JOURNAL;
4510 mib[3] = jfd;
4511 retval = sysctl(mib, 4, NULL, NULL, NULL, 0);
4512 if (retval) {
4513 retval = errno;
4514 }
4515 (void)close(jfd);
4516
4517 out:
4518 return retval;
4519 }
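
/*
 * Illustrative usage sketch (not part of the original source): replaying the
 * journal on a block device before verification.  The device path and helper
 * name are hypothetical.
 */
__unused static int ExampleReplayJournal( void )
{
	int err = journal_replay( "/dev/disk0s2" );

	if ( err != 0 )
		plog( "journal replay failed (%d)\n", err );

	return err;
}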
4520