1 /*
2 * Copyright (c) 1999-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 File: SVerify1.c
25
26 Contains: Initial volume, B-tree, and catalog hierarchy verification routines for the fsck_hfs scavenger
27
28 Version: xxx put version here xxx
29
30 Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved.
31
32 */
33
34 #include "Scavenger.h"
35 #include "../cache.h"
36 #include <stdlib.h>
37 #include <stddef.h>
38 #include <unistd.h>
39 #include <errno.h>
40 #include <fcntl.h>
41 #include <limits.h>
42
43 #include <libkern/OSByteOrder.h>
44 #define SW16(x) OSSwapBigToHostInt16(x)
45 #define SW32(x) OSSwapBigToHostInt32(x)
46 #define SW64(x) OSSwapBigToHostInt64(x)
47
48 extern int OpenDeviceByUUID(void *uuidp, char **nameptr);
49
50 // internal routine prototypes
51
52 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid );
53
54 static int RcdNameLockedErr( SGlobPtr GPtr, OSErr type, UInt32 incorrect );
55
56 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb );
57
58 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb );
59
60 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType );
61 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector );
62
63 /* overlapping extents verification functions prototype */
64 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrName, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType );
65
66 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo);
67
68 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType);
69
70 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType);
71
72 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType);
73
74 static int CompareExtentFileID(const void *first, const void *second);
75
76 /*
77 * Check if a volume is journaled.
78 *
79 * If journal_bit_only is true, the function only checks
80 * if kHFSVolumeJournaledBit is set or not. If the bit
81 * is set, the function returns 1; otherwise it returns 0.
82 *
83 * If journal_bit_only is false, in addition to checking
84 * kHFSVolumeJournaledBit, the function also checks if the
85 * last mounted version indicates failed journal replay,
86 * or runtime corruption was detected, or the volume is
87 * simply not journaled and was not unmounted cleanly.
88 * If none of those conditions apply and the journal bit
89 * is set, the function returns 1 to indicate that the
90 * volume is truly journaled; otherwise it returns 0 to
91 * report that the volume is not journaled.
92 *
93 * returns: 0 not journaled or any of the above conditions are true
94 * 1 journaled
95 *
96 */
97 int
98 CheckIfJournaled(SGlobPtr GPtr, Boolean journal_bit_only)
99 {
100 #define kIDSector 2
101
102 OSErr err;
103 int result;
104 HFSMasterDirectoryBlock *mdbp;
105 HFSPlusVolumeHeader *vhp;
106 SVCB *vcb = GPtr->calculatedVCB;
107 ReleaseBlockOptions rbOptions;
108 BlockDescriptor block;
109
110 vhp = (HFSPlusVolumeHeader *) NULL;
111 rbOptions = kReleaseBlock;
112
113 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
114 if (err) return (0);
115
116 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
117
118 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
119 vhp = (HFSPlusVolumeHeader *) block.buffer;
120
121 } else if (mdbp->drSigWord == kHFSSigWord) {
122
123 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
124 UInt32 vhSector;
125 UInt32 blkSectors;
126
127 blkSectors = mdbp->drAlBlkSiz / 512;
128 vhSector = mdbp->drAlBlSt;
129 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
130 vhSector += kIDSector;
131
132 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
133 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
134 if (err) return (0);
135
136 vhp = (HFSPlusVolumeHeader *) block.buffer;
137 mdbp = (HFSMasterDirectoryBlock *) NULL;
138
139 }
140 }
141
142 if ((vhp != NULL) && (ValidVolumeHeader(vhp) == noErr)) {
143 result = ((vhp->attributes & kHFSVolumeJournaledMask) != 0);
144 if (journal_bit_only == true) {
145 goto out;
146 }
147
148 // even if journaling is enabled for this volume, we'll return
149 // false if it wasn't unmounted cleanly and it was previously
150 // mounted by someone that doesn't know about journaling.
151 // or if lastMountedVersion is kFSKMountVersion
152 if ( vhp->lastMountedVersion == kFSKMountVersion ||
153 (vhp->attributes & kHFSVolumeInconsistentMask) ||
154 ((vhp->lastMountedVersion != kHFSJMountVersion) &&
155 (vhp->attributes & kHFSVolumeUnmountedMask) == 0)) {
156 result = 0;
157 }
158 } else {
159 result = 0;
160 }
161
162 out:
163 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
164
165 return (result);
166 }
167
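/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source; the caller shown here is hypothetical).  It shows how the two
 * modes of CheckIfJournaled() are meant to be combined by a caller that
 * wants to distinguish "journaled and trustworthy" from "journal bit set
 * but journal not trustworthy" from "not journaled at all".
 */
#if 0 // example only
static void example_journal_policy(SGlobPtr GPtr)
{
	if (CheckIfJournaled(GPtr, false)) {
		/* Journal bit set and nothing suggests the journal is stale. */
	} else if (CheckIfJournaled(GPtr, true)) {
		/* Journal bit set, but replay failed, corruption was flagged,
		 * or the volume was not unmounted cleanly. */
	} else {
		/* Volume is not journaled. */
	}
}
#endif
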
168 /*
169 * Get the JournalInfoBlock from a volume.
170 *
171 * It borrows code to get the volume header. Note that it
172 * uses the primary volume header, not the alternate one.
173 * It returns 0 on success, or an error value.
174 * If requested, it will also set the block size (as a 32-bit
175 * value), via bsizep -- this is useful because the journal code
176 * needs to know the volume blocksize, but it doesn't necessarily
177 * have the header.
178 *
179 * Note also that it does direct reads, rather than going through
180 * the cache code. This simplifies getting the JIB.
181 */
182
183 static OSErr
184 GetJournalInfoBlock(SGlobPtr GPtr, JournalInfoBlock *jibp, UInt32 *bsizep)
185 {
186 #define kIDSector 2
187
188 OSErr err;
189 int result = 0;
190 UInt32 jiBlk = 0;
191 HFSMasterDirectoryBlock *mdbp;
192 HFSPlusVolumeHeader *vhp;
193 SVCB *vcb = GPtr->calculatedVCB;
194 ReleaseBlockOptions rbOptions;
195 BlockDescriptor block;
196 size_t blockSize = 0;
197 off_t embeddedOffset = 0;
198
199 vhp = (HFSPlusVolumeHeader *) NULL;
200 rbOptions = kReleaseBlock;
201
202 if (jibp == NULL)
203 return paramErr;
204
205 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
206 if (err) return (err);
207
208 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
209
210 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
211 vhp = (HFSPlusVolumeHeader *) block.buffer;
212
213 } else if (mdbp->drSigWord == kHFSSigWord) {
214
215 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
216 UInt32 vhSector;
217 UInt32 blkSectors;
218
219 blkSectors = mdbp->drAlBlkSiz / 512;
220 vhSector = mdbp->drAlBlSt;
221 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
222 vhSector += kIDSector;
223
224 embeddedOffset = (mdbp->drEmbedExtent.startBlock * mdbp->drAlBlkSiz) + (mdbp->drAlBlSt * Blk_Size);
225 if (debug)
226 plog("Embedded offset is %lld\n", embeddedOffset);
227
228 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
229 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
230 if (err) return (err);
231
232 vhp = (HFSPlusVolumeHeader *) block.buffer;
233 mdbp = (HFSMasterDirectoryBlock *) NULL;
234
235 }
236 }
237
238 if (vhp == NULL) {
239 result = paramErr;
240 goto out;
241 }
242 if ((err = ValidVolumeHeader(vhp)) != noErr) {
243 result = err;
244 goto out;
245 }
246
247 // journalInfoBlock is not automatically swapped
248 jiBlk = SW32(vhp->journalInfoBlock);
249 blockSize = vhp->blockSize;
250 (void)ReleaseVolumeBlock(vcb, &block, rbOptions);
251
252 if (jiBlk) {
253 int jfd = GPtr->DrvNum;
254 uint8_t block[blockSize];
255 ssize_t nread;
256
257 nread = pread(jfd, block, blockSize, (off_t)jiBlk * blockSize + embeddedOffset);
258 if (nread == blockSize) {
259 if (jibp)
260 memcpy(jibp, block, sizeof(JournalInfoBlock));
261 if (bsizep)
262 *bsizep = blockSize;
263 result = 0;
264 } else {
265 if (debug) {
266 plog("%s: Tried to read JIB, got %zd\n", __FUNCTION__, nread);
267 }
268 result = EINVAL;
269 }
270 }
271
272 out:
273 return (result);
274 }
275
276 /*
277 * Journal checksum calculation, taken directly from TN1150.
278 */
279 static int
280 calc_checksum(unsigned char *ptr, int len)
281 {
282 int i, cksum=0;
283
284 for(i=0; i < len; i++, ptr++) {
285 cksum = (cksum << 8) ^ (cksum + *ptr);
286 }
287
288 return (~cksum);
289 }
290
291 /*
292 * The journal_header structure is not defined in <hfs/hfs_format.h>;
293 * it's described in TN1150. It is on disk in the endian mode that was
294 * used to write it, so we may or may not need to swap the fields.
295 */
296 typedef struct journal_header {
297 UInt32 magic;
298 UInt32 endian;
299 UInt64 start;
300 UInt64 end;
301 UInt64 size;
302 UInt32 blhdr_size;
303 UInt32 checksum;
304 UInt32 jhdr_size;
305 UInt32 sequence_num;
306 } journal_header;
307
308 #define JOURNAL_HEADER_MAGIC 0x4a4e4c78
309 #define ENDIAN_MAGIC 0x12345678
310 #define JOURNAL_HEADER_CKSUM_SIZE (offsetof(struct journal_header, sequence_num))
311
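/*
 * Illustrative sketch (editor's addition; the helper below is hypothetical
 * and mirrors what IsJournalEmpty() does inline).  Per TN1150 the checksum
 * field must be zero while the header checksum is computed, and only the
 * bytes up to, but not including, sequence_num are covered.  The stored
 * checksum is byte swapped as indicated by the header's endian field.
 */
#if 0 // example only
static int jhdr_checksum_matches(journal_header *jhdr, int swap)
{
	UInt32 stored = swap ? SW32(jhdr->checksum) : jhdr->checksum;
	UInt32 computed;

	jhdr->checksum = 0;	/* field must be zero during the computation */
	computed = calc_checksum((unsigned char *)jhdr, JOURNAL_HEADER_CKSUM_SIZE);
	return (computed == stored);
}
#endif
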
312 /*
313 * Determine if a journal is empty.
314 * This code can use an in-filesystem, or external, journal.
315 * In general, it returns 0 if the journal exists and appears to
316 * be non-empty (that is, start and end in the journal header
317 * differ); it will return 1 if it exists and is empty, or if
318 * there was a problem getting the journal. (This behaviour was
319 * chosen because it mimics the existing behaviour of fsck_hfs,
320 * which has traditionally done nothing with the journal. Future
321 * versions may be more demanding.)
322 *
323 * <jp> is an OUT parameter: the contents of the structure it points
324 * to are filled in by this routine. (The reasoning for doing this
325 * is because this routine has to open the journal info block, and read
326 * from the journal device, so putting this in another function was
327 * duplicative and error-prone. By making it a structure instead of
328 * discrete arguments, it can also be extended in the future if necessary.)
329 */
330 int
331 IsJournalEmpty(SGlobPtr GPtr, fsckJournalInfo_t *jp)
332 {
333 int retval = 1;
334 OSErr result;
335 OSErr err = 0;
336 JournalInfoBlock jib;
337 UInt32 bsize;
338
339 result = GetJournalInfoBlock(GPtr, &jib, &bsize);
340 if (result == 0) {
341 /* jib is not byte swapped */
342 /* If the journal needs to be initialized, it's empty. */
343 if ((SW32(jib.flags) & kJIJournalNeedInitMask) == 0) {
344 off_t hdrOffset = SW64(jib.offset);
345 struct journal_header *jhdr;
346 uint8_t block[bsize];
347 ssize_t nread;
348 int jfd = -1;
349
350 /* If it's an external journal, kJIJournalInFSMask will not be set */
351 if (SW32(jib.flags) & kJIJournalInFSMask) {
352 jfd = dup(GPtr->DrvNum);
353 if (jp) jp->name = strdup(GPtr->deviceNode);
354 } else {
355 char **namePtr = jp ? &jp->name : NULL;
356 if (debug)
357 plog("External Journal device\n");
358 jfd = OpenDeviceByUUID(&jib.ext_jnl_uuid, namePtr);
359 }
360 if (jfd == -1) {
361 if (debug) {
362 plog("Unable to get journal file descriptor, journal flags = %#x\n", SW32(jib.flags));
363 }
364 goto out;
365 }
366 if (jp) {
367 jp->jnlfd = jfd;
368 jp->jnlOffset = SW64(jib.offset);
369 jp->jnlSize = SW64(jib.size);
370 }
371
372 nread = pread(jfd, block, bsize, hdrOffset);
373 if (nread == -1) {
374 if (debug) {
375 plog("Could not read journal from descriptor %d: %s", jfd, strerror(errno));
376 }
377 err = errno;
378 } else if (nread != bsize) {
379 if (debug) {
380 plog("Only read %zd bytes from journal (expected %u)", nread, bsize);
381 }
382 err = EINVAL;
383 }
384 if (jp == NULL)
385 close(jfd);
386 /* We got the journal header, now we need to check it */
387 if (err == noErr) {
388 int swap = 0;
389 UInt32 cksum = 0;
390
391 jhdr = (struct journal_header*)block;
392
393 if (jhdr->magic == JOURNAL_HEADER_MAGIC ||
394 SW32(jhdr->magic) == JOURNAL_HEADER_MAGIC) {
395 if (jhdr->endian == ENDIAN_MAGIC)
396 swap = 0;
397 else if (SW32(jhdr->endian) == ENDIAN_MAGIC)
398 swap = 1;
399 else
400 swap = 2;
401
402 if (swap != 2) {
403 cksum = swap ? SW32(jhdr->checksum) : jhdr->checksum;
404 UInt32 calc_sum;
405 jhdr->checksum = 0;
406 /* Checksum calculation needs the checksum field to be zero. */
407 calc_sum = calc_checksum((unsigned char*)jhdr, JOURNAL_HEADER_CKSUM_SIZE);
408 /* But, for now, this is for debugging purposes only */
409 if (calc_sum != cksum) {
410 if (debug)
411 plog("Journal checksum doesn't match: orig %x != calc %x\n", cksum, calc_sum);
412 }
413 /* We have a journal, we got the header, now we check the start and end */
414 if (jhdr->start != jhdr->end) {
415 retval = 0;
416 if (debug)
417 plog("Non-empty journal: start = %lld, end = %lld\n",
418 swap ? SW64(jhdr->start) : jhdr->start,
419 swap ? SW64(jhdr->end) : jhdr->end);
420 }
421 }
422 }
423 }
424 }
425 }
426 out:
427 return retval;
428 }
429
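/*
 * Illustrative usage sketch (editor's addition; the caller and the cleanup
 * policy shown are assumptions, not taken from this file).  When a non-NULL
 * fsckJournalInfo_t is passed in, IsJournalEmpty() leaves an open descriptor
 * in jnlfd and a strdup'd device name in name, so the caller is expected to
 * release both when it is done with them.
 */
#if 0 // example only
static void example_probe_journal(SGlobPtr GPtr)
{
	fsckJournalInfo_t jinfo = { .jnlfd = -1 };

	if (IsJournalEmpty(GPtr, &jinfo) == 0) {
		/* start != end in the journal header: transactions may need replay */
	}
	if (jinfo.jnlfd >= 0)
		close(jinfo.jnlfd);
	if (jinfo.name != NULL)
		free(jinfo.name);
}
#endif
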
430 /*
431 * The function checks whether the volume is clean or dirty. It
432 * also marks the volume as clean/dirty depending on the type
433 * of operation specified. It modifies the volume header only
434 * if the old values are not the same as the new values. If the volume
435 * header is updated, it also sets the last mounted version for HFS+.
436 *
437 * Input:
438 * GPtr - Pointer to scavenger global area
439 * operation - Type of operation to perform
440 * kCheckVolume, // check if volume is clean/dirty
441 * kMarkVolumeDirty, // mark the volume dirty
442 * kMarkVolumeClean // mark the volume clean
443 *
444 * Output:
445 * modified - true if the VH/MDB was modified, otherwise false.
446 * Return Value -
447 * -1 - if the volume is not an HFS/HFS+ volume
448 * 0 - if the volume was dirty or marked dirty
449 * 1 - if the volume was clean or marked clean
450 * If the operation requested was to mark the volume clean/dirty,
451 * the return value is dependent on type of operation (described above).
452 */
453 int CheckForClean(SGlobPtr GPtr, UInt8 operation, Boolean *modified)
454 {
455 enum { unknownVolume = -1, cleanUnmount = 1, dirtyUnmount = 0};
456 int result = unknownVolume;
457 Boolean update = false;
458 HFSMasterDirectoryBlock *mdbp;
459 HFSPlusVolumeHeader *vhp;
460 BlockDescriptor block;
461 ReleaseBlockOptions rbOptions;
462 UInt64 blockNum;
463 SVCB *vcb;
464
465 *modified = false;
466 vcb = GPtr->calculatedVCB;
467 block.buffer = NULL;
468 rbOptions = kReleaseBlock;
469
470 /* Get the block number for VH/MDB */
471 GetVolumeObjectBlockNum(&blockNum);
472 if (blockNum == 0) {
473 if (fsckGetVerbosity(GPtr->context) >= kDebugLog)
474 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
475 goto ExitThisRoutine;
476 }
477
478 /* Get VH or MDB depending on the type of volume */
479 result = GetVolumeObjectPrimaryBlock(&block);
480 if (result) {
481 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
482 plog( "\t%s - could not get VHB/MDB at block %qd \n", __FUNCTION__, blockNum );
483 result = unknownVolume;
484 goto ExitThisRoutine;
485 }
486
487 result = cleanUnmount;
488
489 if (VolumeObjectIsHFSPlus()) {
490 vhp = (HFSPlusVolumeHeader *) block.buffer;
491
492 /* Check unmount bit and volume inconsistent bit */
493 if (((vhp->attributes & kHFSVolumeUnmountedMask) == 0) ||
494 (vhp->attributes & kHFSVolumeInconsistentMask))
495 result = dirtyUnmount;
496
497 /* Check last mounted version. If kFSKMountVersion, bad
498 * journal was encountered during mount. Force dirty volume.
499 */
500
501 if (vhp->lastMountedVersion == kFSKMountVersion) {
502 GPtr->JStat |= S_BadJournal;
503 RcdError (GPtr, E_BadJournal);
504 result = dirtyUnmount;
505 }
506
507 if (operation == kMarkVolumeDirty) {
508 /* Mark volume was not unmounted cleanly */
509 if (vhp->attributes & kHFSVolumeUnmountedMask) {
510 vhp->attributes &= ~kHFSVolumeUnmountedMask;
511 update = true;
512 }
513 /* Mark volume inconsistent */
514 if ((vhp->attributes & kHFSVolumeInconsistentMask) == 0) {
515 vhp->attributes |= kHFSVolumeInconsistentMask;
516 update = true;
517 }
518 } else if (operation == kMarkVolumeClean) {
519 /* Mark volume was unmounted cleanly */
520 if ((vhp->attributes & kHFSVolumeUnmountedMask) == 0) {
521 vhp->attributes |= kHFSVolumeUnmountedMask;
522 update = true;
523 }
524 /* Mark volume consistent */
525 if (vhp->attributes & kHFSVolumeInconsistentMask) {
526 vhp->attributes &= ~kHFSVolumeInconsistentMask;
527 update = true;
528 }
529 }
530
531 /* If any changes to VH, update the last mounted version */
532 if (update == true) {
533 vhp->lastMountedVersion = kFSCKMountVersion;
534 }
535 } else if (VolumeObjectIsHFS()) {
536 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
537
538 /* Check unmount bit and volume inconsistent bit */
539 if (((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) ||
540 (mdbp->drAtrb & kHFSVolumeInconsistentMask))
541 result = dirtyUnmount;
542
543 if (operation == kMarkVolumeDirty) {
544 /* Mark volume was not unmounted cleanly */
545 if (mdbp->drAtrb & kHFSVolumeUnmountedMask) {
546 mdbp->drAtrb &= ~kHFSVolumeUnmountedMask;
547 update = true;
548 }
549 /* Mark volume inconsistent */
550 if ((mdbp->drAtrb & kHFSVolumeInconsistentMask) == 0) {
551 mdbp->drAtrb |= kHFSVolumeInconsistentMask;
552 update = true;
553 }
554 } else if (operation == kMarkVolumeClean) {
555 /* Mark volume was unmounted cleanly */
556 if ((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) {
557 mdbp->drAtrb |= kHFSVolumeUnmountedMask;
558 update = true;
559 }
560 /* Mark volume consistent */
561 if (mdbp->drAtrb & kHFSVolumeInconsistentMask) {
562 mdbp->drAtrb &= ~kHFSVolumeInconsistentMask;
563 update = true;
564 }
565 }
566 }
567
568 ExitThisRoutine:
569 if (update == true) {
570 *modified = true;
571 rbOptions = kForceWriteBlock;
572 /* Set appropriate return value */
573 if (operation == kMarkVolumeDirty) {
574 result = dirtyUnmount;
575 } else if (operation == kMarkVolumeClean) {
576 result = cleanUnmount;
577 }
578 }
579 if (block.buffer != NULL)
580 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
581
582 return (result);
583 }
584
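/*
 * Illustrative sketch of the intended calling sequence (editor's addition;
 * the wrapper function is hypothetical).  The operation codes and return
 * values are the ones documented in the block comment above: 0 means the
 * volume was (or is now marked) dirty, 1 means clean, -1 means the volume
 * type could not be determined.
 */
#if 0 // example only
static void example_clean_dirty_cycle(SGlobPtr GPtr)
{
	Boolean modified = false;

	if (CheckForClean(GPtr, kCheckVolume, &modified) == 0) {
		/* Volume is dirty: keep it marked dirty while repairs run... */
		(void) CheckForClean(GPtr, kMarkVolumeDirty, &modified);
		/* ... perform repairs ... */
		(void) CheckForClean(GPtr, kMarkVolumeClean, &modified);
	}
}
#endif
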
585 /*------------------------------------------------------------------------------
586
587 Function: IVChk - (Initial Volume Check)
588
589 Function: Performs an initial check of the volume to be scavenged to confirm
590 that the volume can be accessed and that it is a HFS/HFS+ volume.
591
592 Input: GPtr - pointer to scavenger global area
593
594 Output: IVChk - function result:
595 0 = no error
596 n = error code
597 ------------------------------------------------------------------------------*/
598 #define kBitsPerSector 4096
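// one 512-byte (Blk_Size) sector holds 512 * 8 = 4096 volume bitmap bits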
599
600 OSErr IVChk( SGlobPtr GPtr )
601 {
602 OSErr err;
603 HFSMasterDirectoryBlock * myMDBPtr;
604 HFSPlusVolumeHeader * myVHBPtr;
605 UInt32 numABlks;
606 UInt32 minABlkSz;
607 UInt32 maxNumberOfAllocationBlocks;
608 UInt32 realAllocationBlockSize;
609 UInt32 realTotalBlocks;
610 UInt32 i;
611 BTreeControlBlock *btcb;
612 SVCB *vcb = GPtr->calculatedVCB;
613 VolumeObjectPtr myVOPtr;
614 UInt64 blockNum;
615 UInt64 totalSectors;
616 BlockDescriptor myBlockDescriptor;
617
618 // Set up
619 GPtr->TarID = AMDB_FNum; // target = alt MDB
620 GPtr->TarBlock = 0;
621 maxNumberOfAllocationBlocks = 0xFFFFFFFF;
622 realAllocationBlockSize = 0;
623 realTotalBlocks = 0;
624
625 myBlockDescriptor.buffer = NULL;
626 myVOPtr = GetVolumeObjectPtr( );
627
628 // check volume size
629 if ( myVOPtr->totalDeviceSectors < 3 ) {
630 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
631 plog("\tinvalid device information for volume - total sectors = %qd sector size = %d \n",
632 myVOPtr->totalDeviceSectors, myVOPtr->sectorSize);
633 return( 123 );
634 }
635
636 GetVolumeObjectBlockNum( &blockNum );
637 if ( blockNum == 0 || myVOPtr->volumeType == kUnknownVolumeType ) {
638 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
639 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
640 err = R_BadSig; /* doesn't bear the HFS signature */
641 goto ReleaseAndBail;
642 }
643
644 // get Volume Header (HFS+) or Master Directory (HFS) block
645 err = GetVolumeObjectVHBorMDB( &myBlockDescriptor );
646 if ( err != noErr ) {
647 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
648 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
649 goto ReleaseAndBail;
650 }
651 myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer;
652
653 // if this is an HFS (kHFSVolumeType) volume and the MDB indicates this
654 // might contain an embedded HFS+ volume then we need to scan
655 // for an embedded HFS+ volume. I'm told there were some old problems
656 // where we could lose track of the embedded volume.
657 if ( VolumeObjectIsHFS( ) &&
658 (myMDBPtr->drEmbedSigWord != 0 ||
659 myMDBPtr->drEmbedExtent.blockCount != 0 ||
660 myMDBPtr->drEmbedExtent.startBlock != 0) ) {
661
662 err = ScavengeVolumeType( GPtr, myMDBPtr, &myVOPtr->volumeType );
663 if ( err == E_InvalidMDBdrAlBlSt )
664 err = RcdMDBEmbededVolDescriptionErr( GPtr, E_InvalidMDBdrAlBlSt, myMDBPtr );
665
666 if ( VolumeObjectIsEmbeddedHFSPlus( ) ) {
667 // we changed volume types so let's get the VHB
668 (void) ReleaseVolumeBlock( vcb, &myBlockDescriptor, kReleaseBlock );
669 myBlockDescriptor.buffer = NULL;
670 myMDBPtr = NULL;
671 err = GetVolumeObjectVHB( &myBlockDescriptor );
672 if ( err != noErr ) {
673 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
674 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
675 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
676 err = E_InvalidVolumeHeader;
677 goto ReleaseAndBail;
678 }
679
680 GetVolumeObjectBlockNum( &blockNum ); // get the new Volume header block number
681 }
682 else {
683 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
684 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
685 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
686 err = E_InvalidVolumeHeader;
687 goto ReleaseAndBail;
688 }
689 }
690
691 totalSectors = ( VolumeObjectIsEmbeddedHFSPlus( ) ) ? myVOPtr->totalEmbeddedSectors : myVOPtr->totalDeviceSectors;
692
693 // indicate what type of volume we are dealing with
694 if ( VolumeObjectIsHFSPlus( ) ) {
695
696 myVHBPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer;
697 if (myVHBPtr->attributes & kHFSVolumeJournaledMask) {
698 fsckPrint(GPtr->context, hfsJournalVolCheck);
699 } else {
700 fsckPrint(GPtr->context, hfsCheckNoJnl);
701 }
702 GPtr->numExtents = kHFSPlusExtentDensity;
703 vcb->vcbSignature = kHFSPlusSigWord;
704
705 // Further populate the VCB with VolumeHeader info
706 vcb->vcbAlBlSt = myVOPtr->embeddedOffset / 512;
707 vcb->vcbEmbeddedOffset = myVOPtr->embeddedOffset;
708 realAllocationBlockSize = myVHBPtr->blockSize;
709 realTotalBlocks = myVHBPtr->totalBlocks;
710 vcb->vcbNextCatalogID = myVHBPtr->nextCatalogID;
711 vcb->vcbCreateDate = myVHBPtr->createDate;
712 vcb->vcbAttributes = myVHBPtr->attributes & kHFSCatalogNodeIDsReused;
713
714 if ( myVHBPtr->attributesFile.totalBlocks == 0 )
715 vcb->vcbAttributesFile = NULL; /* XXX memory leak ? */
716
717 // Make sure the Extents B-Tree is set to use 16-bit key lengths.
718 // We access it before completely setting up the control block.
719 btcb = (BTreeControlBlock *) vcb->vcbExtentsFile->fcbBtree;
720 btcb->attributes |= kBTBigKeysMask;
721
722 // catch the case where the volume allocation block count is greater than
723 // maximum number of device allocation blocks. - bug 2916021
724 numABlks = myVOPtr->totalDeviceSectors / ( myVHBPtr->blockSize / Blk_Size );
725 if ( myVHBPtr->totalBlocks > numABlks ) {
726 RcdError( GPtr, E_NABlks );
727 err = E_NABlks;
728 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) {
729 plog( "\t%s - volume header total allocation blocks is greater than device size \n", __FUNCTION__ );
730 plog( "\tvolume allocation block count %d device allocation block count %d \n",
731 myVHBPtr->totalBlocks, numABlks );
732 }
733 goto ReleaseAndBail;
734 }
735 }
736 else if ( VolumeObjectIsHFS( ) ) {
737
738 // fsckPrint(GPtr->context, fsckCheckingVolume);
739 fsckPrint(GPtr->context, hfsCheckHFS);
740
741 GPtr->numExtents = kHFSExtentDensity;
742 vcb->vcbSignature = myMDBPtr->drSigWord;
743 maxNumberOfAllocationBlocks = 0xFFFF;
744 // set up next file ID, CheckBTreeKey makes sure we are under this value
745 vcb->vcbNextCatalogID = myMDBPtr->drNxtCNID;
746 vcb->vcbCreateDate = myMDBPtr->drCrDate;
747
748 realAllocationBlockSize = myMDBPtr->drAlBlkSiz;
749 realTotalBlocks = myMDBPtr->drNmAlBlks;
750 }
751
752 GPtr->TarBlock = blockNum; // target block
753
754 // verify volume allocation info
755 // Note: i is the number of sectors per allocation block
756 numABlks = totalSectors;
757 minABlkSz = Blk_Size; // init minimum ablock size
758 // loop while #ablocks won't fit
759 for( i = 2; numABlks > maxNumberOfAllocationBlocks; i++ ) {
760 minABlkSz = i * Blk_Size; // jack up minimum
761 numABlks = totalSectors / i; // recompute #ablocks, assuming this size
762 }
763
764 vcb->vcbBlockSize = realAllocationBlockSize;
765 numABlks = totalSectors / ( realAllocationBlockSize / Blk_Size );
766 if ( VolumeObjectIsHFSPlus( ) ) {
767 // HFS Plus allocation block size must be power of 2
768 if ( (realAllocationBlockSize < minABlkSz) ||
769 (realAllocationBlockSize & (realAllocationBlockSize - 1)) != 0 )
770 realAllocationBlockSize = 0;
771 }
772 else {
773 if ( (realAllocationBlockSize < minABlkSz) ||
774 (realAllocationBlockSize > Max_ABSiz) ||
775 ((realAllocationBlockSize % Blk_Size) != 0) )
776 realAllocationBlockSize = 0;
777 }
778
779 if ( realAllocationBlockSize == 0 ) {
780 RcdError( GPtr, E_ABlkSz );
781 err = E_ABlkSz; // bad allocation block size
782 goto ReleaseAndBail;
783 }
784
785 vcb->vcbTotalBlocks = realTotalBlocks;
786 vcb->vcbFreeBlocks = 0;
787
788 // Only do these tests on HFS volumes, since they are either irrelevant
789 // to HFS+ or, for HFS+, getting the VolumeHeader would have already failed.
790 if ( VolumeObjectIsHFS( ) ) {
791 UInt32 bitMapSizeInSectors;
792
793 // Calculate the volume bitmap size
794 bitMapSizeInSectors = ( numABlks + kBitsPerSector - 1 ) / kBitsPerSector; // VBM size in sectors
795
796 //•• Calculate the validity of HFS Allocation blocks, I think realTotalBlocks == numABlks
797 numABlks = (totalSectors - 3 - bitMapSizeInSectors) / (realAllocationBlockSize / Blk_Size); // actual # of alloc blks
798
799 if ( realTotalBlocks > numABlks ) {
800 RcdError( GPtr, E_NABlks );
801 err = E_NABlks; // invalid number of allocation blocks
802 goto ReleaseAndBail;
803 }
804
805 if ( myMDBPtr->drVBMSt <= MDB_BlkN ) {
806 RcdError(GPtr,E_VBMSt);
807 err = E_VBMSt; // invalid VBM start block
808 goto ReleaseAndBail;
809 }
810 vcb->vcbVBMSt = myMDBPtr->drVBMSt;
811
812 if (myMDBPtr->drAlBlSt < (myMDBPtr->drVBMSt + bitMapSizeInSectors)) {
813 RcdError(GPtr,E_ABlkSt);
814 err = E_ABlkSt; // invalid starting alloc block
815 goto ReleaseAndBail;
816 }
817 vcb->vcbAlBlSt = myMDBPtr->drAlBlSt;
818 }
819
820 ReleaseAndBail:
821 if (myBlockDescriptor.buffer != NULL)
822 (void) ReleaseVolumeBlock(vcb, &myBlockDescriptor, kReleaseBlock);
823
824 return( err );
825 }
826
827
828 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType )
829 {
830 UInt64 vHSector;
831 UInt64 startSector;
832 UInt64 altVHSector;
833 UInt64 hfsPlusSectors = 0;
834 UInt32 sectorsPerBlock;
835 UInt32 numSectorsToSearch;
836 OSErr err;
837 HFSPlusVolumeHeader *volumeHeader;
838 HFSExtentDescriptor embededExtent;
839 SVCB *calculatedVCB = GPtr->calculatedVCB;
840 VolumeObjectPtr myVOPtr;
841 UInt16 embedSigWord = mdb->drEmbedSigWord;
842 BlockDescriptor block;
843
844 /*
845 * If all of the embedded volume information is zero, then assume
846 * this really is a plain HFS disk like it says. Otherwise, if
847 * you reinitialize a large HFS Plus volume as HFS, the original
848 * embedded volume's volume header and alternate volume header will
849 * still be there, and we'll try to repair the embedded volume.
850 */
851 if (embedSigWord == 0 &&
852 mdb->drEmbedExtent.blockCount == 0 &&
853 mdb->drEmbedExtent.startBlock == 0)
854 {
855 *volumeType = kHFSVolumeType;
856 return noErr;
857 }
858
859 myVOPtr = GetVolumeObjectPtr( );
860 *volumeType = kEmbededHFSPlusVolumeType; // Assume HFS+
861
862 //
863 // First see if it is an HFS+ volume and the relevant structures look OK
864 //
865 if ( embedSigWord == kHFSPlusSigWord )
866 {
867 /* look for primary volume header */
868 vHSector = (UInt64)mdb->drAlBlSt +
869 ((UInt64)(mdb->drAlBlkSiz / Blk_Size) * (UInt64)mdb->drEmbedExtent.startBlock) + 2;
870
871 err = GetVolumeBlock(calculatedVCB, vHSector, kGetBlock, &block);
872 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
873 if ( err != noErr ) goto AssumeHFS;
874
875 myVOPtr->primaryVHB = vHSector;
876 err = ValidVolumeHeader( volumeHeader );
877 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
878 if ( err == noErr ) {
879 myVOPtr->flags |= kVO_PriVHBOK;
880 return( noErr );
881 }
882 }
883
884 sectorsPerBlock = mdb->drAlBlkSiz / Blk_Size;
885
886 // Search the end of the disk to see if a Volume Header is present at all
887 if ( embedSigWord != kHFSPlusSigWord )
888 {
889 numSectorsToSearch = mdb->drAlBlkSiz / Blk_Size;
890 startSector = myVOPtr->totalDeviceSectors - 4 - numSectorsToSearch;
891
892 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &altVHSector );
893 if ( err != noErr ) goto AssumeHFS;
894
895 // We found the Alt VH, so this must be a damaged embedded HFS+ volume
896 // Now Scavenge for the Primary VolumeHeader
897 myVOPtr->alternateVHB = altVHSector;
898 myVOPtr->flags |= kVO_AltVHBOK;
899 startSector = mdb->drAlBlSt + (4 * sectorsPerBlock); // Start looking at 4th HFS allocation block
900 numSectorsToSearch = 10 * sectorsPerBlock; // search for VH in next 10 allocation blocks
901
902 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &vHSector );
903 if ( err != noErr ) goto AssumeHFS;
904
905 myVOPtr->primaryVHB = vHSector;
906 myVOPtr->flags |= kVO_PriVHBOK;
907 hfsPlusSectors = altVHSector - vHSector + 1 + 2 + 1; // numSectors + BB + end
908
909 // Fix the embedded extent
910 embededExtent.blockCount = hfsPlusSectors / sectorsPerBlock;
911 embededExtent.startBlock = (vHSector - 2 - mdb->drAlBlSt ) / sectorsPerBlock;
912 embedSigWord = kHFSPlusSigWord;
913
914 myVOPtr->embeddedOffset =
915 (embededExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
916 }
917 else
918 {
919 embedSigWord = mdb->drEmbedSigWord;
920 embededExtent.blockCount = mdb->drEmbedExtent.blockCount;
921 embededExtent.startBlock = mdb->drEmbedExtent.startBlock;
922 }
923
924 if ( embedSigWord == kHFSPlusSigWord )
925 {
926 startSector = 2 + mdb->drAlBlSt +
927 ((UInt64)embededExtent.startBlock * (mdb->drAlBlkSiz / Blk_Size));
928
929 err = SeekVolumeHeader( GPtr, startSector, mdb->drAlBlkSiz / Blk_Size, &vHSector );
930 if ( err != noErr ) goto AssumeHFS;
931
932 // Now replace the bad fields and mark the error
933 mdb->drEmbedExtent.blockCount = embededExtent.blockCount;
934 mdb->drEmbedExtent.startBlock = embededExtent.startBlock;
935 mdb->drEmbedSigWord = kHFSPlusSigWord;
936 mdb->drAlBlSt += vHSector - startSector; // Fix the bad field
937 myVOPtr->totalEmbeddedSectors = (mdb->drAlBlkSiz / Blk_Size) * mdb->drEmbedExtent.blockCount;
938 myVOPtr->embeddedOffset =
939 (mdb->drEmbedExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
940 myVOPtr->primaryVHB = vHSector;
941 myVOPtr->flags |= kVO_PriVHBOK;
942
943 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
944 return( E_InvalidMDBdrAlBlSt );
945 }
946
947 AssumeHFS:
948 *volumeType = kHFSVolumeType;
949 return( noErr );
950
951 } /* ScavengeVolumeType */
952
953
954 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector )
955 {
956 OSErr err;
957 HFSPlusVolumeHeader *volumeHeader;
958 SVCB *calculatedVCB = GPtr->calculatedVCB;
959 BlockDescriptor block;
960
961 for ( *vHSector = startSector ; *vHSector < startSector + numSectors ; (*vHSector)++ )
962 {
963 err = GetVolumeBlock(calculatedVCB, *vHSector, kGetBlock, &block);
964 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
965 if ( err != noErr ) return( err );
966
967 err = ValidVolumeHeader(volumeHeader);
968
969 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
970 if ( err == noErr )
971 return( noErr );
972 }
973
974 return( fnfErr );
975 }
976
977
978 #if 0 // not used at this time
979 static OSErr CheckWrapperExtents( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb )
980 {
981 OSErr err = noErr;
982
983 // See if Norton Disk Doctor 2.0 corrupted the catalog's first extent
984 if ( mdb->drCTExtRec[0].startBlock >= mdb->drEmbedExtent.startBlock)
985 {
986 // Fix the field in the in-memory copy, and record the error
987 mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + mdb->drXTExtRec[0].blockCount;
988 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
989 err = RcdInvalidWrapperExtents( GPtr, E_InvalidWrapperExtents );
990 }
991
992 return err;
993 }
994 #endif
995
996 /*------------------------------------------------------------------------------
997
998 Function: CreateExtentsBTreeControlBlock
999
1000 Function: Create the calculated ExtentsBTree Control Block
1001
1002 Input: GPtr - pointer to scavenger global area
1003
1004 Output: - 0 = no error
1005 n = error code
1006 ------------------------------------------------------------------------------*/
1007
1008 OSErr CreateExtentsBTreeControlBlock( SGlobPtr GPtr )
1009 {
1010 OSErr err;
1011 SInt32 size;
1012 UInt32 numABlks;
1013 BTHeaderRec header;
1014 BTreeControlBlock * btcb;
1015 SVCB * vcb;
1016 BlockDescriptor block;
1017 Boolean isHFSPlus;
1018
1019 // Set up
1020 isHFSPlus = VolumeObjectIsHFSPlus( );
1021 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1022 GPtr->TarBlock = kHeaderNodeNum; // target block = header node
1023 vcb = GPtr->calculatedVCB;
1024 btcb = GPtr->calculatedExtentsBTCB;
1025 block.buffer = NULL;
1026
1027 // get Volume Header (HFS+) or Master Directory (HFS) block
1028 err = GetVolumeObjectVHBorMDB( &block );
1029 if (err) goto exit;
1030 //
1031 // check out allocation info for the Extents File
1032 //
1033 if (isHFSPlus)
1034 {
1035 HFSPlusVolumeHeader *volumeHeader;
1036
1037 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1038
1039 CopyMemory(volumeHeader->extentsFile.extents, GPtr->calculatedExtentsFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1040
1041 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents32, &numABlks); // check out extent info
1042
1043 if (err) goto exit;
1044
1045 if ( volumeHeader->extentsFile.totalBlocks != numABlks ) // check out the PEOF
1046 {
1047 RcdError( GPtr, E_ExtPEOF );
1048 err = E_ExtPEOF;
1049 if (debug)
1050 plog("Extents File totalBlocks = %u, numABlks = %u\n", volumeHeader->extentsFile.totalBlocks, numABlks);
1051 goto exit;
1052 }
1053 else
1054 {
1055 GPtr->calculatedExtentsFCB->fcbLogicalSize = volumeHeader->extentsFile.logicalSize; // Set Extents tree's LEOF
1056 GPtr->calculatedExtentsFCB->fcbPhysicalSize = (UInt64)volumeHeader->extentsFile.totalBlocks *
1057 (UInt64)volumeHeader->blockSize; // Set Extents tree's PEOF
1058 }
1059
1060 //
1061 // Set up the minimal BTreeControlBlock structure
1062 //
1063
1064 // Read the BTreeHeader from disk & also validate its node size.
1065 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1066 if (err) goto exit;
1067
1068 btcb->maxKeyLength = kHFSPlusExtentKeyMaximumLength; // max key length
1069 btcb->keyCompareProc = (void *)CompareExtentKeysPlus;
1070 btcb->attributes |=kBTBigKeysMask; // HFS+ Extent files have 16-bit key length
1071 btcb->leafRecords = header.leafRecords;
1072 btcb->treeDepth = header.treeDepth;
1073 btcb->rootNode = header.rootNode;
1074 btcb->firstLeafNode = header.firstLeafNode;
1075 btcb->lastLeafNode = header.lastLeafNode;
1076
1077 btcb->nodeSize = header.nodeSize;
1078 btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1079 btcb->freeNodes = btcb->totalNodes; // start with everything free
1080
1081 // Make sure the header node's size field is correct by looking at the 1st record offset
1082 err = CheckNodesFirstOffset( GPtr, btcb );
1083 if ( (err != noErr) && (btcb->nodeSize != 1024) ) // default HFS+ Extents node size is 1024
1084 {
1085 btcb->nodeSize = 1024;
1086 btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1087 btcb->freeNodes = btcb->totalNodes; // start with everything free
1088
1089 err = CheckNodesFirstOffset( GPtr, btcb );
1090 if (err) goto exit;
1091
1092 GPtr->EBTStat |= S_BTH; // update the Btree header
1093 }
1094 }
1095 else // Classic HFS
1096 {
1097 HFSMasterDirectoryBlock *alternateMDB;
1098
1099 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1100
1101 CopyMemory(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1102 // ExtDataRecToExtents(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents);
1103
1104
1105 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents16, &numABlks); /* check out extent info */
1106 if (err) goto exit;
1107
1108 if (alternateMDB->drXTFlSize != ((UInt64)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize))// check out the PEOF
1109 {
1110 RcdError(GPtr,E_ExtPEOF);
1111 err = E_ExtPEOF;
1112 if (debug)
1113 plog("Alternate MDB drXTFlSize = %llu, should be %llu\n", (long long)alternateMDB->drXTFlSize, (long long)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize);
1114 goto exit;
1115 }
1116 else
1117 {
1118 GPtr->calculatedExtentsFCB->fcbPhysicalSize = alternateMDB->drXTFlSize; // set up PEOF and EOF in FCB
1119 GPtr->calculatedExtentsFCB->fcbLogicalSize = GPtr->calculatedExtentsFCB->fcbPhysicalSize;
1120 }
1121
1122 //
1123 // Set up the minimal BTreeControlBlock structure
1124 //
1125
1126 // Read the BTreeHeader from disk & also validate its node size.
1127 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1128 if (err) goto exit;
1129
1130 btcb->maxKeyLength = kHFSExtentKeyMaximumLength; // max key length
1131 btcb->keyCompareProc = (void *)CompareExtentKeys;
1132 btcb->leafRecords = header.leafRecords;
1133 btcb->treeDepth = header.treeDepth;
1134 btcb->rootNode = header.rootNode;
1135 btcb->firstLeafNode = header.firstLeafNode;
1136 btcb->lastLeafNode = header.lastLeafNode;
1137
1138 btcb->nodeSize = header.nodeSize;
1139 btcb->totalNodes = (GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1140 btcb->freeNodes = btcb->totalNodes; // start with everything free
1141
1142 // Make sure the header node's size field is correct by looking at the 1st record offset
1143 err = CheckNodesFirstOffset( GPtr, btcb );
1144 if (err) goto exit;
1145 }
1146
1147 if ( header.btreeType != kHFSBTreeType )
1148 {
1149 GPtr->EBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1150 }
1151
1152 //
1153 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes?
1154 //
1155 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1156 if ( btcb->refCon == nil ) {
1157 err = R_NoMem;
1158 goto exit;
1159 }
1160 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1161 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1162 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1163 {
1164 err = R_NoMem;
1165 goto exit;
1166 }
1167
1168 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1169 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes;// keep track of real free nodes for progress
1170 exit:
1171 if ( block.buffer != NULL )
1172 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1173
1174 return (err);
1175 }
1176
1177
1178
1179 /*------------------------------------------------------------------------------
1180
1181 Function: CheckNodesFirstOffset
1182
1183 Function: Minimal check verifies that the 1st offset is within bounds. If it's not
1184 the nodeSize may be wrong. In the future this routine could be modified
1185 to try different size values until one fits.
1186
1187 ------------------------------------------------------------------------------*/
1188 #define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))
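/*
 * The per-record offsets in a B-tree node are 16-bit values packed backward
 * from the end of the node (record 0's offset occupies the node's last
 * kOffsetSize bytes); GetRecordOffset simply indexes into that trailing
 * array, so a sane first offset is a cheap sanity check on nodeSize.
 */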
1189 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb )
1190 {
1191 NodeRec nodeRec;
1192 UInt16 offset;
1193 OSErr err;
1194
1195 (void) SetFileBlockSize(btcb->fcbPtr, btcb->nodeSize);
1196
1197 err = GetNode( btcb, kHeaderNodeNum, &nodeRec );
1198
1199 if ( err == noErr )
1200 {
1201 offset = GetRecordOffset( btcb, (NodeDescPtr)nodeRec.buffer, 0 );
1202 if ( (offset < sizeof (BTNodeDescriptor)) || // offset < minimum
1203 (offset & 1) || // offset is odd
1204 (offset >= btcb->nodeSize) ) // offset beyond end of node
1205 {
1206 if (debug) fprintf(stderr, "%s(%d): offset is wrong\n", __FUNCTION__, __LINE__);
1207 err = fsBTInvalidNodeErr;
1208 }
1209 }
1210
1211 if ( err != noErr )
1212 RcdError( GPtr, E_InvalidNodeSize );
1213
1214 (void) ReleaseNode(btcb, &nodeRec);
1215
1216 return( err );
1217 }
1218
1219
1220
1221 /*------------------------------------------------------------------------------
1222
1223 Function: ExtBTChk - (Extent BTree Check)
1224
1225 Function: Verifies the extent BTree structure.
1226
1227 Input: GPtr - pointer to scavenger global area
1228
1229 Output: ExtBTChk - function result:
1230 0 = no error
1231 n = error code
1232 ------------------------------------------------------------------------------*/
1233
1234 OSErr ExtBTChk( SGlobPtr GPtr )
1235 {
1236 OSErr err;
1237
1238 // Set up
1239 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1240 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1241
1242 //
1243 // check out the BTree structure
1244 //
1245
1246 err = BTCheck(GPtr, kCalculatedExtentRefNum, NULL);
1247 ReturnIfError( err ); // invalid extent file BTree
1248
1249 //
1250 // check out the allocation map structure
1251 //
1252
1253 err = BTMapChk( GPtr, kCalculatedExtentRefNum );
1254 ReturnIfError( err ); // Invalid extent BTree map
1255
1256 //
1257 // Make sure unused nodes in the B-tree are zero filled.
1258 //
1259 err = BTCheckUnusedNodes(GPtr, kCalculatedExtentRefNum, &GPtr->EBTStat);
1260 ReturnIfError( err );
1261
1262 //
1263 // compare BTree header record on disk with scavenger's BTree header record
1264 //
1265
1266 err = CmpBTH( GPtr, kCalculatedExtentRefNum );
1267 ReturnIfError( err );
1268
1269 //
1270 // compare BTree map on disk with scavenger's BTree map
1271 //
1272
1273 err = CmpBTM( GPtr, kCalculatedExtentRefNum );
1274
1275 return( err );
1276 }
1277
1278
1279
1280 /*------------------------------------------------------------------------------
1281
1282 Function: BadBlockFileExtentCheck - (Check extents of bad block file)
1283
1284 Function:
1285 Verifies the extents of bad block file (kHFSBadBlockFileID) that
1286 exist in extents Btree.
1287
1288 Note that the extents for other file IDs < kHFSFirstUserCatalogNodeID
1289 are being taken care in the following functions:
1290
1291 kHFSExtentsFileID - CreateExtentsBTreeControlBlock
1292 kHFSCatalogFileID - CreateCatalogBTreeControlBlock
1293 kHFSAllocationFileID - CreateExtendedAllocationsFCB
1294 kHFSStartupFileID - CreateExtendedAllocationsFCB
1295 kHFSAttributesFileID - CreateAttributesBTreeControlBlock
1296
1297 Input: GPtr - pointer to scavenger global area
1298
1299 Output: BadBlockFileExtentCheck - function result:
1300 0 = no error
1301 +n = error code
1302 ------------------------------------------------------------------------------*/
1303
1304 OSErr BadBlockFileExtentCheck( SGlobPtr GPtr )
1305 {
1306 UInt32 attributes;
1307 void *p;
1308 OSErr result;
1309 SVCB *vcb;
1310 Boolean isHFSPlus;
1311 BlockDescriptor block;
1312
1313 isHFSPlus = VolumeObjectIsHFSPlus( );
1314 block.buffer = NULL;
1315
1316 //
1317 // process the bad block extents (created by the disk init pkg to hide badspots)
1318 //
1319 vcb = GPtr->calculatedVCB;
1320
1321 result = GetVolumeObjectVHBorMDB( &block );
1322 if ( result != noErr ) goto ExitThisRoutine; // error, couldn't get it
1323
1324 p = (void *) block.buffer;
1325 attributes = isHFSPlus == true ? ((HFSPlusVolumeHeader*)p)->attributes : ((HFSMasterDirectoryBlock*)p)->drAtrb;
1326
1327 //•• Does HFS+ honor the same mask?
1328 if ( attributes & kHFSVolumeSparedBlocksMask ) // if any badspots
1329 {
1330 HFSPlusExtentRecord zeroXdr; // dummy passed to 'CheckFileExtents'
1331 UInt32 numBadBlocks;
1332
1333 ClearMemory ( zeroXdr, sizeof( HFSPlusExtentRecord ) );
1334 result = CheckFileExtents( GPtr, kHFSBadBlockFileID, kDataFork, NULL, (void *)zeroXdr, &numBadBlocks); // check and mark bitmap
1335 }
1336
1337 ExitThisRoutine:
1338 if ( block.buffer != NULL )
1339 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1340
1341 return (result);
1342 }
1343
1344
1345 /*------------------------------------------------------------------------------
1346
1347 Function: CreateCatalogBTreeControlBlock
1348
1349 Function: Create the calculated CatalogBTree Control Block
1350
1351 Input: GPtr - pointer to scavenger global area
1352
1353 Output: - 0 = no error
1354 n = error code
1355 ------------------------------------------------------------------------------*/
1356 OSErr CreateCatalogBTreeControlBlock( SGlobPtr GPtr )
1357 {
1358 OSErr err;
1359 SInt32 size;
1360 UInt32 numABlks;
1361 BTHeaderRec header;
1362 BTreeControlBlock * btcb;
1363 SVCB * vcb;
1364 BlockDescriptor block;
1365 Boolean isHFSPlus;
1366
1367 // Set up
1368 isHFSPlus = VolumeObjectIsHFSPlus( );
1369 GPtr->TarID = kHFSCatalogFileID;
1370 GPtr->TarBlock = kHeaderNodeNum;
1371 vcb = GPtr->calculatedVCB;
1372 btcb = GPtr->calculatedCatalogBTCB;
1373 block.buffer = NULL;
1374
1375 err = GetVolumeObjectVHBorMDB( &block );
1376 if ( err != noErr ) goto ExitThisRoutine; // error, couldn't get it
1377 //
1378 // check out allocation info for the Catalog File
1379 //
1380 if (isHFSPlus)
1381 {
1382 HFSPlusVolumeHeader * volumeHeader;
1383
1384 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1385
1386 CopyMemory(volumeHeader->catalogFile.extents, GPtr->calculatedCatalogFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1387
1388 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents32, &numABlks);
1389 if (err) goto exit;
1390
1391 if ( volumeHeader->catalogFile.totalBlocks != numABlks )
1392 {
1393 RcdError( GPtr, E_CatPEOF );
1394 err = E_CatPEOF;
1395 goto exit;
1396 }
1397 else
1398 {
1399 GPtr->calculatedCatalogFCB->fcbLogicalSize = volumeHeader->catalogFile.logicalSize;
1400 GPtr->calculatedCatalogFCB->fcbPhysicalSize = (UInt64)volumeHeader->catalogFile.totalBlocks *
1401 (UInt64)volumeHeader->blockSize;
1402 }
1403
1404 //
1405 // Set up the minimal BTreeControlBlock structure
1406 //
1407
1408 // read the BTreeHeader from disk & also validate its node size.
1409 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1410 if (err) goto exit;
1411
1412 btcb->maxKeyLength = kHFSPlusCatalogKeyMaximumLength; // max key length
1413
1414 /*
1415 * Figure out the type of key string compare
1416 * (case-insensitive or case-sensitive)
1417 *
1418 * To do: should enforce that an "HX" volume is required for kHFSBinaryCompare.
1419 */
1420 if (header.keyCompareType == kHFSBinaryCompare)
1421 {
1422 btcb->keyCompareProc = (void *)CaseSensitiveCatalogKeyCompare;
1423 fsckPrint(GPtr->context, hfsCaseSensitive);
1424 }
1425 else
1426 {
1427 btcb->keyCompareProc = (void *)CompareExtendedCatalogKeys;
1428 }
1429 btcb->keyCompareType = header.keyCompareType;
1430 btcb->leafRecords = header.leafRecords;
1431 btcb->nodeSize = header.nodeSize;
1432 btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1433 btcb->freeNodes = btcb->totalNodes; // start with everything free
1434 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Catalog files have large, variable-sized keys
1435
1436 btcb->treeDepth = header.treeDepth;
1437 btcb->rootNode = header.rootNode;
1438 btcb->firstLeafNode = header.firstLeafNode;
1439 btcb->lastLeafNode = header.lastLeafNode;
1440
1441
1442 // Make sure the header node's size field is correct by looking at the 1st record offset
1443 err = CheckNodesFirstOffset( GPtr, btcb );
1444 if ( (err != noErr) && (btcb->nodeSize != 4096) ) // default HFS+ Catalog node size is 4096
1445 {
1446 btcb->nodeSize = 4096;
1447 btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1448 btcb->freeNodes = btcb->totalNodes; // start with everything free
1449
1450 err = CheckNodesFirstOffset( GPtr, btcb );
1451 if (err) goto exit;
1452
1453 GPtr->CBTStat |= S_BTH; // update the Btree header
1454 }
1455 }
1456 else // HFS
1457 {
1458 HFSMasterDirectoryBlock *alternateMDB;
1459
1460 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1461
1462 CopyMemory( alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1463 // ExtDataRecToExtents(alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents);
1464
1465 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents16, &numABlks); /* check out extent info */
1466 if (err) goto exit;
1467
1468 if (alternateMDB->drCTFlSize != ((UInt64)numABlks * (UInt64)vcb->vcbBlockSize)) // check out the PEOF
1469 {
1470 RcdError( GPtr, E_CatPEOF );
1471 err = E_CatPEOF;
1472 goto exit;
1473 }
1474 else
1475 {
1476 GPtr->calculatedCatalogFCB->fcbPhysicalSize = alternateMDB->drCTFlSize; // set up PEOF and EOF in FCB
1477 GPtr->calculatedCatalogFCB->fcbLogicalSize = GPtr->calculatedCatalogFCB->fcbPhysicalSize;
1478 }
1479
1480 //
1481 // Set up the minimal BTreeControlBlock structure
1482 //
1483
1484 // read the BTreeHeader from disk & also validate its node size.
1485 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1486 if (err) goto exit;
1487
1488 btcb->maxKeyLength = kHFSCatalogKeyMaximumLength; // max key length
1489 btcb->keyCompareProc = (void *) CompareCatalogKeys;
1490 btcb->leafRecords = header.leafRecords;
1491 btcb->nodeSize = header.nodeSize;
1492 btcb->totalNodes = (GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1493 btcb->freeNodes = btcb->totalNodes; // start with everything free
1494
1495 btcb->treeDepth = header.treeDepth;
1496 btcb->rootNode = header.rootNode;
1497 btcb->firstLeafNode = header.firstLeafNode;
1498 btcb->lastLeafNode = header.lastLeafNode;
1499
1500 // Make sure the header node's size field is correct by looking at the 1st record offset
1501 err = CheckNodesFirstOffset( GPtr, btcb );
1502 if (err) goto exit;
1503 }
1504 #if 0
1505 plog(" Catalog B-tree is %qd bytes\n", (UInt64)btcb->totalNodes * (UInt64) btcb->nodeSize);
1506 #endif
1507
1508 if ( header.btreeType != kHFSBTreeType )
1509 {
1510 GPtr->CBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1511 }
1512
1513 //
1514 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes?
1515 //
1516
1517 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1518 if ( btcb->refCon == nil ) {
1519 err = R_NoMem;
1520 goto exit;
1521 }
1522 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1523 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1524 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1525 {
1526 err = R_NoMem;
1527 goto exit;
1528 }
1529
1530 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1531 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
1532
1533 /* it should be OK at this point to get volume name and stuff it into our global */
1534 {
1535 OSErr result;
1536 UInt16 recSize;
1537 CatalogKey key;
1538 CatalogRecord record;
1539
1540 BuildCatalogKey( kHFSRootFolderID, NULL, isHFSPlus, &key );
1541 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &record, &recSize, NULL );
1542 if ( result == noErr ) {
1543 if ( isHFSPlus ) {
1544 size_t len;
1545 HFSPlusCatalogThread * recPtr = &record.hfsPlusThread;
1546 (void) utf_encodestr( recPtr->nodeName.unicode,
1547 recPtr->nodeName.length * 2,
1548 GPtr->volumeName, &len, sizeof(GPtr->volumeName) );
1549 GPtr->volumeName[len] = '\0';
1550 }
1551 else {
1552 HFSCatalogThread * recPtr = &record.hfsThread;
1553 bcopy( &recPtr->nodeName[1], GPtr->volumeName, recPtr->nodeName[0] );
1554 GPtr->volumeName[ recPtr->nodeName[0] ] = '\0';
1555 }
1556 fsckPrint(GPtr->context, fsckVolumeName, GPtr->volumeName);
1557 }
1558 }
1559
1560 exit:
1561 ExitThisRoutine:
1562 if ( block.buffer != NULL )
1563 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1564
1565 return (err);
1566 }
1567
1568
1569 /*------------------------------------------------------------------------------
1570
1571 Function: CreateExtendedAllocationsFCB
1572
1573 Function: Create the calculated ExtentsBTree Control Block for
1574 kHFSAllocationFileID and kHFSStartupFileID.
1575
1576 Input: GPtr - pointer to scavenger global area
1577
1578 Output: - 0 = no error
1579 n = error code
1580 ------------------------------------------------------------------------------*/
1581 OSErr CreateExtendedAllocationsFCB( SGlobPtr GPtr )
1582 {
1583 OSErr err = 0;
1584 UInt32 numABlks;
1585 SVCB * vcb;
1586 Boolean isHFSPlus;
1587 BlockDescriptor block;
1588
1589 // Set up
1590 isHFSPlus = VolumeObjectIsHFSPlus( );
1591 GPtr->TarID = kHFSAllocationFileID;
1592 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1593 vcb = GPtr->calculatedVCB;
1594 block.buffer = NULL;
1595
1596 //
1597 // check out allocation info for the allocation File
1598 //
1599
1600 if ( isHFSPlus )
1601 {
1602 SFCB * fcb;
1603 HFSPlusVolumeHeader *volumeHeader;
1604
1605 err = GetVolumeObjectVHB( &block );
1606 if ( err != noErr )
1607 goto exit;
1608 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1609
1610 fcb = GPtr->calculatedAllocationsFCB;
1611 CopyMemory( volumeHeader->allocationFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1612
1613 err = CheckFileExtents( GPtr, kHFSAllocationFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1614 if (err) goto exit;
1615
1616 //
1617 // The allocation file will get processed in whole allocation blocks, or
1618 // maximal-sized cache blocks, whichever is smaller. This means the cache
1619 // doesn't need to cope with buffers that are larger than a cache block.
1620 if (vcb->vcbBlockSize < fscache.BlockSize)
1621 (void) SetFileBlockSize (fcb, vcb->vcbBlockSize);
1622 else
1623 (void) SetFileBlockSize (fcb, fscache.BlockSize);
1624
1625 if ( volumeHeader->allocationFile.totalBlocks != numABlks )
1626 {
1627 RcdError( GPtr, E_CatPEOF );
1628 err = E_CatPEOF;
1629 goto exit;
1630 }
1631 else
1632 {
1633 fcb->fcbLogicalSize = volumeHeader->allocationFile.logicalSize;
1634 fcb->fcbPhysicalSize = (UInt64) volumeHeader->allocationFile.totalBlocks *
1635 (UInt64) volumeHeader->blockSize;
1636 }
1637
1638 /* while we're here, also get startup file extents... */
1639 fcb = GPtr->calculatedStartupFCB;
1640 CopyMemory( volumeHeader->startupFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1641
1642 err = CheckFileExtents( GPtr, kHFSStartupFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1643 if (err) goto exit;
1644
1645 fcb->fcbLogicalSize = volumeHeader->startupFile.logicalSize;
1646 fcb->fcbPhysicalSize = (UInt64) volumeHeader->startupFile.totalBlocks *
1647 (UInt64) volumeHeader->blockSize;
1648 }
1649
1650 exit:
1651 if (block.buffer)
1652 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1653
1654 return (err);
1655
1656 }
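/*
 * Illustrative sketch (not part of fsck_hfs): the check above compares the block
 * count found by walking a fork's extent record against the totalBlocks field in
 * the volume header, and on success derives the physical size as
 * totalBlocks * blockSize.  The SketchExtent type and helper below are hypothetical
 * and only restate that comparison for a single (non-overflowing) extent record;
 * the real work, including overflow extents, is done by CheckFileExtents above.
 */
#if 0 // illustrative sketch only, never compiled
typedef struct { UInt32 startBlock; UInt32 blockCount; } SketchExtent;

static Boolean SketchForkBlockCountMatches( const SketchExtent extents[8], UInt32 totalBlocks )
{
	UInt32 i, found = 0;

	for ( i = 0; i < 8; i++ )
		found += extents[i].blockCount;		// unused slots carry blockCount == 0
	return ( found == totalBlocks );		// a mismatch is reported as E_CatPEOF above
}
#endif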
1657
1658
1659 /*------------------------------------------------------------------------------
1660
1661 Function: CatHChk - (Catalog Hierarchy Check)
1662
1663 Function: Verifies the catalog hierarchy.
1664
1665 Input: GPtr - pointer to scavenger global area
1666
1667 Output: CatHChk - function result:
1668 0 = no error
1669 n = error code
1670 ------------------------------------------------------------------------------*/
1671
1672 OSErr CatHChk( SGlobPtr GPtr )
1673 {
1674 SInt16 i;
1675 OSErr result;
1676 UInt16 recSize;
1677 SInt16 selCode;
1678 UInt32 hint;
1679 UInt32 dirCnt;
1680 UInt32 filCnt;
1681 SInt16 rtdirCnt;
1682 SInt16 rtfilCnt;
1683 SVCB *calculatedVCB;
1684 SDPR *dprP;
1685 SDPR *dprP1;
1686 CatalogKey foundKey;
1687 Boolean validKeyFound;
1688 CatalogKey key;
1689 CatalogRecord record;
1690 CatalogRecord record2;
1691 HFSPlusCatalogFolder *largeCatalogFolderP;
1692 HFSPlusCatalogFile *largeCatalogFileP;
1693 HFSCatalogFile *smallCatalogFileP;
1694 HFSCatalogFolder *smallCatalogFolderP;
1695 CatalogName catalogName;
1696 UInt32 valence;
1697 CatalogRecord threadRecord;
1698 HFSCatalogNodeID parID;
1699 Boolean isHFSPlus;
1700
1701 // set up
1702 isHFSPlus = VolumeObjectIsHFSPlus( );
1703 calculatedVCB = GPtr->calculatedVCB;
1704 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
1705 GPtr->TarBlock = 0; /* no target block yet */
1706
1707 //
1708 // position to the beginning of catalog
1709 //
1710
1711 //•• Can we ignore this part by just taking advantage of setting the selCode = 0x8001;
1712 {
1713 BuildCatalogKey( 1, (const CatalogName *)nil, isHFSPlus, &key );
1714 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1715
1716 GPtr->TarBlock = hint; /* set target block */
1717 if ( result != btNotFound )
1718 {
1719 RcdError( GPtr, E_CatRec );
1720 return( E_CatRec );
1721 }
1722 }
1723
1724 GPtr->DirLevel = 1;
1725 dprP = &(GPtr->DirPTPtr)[0];
1726 dprP->directoryID = 1;
1727
1728 dirCnt = filCnt = rtdirCnt = rtfilCnt = 0;
1729
1730 result = noErr;
1731 selCode = 0x8001; /* start with root directory */
1732
1733 //
1734 // enumerate the entire catalog
1735 //
1736 while ( (GPtr->DirLevel > 0) && (result == noErr) )
1737 {
1738 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1739
1740 validKeyFound = true;
1741 record.recordType = 0;
1742
1743 // get the next record
1744 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recSize, &hint );
1745
1746 GPtr->TarBlock = hint; /* set target block */
1747 if ( result != noErr )
1748 {
1749 if ( result == btNotFound )
1750 {
1751 result = noErr;
1752 validKeyFound = false;
1753 }
1754 else
1755 {
1756 result = IntError( GPtr, result ); /* error from BTGetRecord */
1757 return( result );
1758 }
1759 }
1760 selCode = 1; /* get next rec from now on */
1761
1762 GPtr->itemsProcessed++;
1763
1764 //
1765 // if same ParID ...
1766 //
1767 parID = isHFSPlus == true ? foundKey.hfsPlus.parentID : foundKey.hfs.parentID;
1768 if ( (validKeyFound == true) && (parID == dprP->directoryID) )
1769 {
1770 dprP->offspringIndex++; /* increment offspring index */
1771
1772 // if new directory ...
1773
1774 if ( record.recordType == kHFSPlusFolderRecord )
1775 {
1776 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1777
1778 largeCatalogFolderP = (HFSPlusCatalogFolder *) &record;
1779 GPtr->TarID = largeCatalogFolderP->folderID; // target ID = directory ID
1780 GPtr->CNType = record.recordType; // target CNode type = directory ID
1781 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1782
1783 if ( dprP->directoryID > 1 )
1784 {
1785 GPtr->DirLevel++; // we have a new directory level
1786 dirCnt++;
1787 }
1788 if ( dprP->directoryID == kHFSRootFolderID ) // bump root dir count
1789 rtdirCnt++;
1790
1791 if ( GPtr->DirLevel > GPtr->dirPathCount )
1792 {
1793 void *ptr;
1794
1795 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1796 if (ptr == nil)
1797 {
1798 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1799 return noErr; /* abort this check, but let other checks proceed */
1800 }
1801 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1802 GPtr->dirPathCount += CMMaxDepth;
1803 GPtr->DirPTPtr = ptr;
1804 }
1805
1806 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1807 dprP->directoryID = largeCatalogFolderP->folderID;
1808 dprP->offspringIndex = 1;
1809 dprP->directoryHint = hint;
1810 dprP->parentDirID = foundKey.hfsPlus.parentID;
1811 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &dprP->directoryName, isHFSPlus );
1812
1813 for ( i = 1; i < GPtr->DirLevel; i++ )
1814 {
1815 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1816 if (dprP->directoryID == dprP1->directoryID)
1817 {
1818 RcdError( GPtr,E_DirLoop ); // loop in directory hierarchy
1819 return( E_DirLoop );
1820 }
1821 }
1822
1823 /*
1824 * Find thread record
1825 */
1826 BuildCatalogKey( dprP->directoryID, (const CatalogName *) nil, isHFSPlus, &key );
1827 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1828 if ( result != noErr ) {
1829 struct MissingThread *mtp;
1830
1831 /* Report the error */
1832 fsckPrint(GPtr->context, E_NoThd, dprP->directoryID);
1833
1834 /* HFS will exit here */
1835 if ( !isHFSPlus )
1836 return (E_NoThd);
1837 /*
1838 * A directory thread is missing. If we can find this
1839 * ID on the missing-thread list then we know where the
1840 * child entries reside and can resume our enumeration.
1841 */
1842 for (mtp = GPtr->missingThreadList; mtp != NULL; mtp = mtp->link) {
1843 if (mtp->threadID == dprP->directoryID) {
1844 mtp->thread.recordType = kHFSPlusFolderThreadRecord;
1845 mtp->thread.parentID = dprP->parentDirID;
1846 CopyCatalogName(&dprP->directoryName, (CatalogName *)&mtp->thread.nodeName, isHFSPlus);
1847
1848 /* Reposition to the first child of target directory */
1849 result = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &mtp->nextKey,
1850 kNoHint, &foundKey, &threadRecord, &recSize, &hint);
1851 if (result) {
1852 return (E_NoThd);
1853 }
1854 selCode = 0; /* use current record instead of next */
1855 break;
1856 }
1857 }
1858 if (selCode != 0) {
1859 /*
1860 * A directory thread is missing but we know this
1861 * directory has no children (since we didn't find
1862 * its ID on the missing-thread list above).
1863 *
1864 * At this point we can resume the enumeration at
1865 * our previous position in our parent directory.
1866 */
1867 goto resumeAtParent;
1868 }
1869 }
1870 dprP->threadHint = hint;
1871 GPtr->TarBlock = hint;
1872 }
1873
1874 // LargeCatalogFile
1875 else if ( record.recordType == kHFSPlusFileRecord )
1876 {
1877 largeCatalogFileP = (HFSPlusCatalogFile *) &record;
1878 GPtr->TarID = largeCatalogFileP->fileID; // target ID = file number
1879 GPtr->CNType = record.recordType; // target CNode type = thread
1880 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1881 filCnt++;
1882 if (dprP->directoryID == kHFSRootFolderID)
1883 rtfilCnt++;
1884 }
1885
1886 else if ( record.recordType == kHFSFolderRecord )
1887 {
1888 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1889
1890 smallCatalogFolderP = (HFSCatalogFolder *) &record;
1891 GPtr->TarID = smallCatalogFolderP->folderID; /* target ID = directory ID */
1892 GPtr->CNType = record.recordType; /* target CNode type = directory ID */
1893 CopyCatalogName( (const CatalogName *) &key.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */
1894
1895 if (dprP->directoryID > 1)
1896 {
1897 GPtr->DirLevel++; /* we have a new directory level */
1898 dirCnt++;
1899 }
1900 if (dprP->directoryID == kHFSRootFolderID) /* bump root dir count */
1901 rtdirCnt++;
1902
1903 if ( GPtr->DirLevel > GPtr->dirPathCount )
1904 {
1905 void *ptr;
1906
1907 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1908 if (ptr == nil)
1909 {
1910 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1911 return noErr; /* abort this check, but let other checks proceed */
1912 }
1913 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1914 GPtr->dirPathCount += CMMaxDepth;
1915 GPtr->DirPTPtr = ptr;
1916 }
1917
1918 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1919 dprP->directoryID = smallCatalogFolderP->folderID;
1920 dprP->offspringIndex = 1;
1921 dprP->directoryHint = hint;
1922 dprP->parentDirID = foundKey.hfs.parentID;
1923
1924 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &dprP->directoryName, isHFSPlus );
1925
1926 for (i = 1; i < GPtr->DirLevel; i++)
1927 {
1928 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1929 if (dprP->directoryID == dprP1->directoryID)
1930 {
1931 RcdError( GPtr,E_DirLoop ); /* loop in directory hierarchy */
1932 return( E_DirLoop );
1933 }
1934 }
1935
1936 BuildCatalogKey( dprP->directoryID, (const CatalogName *)0, isHFSPlus, &key );
1937 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1938 if (result != noErr )
1939 {
1940 result = IntError(GPtr,result); /* error from BTSearch */
1941 return(result);
1942 }
1943 dprP->threadHint = hint; /* save hint for thread */
1944 GPtr->TarBlock = hint; /* set target block */
1945 }
1946
1947 // HFSCatalogFile...
1948 else if ( record.recordType == kHFSFileRecord )
1949 {
1950 smallCatalogFileP = (HFSCatalogFile *) &record;
1951 GPtr->TarID = smallCatalogFileP->fileID; /* target ID = file number */
1952 GPtr->CNType = record.recordType; /* target CNode type = thread */
1953 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */
1954 filCnt++;
1955 if (dprP->directoryID == kHFSRootFolderID)
1956 rtfilCnt++;
1957 }
1958
1959 // Unknown/Bad record type
1960 else
1961 {
1962 M_DebugStr("\p Unknown-Bad record type");
1963 return( 123 );
1964 }
1965 }
1966
1967 //
1968 // if not same ParID or no record
1969 //
1970 else if ( (record.recordType == kHFSFileThreadRecord) || (record.recordType == kHFSPlusFileThreadRecord) ) /* it's a file thread, skip past it */
1971 {
1972 GPtr->TarID = parID; // target ID = file number
1973 GPtr->CNType = record.recordType; // target CNode type = thread
1974 GPtr->CName.ustr.length = 0; // no target CName
1975 }
1976
1977 else
1978 {
1979 resumeAtParent:
1980 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
1981 GPtr->CNType = record.recordType; /* target CNode type = directory */
1982 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus ); // copy the string name
1983
1984 // re-locate current directory
1985 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &catalogName, isHFSPlus );
1986 BuildCatalogKey( dprP->parentDirID, (const CatalogName *)&catalogName, isHFSPlus, &key );
1987 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, dprP->directoryHint, &foundKey, &record2, &recSize, &hint );
1988
1989 if ( result != noErr )
1990 {
1991 result = IntError(GPtr,result); /* error from BTSearch */
1992 return(result);
1993 }
1994 GPtr->TarBlock = hint; /* set target block */
1995
1996
1997 valence = isHFSPlus == true ? record2.hfsPlusFolder.valence : (UInt32)record2.hfsFolder.valence;
1998
1999 if ( valence != dprP->offspringIndex -1 ) /* check its valence */
2000 if ( ( result = RcdValErr( GPtr, E_DirVal, dprP->offspringIndex -1, valence, dprP->parentDirID ) ) )
2001 return( result );
2002
2003 GPtr->DirLevel--; /* move up a level */
2004
2005 if(GPtr->DirLevel > 0)
2006 {
2007 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
2008 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
2009 GPtr->CNType = record.recordType; /* target CNode type = directory */
2010 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus );
2011 }
2012 }
2013 } // end while
2014
2015 //
2016 // verify directory and file counts (all nonfatal, repairable errors)
2017 //
2018 if (!isHFSPlus && (rtdirCnt != calculatedVCB->vcbNmRtDirs)) /* check count of dirs in root */
2019 if ( ( result = RcdValErr(GPtr,E_RtDirCnt,rtdirCnt,calculatedVCB->vcbNmRtDirs,0) ) )
2020 return( result );
2021
2022 if (!isHFSPlus && (rtfilCnt != calculatedVCB->vcbNmFls)) /* check count of files in root */
2023 if ( ( result = RcdValErr(GPtr,E_RtFilCnt,rtfilCnt,calculatedVCB->vcbNmFls,0) ) )
2024 return( result );
2025
2026 if (dirCnt != calculatedVCB->vcbFolderCount) /* check count of dirs in volume */
2027 if ( ( result = RcdValErr(GPtr,E_DirCnt,dirCnt,calculatedVCB->vcbFolderCount,0) ) )
2028 return( result );
2029
2030 if (filCnt != calculatedVCB->vcbFileCount) /* check count of files in volume */
2031 if ( ( result = RcdValErr(GPtr,E_FilCnt,filCnt,calculatedVCB->vcbFileCount,0) ) )
2032 return( result );
2033
2034 return( noErr );
2035
2036 } /* end of CatHChk */
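/*
 * Illustrative sketch (not part of fsck_hfs): CatHChk's valence test above boils
 * down to "a folder's stored valence must equal the number of children actually
 * enumerated under it".  The helper below is hypothetical and only restates that
 * comparison; in the real code dprP->offspringIndex starts at 1 and is bumped once
 * per child, so the number of children seen is offspringIndex - 1.
 */
#if 0 // illustrative sketch only, never compiled
static Boolean SketchValenceMatches( UInt32 storedValence, UInt32 offspringIndex )
{
	UInt32 childrenSeen = offspringIndex - 1;	// offspringIndex starts at 1
	return ( storedValence == childrenSeen );	// a mismatch is recorded via RcdValErr(E_DirVal, ...)
}
#endif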
2037
2038
2039
2040 /*------------------------------------------------------------------------------
2041
2042 Function: CreateAttributesBTreeControlBlock
2043
2044 Function: Create the calculated AttributesBTree Control Block
2045
2046 Input: GPtr - pointer to scavenger global area
2047
2048 Output: - 0 = no error
2049 n = error code
2050 ------------------------------------------------------------------------------*/
2051 OSErr CreateAttributesBTreeControlBlock( SGlobPtr GPtr )
2052 {
2053 OSErr err = 0;
2054 SInt32 size;
2055 UInt32 numABlks;
2056 BTreeControlBlock * btcb;
2057 SVCB * vcb;
2058 Boolean isHFSPlus;
2059 BTHeaderRec header;
2060 BlockDescriptor block;
2061
2062 // Set up
2063 isHFSPlus = VolumeObjectIsHFSPlus( );
2064 GPtr->TarID = kHFSAttributesFileID;
2065 GPtr->TarBlock = kHeaderNodeNum;
2066 block.buffer = NULL;
2067 btcb = GPtr->calculatedAttributesBTCB;
2068 vcb = GPtr->calculatedVCB;
2069
2070 //
2071 // check out allocation info for the Attributes File
2072 //
2073
2074 if (isHFSPlus)
2075 {
2076 HFSPlusVolumeHeader *volumeHeader;
2077
2078 err = GetVolumeObjectVHB( &block );
2079 if ( err != noErr )
2080 goto exit;
2081 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
2082
2083 CopyMemory( volumeHeader->attributesFile.extents, GPtr->calculatedAttributesFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
2084
2085 err = CheckFileExtents( GPtr, kHFSAttributesFileID, kDataFork, NULL, (void *)GPtr->calculatedAttributesFCB->fcbExtents32, &numABlks);
2086 if (err) goto exit;
2087
2088 if ( volumeHeader->attributesFile.totalBlocks != numABlks ) // check out the PEOF
2089 {
2090 RcdError( GPtr, E_CatPEOF );
2091 err = E_CatPEOF;
2092 goto exit;
2093 }
2094 else
2095 {
2096 GPtr->calculatedAttributesFCB->fcbLogicalSize = (UInt64) volumeHeader->attributesFile.logicalSize; // Set Attributes tree's LEOF
2097 GPtr->calculatedAttributesFCB->fcbPhysicalSize = (UInt64) volumeHeader->attributesFile.totalBlocks *
2098 (UInt64) volumeHeader->blockSize; // Set Attributes tree's PEOF
2099 }
2100
2101 //
2102 // See if we actually have an attributes BTree
2103 //
2104 if (numABlks == 0)
2105 {
2106 btcb->maxKeyLength = 0;
2107 btcb->keyCompareProc = 0;
2108 btcb->leafRecords = 0;
2109 btcb->nodeSize = 0;
2110 btcb->totalNodes = 0;
2111 btcb->freeNodes = 0;
2112 btcb->attributes = 0;
2113
2114 btcb->treeDepth = 0;
2115 btcb->rootNode = 0;
2116 btcb->firstLeafNode = 0;
2117 btcb->lastLeafNode = 0;
2118
2119 // GPtr->calculatedVCB->attributesRefNum = 0;
2120 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2121 }
2122 else
2123 {
2124 // read the BTreeHeader from disk & also validate it's node size.
2125 err = GetBTreeHeader(GPtr, GPtr->calculatedAttributesFCB, &header);
2126 if (err) goto exit;
2127
2128 btcb->maxKeyLength = kAttributeKeyMaximumLength; // max key length
2129 btcb->keyCompareProc = (void *)CompareAttributeKeys;
2130 btcb->leafRecords = header.leafRecords;
2131 btcb->nodeSize = header.nodeSize;
2132 btcb->totalNodes = ( GPtr->calculatedAttributesFCB->fcbPhysicalSize / btcb->nodeSize );
2133 btcb->freeNodes = btcb->totalNodes; // start with everything free
2134 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Attributes files have large, variable-sized keys
2135
2136 btcb->treeDepth = header.treeDepth;
2137 btcb->rootNode = header.rootNode;
2138 btcb->firstLeafNode = header.firstLeafNode;
2139 btcb->lastLeafNode = header.lastLeafNode;
2140
2141 //
2142 // Make sure the header node's size field is correct by looking at the 1st record offset
2143 //
2144 err = CheckNodesFirstOffset( GPtr, btcb );
2145 if (err) goto exit;
2146 }
2147 }
2148 else
2149 {
2150 btcb->maxKeyLength = 0;
2151 btcb->keyCompareProc = 0;
2152 btcb->leafRecords = 0;
2153 btcb->nodeSize = 0;
2154 btcb->totalNodes = 0;
2155 btcb->freeNodes = 0;
2156 btcb->attributes = 0;
2157
2158 btcb->treeDepth = 0;
2159 btcb->rootNode = 0;
2160 btcb->firstLeafNode = 0;
2161 btcb->lastLeafNode = 0;
2162
2163 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2164 }
2165
2166 //
2167 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes?
2168 //
2169 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
2170 if ( btcb->refCon == nil ) {
2171 err = R_NoMem;
2172 goto exit;
2173 }
2174
2175 if (btcb->totalNodes == 0)
2176 {
2177 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = nil;
2178 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = 0;
2179 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = 0;
2180 }
2181 else
2182 {
2183 if ( btcb->refCon == nil ) {
2184 err = R_NoMem;
2185 goto exit;
2186 }
2187 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
2188 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
2189 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
2190 {
2191 err = R_NoMem;
2192 goto exit;
2193 }
2194
2195 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
2196 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
2197 }
2198
2199 exit:
2200 if (block.buffer)
2201 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
2202
2203 return (err);
2204 }
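/*
 * Illustrative sketch (not part of fsck_hfs): the BTree node bitmap allocated above
 * needs one bit per node, rounded up to a whole byte, which is what
 * (totalNodes + 7) / 8 computes — e.g. 1000 nodes need 125 bytes, 1001 nodes need
 * 126.  The helper name is hypothetical.
 */
#if 0 // illustrative sketch only, never compiled
static SInt32 SketchBTreeBitmapBytes( UInt32 totalNodes )
{
	return (SInt32) ((totalNodes + 7) / 8);		// one bit per node, rounded up to a byte
}
#endif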
2205
2206 /*
2207 * Function: RecordLastAttrBits
2208 *
2209 * Description:
2210 * Updates the Chinese Remainder Theorem buckets with extended attribute
2211 * information for the previous fileID stored in the global structure.
2212 *
2213 * Input:
2214 * GPtr - pointer to scavenger global area
2215 * * GPtr->lastAttrInfo.fileID - fileID of last attribute seen
2216 *
2217 * Output: Nothing
2218 */
2219 static void RecordLastAttrBits(SGlobPtr GPtr)
2220 {
2221 /* The lastAttrInfo structure is initialized to zero, hence we ignore
2222 * recording information for fileID = 0. fileIDs < 16 (except for
2223 * fileID = 2) can have extended attributes but do not have a
2224 * corresponding entry in the catalog Btree. Do not record these
2225 * fileIDs in the Chinese Remainder Theorem buckets. Currently we only
2226 * set extended attributes for fileID = 1 among these fileIDs,
2227 * and this can change in the future (see 3984119)
2228 */
2229 if ((GPtr->lastAttrInfo.fileID == 0) ||
2230 ((GPtr->lastAttrInfo.fileID < kHFSFirstUserCatalogNodeID) &&
2231 (GPtr->lastAttrInfo.fileID != kHFSRootFolderID))) {
2232 return;
2233 }
2234
2235 if (GPtr->lastAttrInfo.hasSecurity == true) {
2236 /* fileID has both extended attribute and ACL */
2237 RecordXAttrBits(GPtr, kHFSHasAttributesMask | kHFSHasSecurityMask,
2238 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2239 GPtr->lastAttrInfo.hasSecurity = false;
2240 } else {
2241 /* fileID only has extended attribute */
2242 RecordXAttrBits(GPtr, kHFSHasAttributesMask,
2243 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2244 }
2245 }
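/*
 * Illustrative sketch (not part of fsck_hfs): the filter at the top of
 * RecordLastAttrBits reduces to the predicate below — fileID 0 is never recorded,
 * and of the reserved IDs below kHFSFirstUserCatalogNodeID (16) only the root
 * folder (fileID 2) is recorded, because it is the only one of them with a catalog
 * record.  The helper name is hypothetical.
 */
#if 0 // illustrative sketch only, never compiled
static Boolean SketchShouldRecordAttrBits( HFSCatalogNodeID fileID )
{
	if ( fileID == 0 )
		return false;
	if ( fileID < kHFSFirstUserCatalogNodeID && fileID != kHFSRootFolderID )
		return false;				// reserved ID with no catalog entry
	return true;
}
#endif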
2246
2247 /*
2248 * Function: setLastAttrAllocInfo
2249 *
2250 * Description:
2251 * Set the global structure of last extended attribute with
2252 * the allocation block information. Also set the isValid to true
2253 * to indicate that the data is valid and should be used to verify
2254 * allocation blocks.
2255 *
2256 * Input:
2257 * GPtr - pointer to scavenger global area
2258 * totalBlocks - total blocks allocated by the attribute
2259 * logicalSize - logical size of the attribute
2260 * calculatedBlocks - blocks accounted by the attribute in current extent
2261 *
2262 * Output: Nothing
2263 */
2264 static void setLastAttrAllocInfo(SGlobPtr GPtr, u_int32_t totalBlocks,
2265 u_int64_t logicalSize, u_int32_t calculatedTotalBlocks)
2266 {
2267 GPtr->lastAttrInfo.totalBlocks = totalBlocks;
2268 GPtr->lastAttrInfo.logicalSize = logicalSize;
2269 GPtr->lastAttrInfo.calculatedTotalBlocks = calculatedTotalBlocks;
2270 GPtr->lastAttrInfo.isValid = true;
2271 }
2272
2273 /*
2274 * Function: CheckLastAttrAllocation
2275 *
2276 * Description:
2277 * Checks the allocation block information stored for the last
2278 * extended attribute seen during extended attribute BTree traversal.
2279 * Always resets the information stored for last EA allocation.
2280 *
2281 * Input: GPtr - pointer to scavenger global area
2282 *
2283 * Output: int - function result:
2284 * zero - no error
2285 * non-zero - error
2286 */
2287 static int CheckLastAttrAllocation(SGlobPtr GPtr)
2288 {
2289 int result = 0;
2290 u_int64_t bytes;
2291
2292 if (GPtr->lastAttrInfo.isValid == true) {
2293 if (GPtr->lastAttrInfo.totalBlocks !=
2294 GPtr->lastAttrInfo.calculatedTotalBlocks) {
2295 result = RecordBadAllocation(GPtr->lastAttrInfo.fileID,
2296 GPtr->lastAttrInfo.attrname, kEAData,
2297 GPtr->lastAttrInfo.totalBlocks,
2298 GPtr->lastAttrInfo.calculatedTotalBlocks);
2299 } else {
2300 bytes = (u_int64_t)GPtr->lastAttrInfo.calculatedTotalBlocks *
2301 (u_int64_t)GPtr->calculatedVCB->vcbBlockSize;
2302 if (GPtr->lastAttrInfo.logicalSize > bytes) {
2303 result = RecordTruncation(GPtr->lastAttrInfo.fileID,
2304 GPtr->lastAttrInfo.attrname, kEAData,
2305 GPtr->lastAttrInfo.logicalSize, bytes);
2306 }
2307 }
2308
2309 /* Invalidate information in the global structure */
2310 GPtr->lastAttrInfo.isValid = false;
2311 }
2312
2313 return (result);
2314 }
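/*
 * Illustrative sketch (not part of fsck_hfs): CheckLastAttrAllocation applies two
 * rules to the attribute recorded in lastAttrInfo — the block count claimed by the
 * fork data must match the blocks actually found in its extents, and the logical
 * size must fit inside those blocks.  The helper below is hypothetical and only
 * restates the arithmetic; for example, with calculatedTotalBlocks = 10 and a
 * 4096-byte allocation block, any logicalSize above 40960 bytes would be reported
 * as a truncation.
 */
#if 0 // illustrative sketch only, never compiled
static int SketchCheckAttrAllocation( u_int32_t totalBlocks,
                                      u_int32_t calculatedTotalBlocks,
                                      u_int64_t logicalSize,
                                      u_int32_t allocBlockSize )
{
	if ( totalBlocks != calculatedTotalBlocks )
		return 1;	// bad allocation (RecordBadAllocation above)

	if ( logicalSize > (u_int64_t) calculatedTotalBlocks * allocBlockSize )
		return 2;	// truncation needed (RecordTruncation above)

	return 0;		// attribute allocation is consistent
}
#endif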
2315
2316 /*------------------------------------------------------------------------------
2317 Function: CheckAttributeRecord
2318
2319 Description:
2320 This is the callback function called for all leaf records in the
2321 Attribute BTree during the verify and repair stages. The basic
2322 functionality of the function is the same during the verify and repair
2323 stages, except that whenever it finds corruption, the verify
2324 stage prints a message and the repair stage repairs it. In the verify
2325 stage, this function accounts for the allocation blocks used
2326 by extent-based extended attributes and also updates the Chinese
2327 Remainder Theorem buckets corresponding to the extended attribute
2328 and security bits.
2329
2330 1. Only in the verify stage, if the fileID or attribute name of the current
2331 extended attribute is not the same as the previous attribute's, check the
2332 allocation block counts for the previous attribute.
2333
2334 2. Only in the verify stage, if the fileID of the current attribute is not the
2335 same as the previous attribute's, record the previous fileID information
2336 for the Chinese Remainder Theorem.
2337
2338 3. For attribute type,
2339 kHFSPlusAttrForkData:
2340 ---------------------
2341 Do all of the following during the verify stage and nothing in the repair
2342 stage -
2343
2344 Check the start block for the extended attribute from the key. If it is not
2345 zero, print an error.
2346
2347 Account for blocks occupied by this extent and store the allocation
2348 information for this extent to check in future. Also update the
2349 last attribute information in the global structure.
2350
2351 kHFSPlusAttrExtents:
2352 --------------------
2353 If the current attribute's fileID is not the same as the previous fileID, or
2354 if the previous recordType is not a valid forkData or overflow extent
2355 record, report an error in the verify stage or mark it for deletion in the
2356 repair stage.
2357
2358 Do all of the following during the verify stage and nothing in the repair
2359 stage -
2360
2361 Check the start block for the extended attribute from the key. If it is not
2362 equal to the total blocks seen up to the last attribute, print an error.
2363
2364 Account for blocks occupied by this extent. Update previous
2365 attribute allocation information with blocks seen in current
2366 extent. Also update last attribute block information in the global
2367 structure.
2368
2369 kHFSPlusAttrInlineData:
2370 -----------------------
2371 Only in the verify stage, check if the start block in the key is
2372 equal to zero. If not, print error.
2373
2374 Unknown type:
2375 -------------
2376 In the verify stage, report an error. In the repair stage, mark the record
2377 for deletion.
2378
2379 4. If a record is marked for deletion, delete the record.
2380
2381 5. Before exiting from the function, always do the following -
2382 a. Indicate if the extended attribute was an ACL
2383 b. Update previous fileID and recordType with current information.
2384 c. Update previous attribute name with current attribute name.
2385
2386 Input: GPtr - pointer to scavenger global area
2387 key - key for current attribute
2388 rec - attribute record
2389 reclen - length of the record
2390
2391 Output: int - function result:
2392 0 = no error
2393 n = error code
2394 ------------------------------------------------------------------------------*/
2395 int
2396 CheckAttributeRecord(SGlobPtr GPtr, const HFSPlusAttrKey *key, const HFSPlusAttrRecord *rec, UInt16 reclen)
2397 {
2398 int result = 0;
2399 unsigned char attrname[XATTR_MAXNAMELEN+1];
2400 size_t attrlen;
2401 u_int32_t blocks;
2402 u_int32_t fileID;
2403 struct attributeInfo *prevAttr;
2404 Boolean isSameAttr = true;
2405 Boolean doDelete = false;
2406 u_int16_t dfaStage = GetDFAStage();
2407
2408 /* Assert if volume is not HFS Plus */
2409 assert(VolumeObjectIsHFSPlus() == true);
2410
2411 prevAttr = &(GPtr->lastAttrInfo);
2412 fileID = key->fileID;
2413 /* Convert unicode attribute name to UTF-8 string */
2414 (void) utf_encodestr(key->attrName, key->attrNameLen * 2, attrname, &attrlen, sizeof(attrname));
2415 attrname[attrlen] = '\0';
2416
2417 /* Compare the current attribute to last attribute seen */
2418 if ((fileID != prevAttr->fileID) ||
2419 (strcmp((char *)attrname, (char *)prevAttr->attrname) != 0)) {
2420 isSameAttr = false;
2421 }
2422
2423 /* We check allocation block information and record EA information for
2424 * CRT bucket in verify stage and hence no need to do it again in
2425 * repair stage.
2426 */
2427 if (dfaStage == kVerifyStage) {
2428 /* Different attribute - check allocation block information */
2429 if (isSameAttr == false) {
2430 result = CheckLastAttrAllocation(GPtr);
2431 if (result) {
2432 goto update_out;
2433 }
2434 }
2435
2436 /* Different fileID - record information in CRT bucket */
2437 if (fileID != prevAttr->fileID) {
2438 RecordLastAttrBits(GPtr);
2439 }
2440 }
2441
2442 switch (rec->recordType) {
2443 case kHFSPlusAttrForkData: {
2444 /* Check start block only in verify stage to avoid printing message
2445 * in repair stage. Note that this corruption is not repairable
2446 * currently. Also check extents only in verify stage to avoid
2447 * false overlap extents error.
2448 */
2449
2450 if (dfaStage == kVerifyStage) {
2451 /* Start block in the key should be zero */
2452 if (key->startBlock != 0) {
2453 RcdError(GPtr, E_ABlkSt);
2454 result = E_ABlkSt;
2455 goto err_out;
2456 }
2457
2458 HFSPlusForkData forkData;
2459 memcpy((void*)(&forkData), (void*)(&rec->forkData.theFork), sizeof(HFSPlusForkData));
2460 /* Check the extent information and record overlapping extents, if any */
2461 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2462 &forkData.extents, &blocks);
2463 if (result) {
2464 goto update_out;
2465 }
2466
2467 /* Store allocation information to check in future */
2468 (void) setLastAttrAllocInfo(GPtr, rec->forkData.theFork.totalBlocks,
2469 rec->forkData.theFork.logicalSize, blocks);
2470 }
2471 break;
2472 }
2473
2474 case kHFSPlusAttrExtents: {
2475 /* Different attribute/fileID or incorrect previous record type */
2476 if ((isSameAttr == false) ||
2477 ((prevAttr->recordType != kHFSPlusAttrExtents) &&
2478 (prevAttr->recordType != kHFSPlusAttrForkData))) {
2479 if (dfaStage == kRepairStage) {
2480 /* Delete record in repair stage */
2481 doDelete = true;
2482 } else {
2483 /* Report error in verify stage */
2484 RcdError(GPtr, E_AttrRec);
2485 GPtr->ABTStat |= S_AttrRec;
2486 goto err_out;
2487 }
2488 }
2489
2490 /* Check start block only in verify stage to avoid printing message
2491 * in repair stage. Note that this corruption is not repairable
2492 * currently. Also check extents only in verify stage to avoid
2493 * false overlap extents error.
2494 */
2495 if (dfaStage == kVerifyStage) {
2496 /* startBlock in the key should be equal to total blocks
2497 * seen uptil last attribute.
2498 */
2499 if (key->startBlock != prevAttr->calculatedTotalBlocks) {
2500 RcdError(GPtr, E_ABlkSt);
2501 result = E_ABlkSt;
2502 goto err_out;
2503 }
2504
2505 /* Check the extent information and record overlapping extents, if any */
2506 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2507 rec->overflowExtents.extents, &blocks);
2508 if (result) {
2509 goto update_out;
2510 }
2511
2512 /* Increment the blocks seen so far for this attribute */
2513 prevAttr->calculatedTotalBlocks += blocks;
2514 }
2515 break;
2516 }
2517
2518 case kHFSPlusAttrInlineData: {
2519 /* Check start block only in verify stage to avoid printing message
2520 * in repair stage.
2521 */
2522 if (dfaStage == kVerifyStage) {
2523 /* Start block in the key should be zero */
2524 if (key->startBlock != 0) {
2525 RcdError(GPtr, E_ABlkSt);
2526 result = E_ABlkSt;
2527 goto err_out;
2528 }
2529 }
2530 break;
2531 }
2532
2533 default: {
2534 /* Unknown attribute record */
2535 if (dfaStage == kRepairStage) {
2536 /* Delete record in repair stage */
2537 doDelete = true;
2538 } else {
2539 /* Report error in verify stage */
2540 RcdError(GPtr, E_AttrRec);
2541 GPtr->ABTStat |= S_AttrRec;
2542 goto err_out;
2543 }
2544 break;
2545 }
2546 };
2547
2548 if (doDelete == true) {
2549 result = DeleteBTreeRecord(GPtr->calculatedAttributesFCB, key);
2550 DPRINTF (d_info|d_xattr, "%s: Deleting attribute %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2551 if (result) {
2552 DPRINTF (d_error|d_xattr, "%s: Error in deleting record for %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2553 }
2554
2555 /* Set flags to mark header and map dirty */
2556 GPtr->ABTStat |= S_BTH + S_BTM;
2557 goto err_out;
2558 }
2559
2560 update_out:
2561 /* Note that an ACL exists for this fileID */
2562 if (strcmp((char *)attrname, KAUTH_FILESEC_XATTR) == 0) {
2563 prevAttr->hasSecurity = true;
2564 }
2565
2566 /* Always update the last recordType, fileID and attribute name before exiting */
2567 prevAttr->recordType = rec->recordType;
2568 prevAttr->fileID = fileID;
2569 (void) strlcpy((char *)prevAttr->attrname, (char *)attrname, sizeof(prevAttr->attrname));
2570
2571 goto out;
2572
2573 err_out:
2574 /* If the current record is invalid/bogus, decide whether to update
2575 * fileID stored in global structure for future comparison based on the
2576 * previous fileID.
2577 * If the current bogus record's fileID is different from fileID of the
2578 * previous good record, we do not want to account for bogus fileID in
2579 * the Chinese Remainder Theorem when we see next good record.
2580 * Hence reset the fileID in global structure to dummy value. Example,
2581 * if the fileIDs are 10 15 20 and record with ID=15 is bogus, we do not
2582 * want to account for record with ID=15.
2583 * If the current bogus record's fileID is same as the fileID of the
2584 * previous good record, we want to account for this fileID in the
2585 * next good record we see after this bogus record. Hence do not
2586 * reset the fileID to dummy value. Example, if the records have fileID
2587 * 10 10 30 and the second record with ID=10 is bogus, we want to
2588 * account for ID=10 when we see record with ID=30.
2589 */
2590 if (prevAttr->fileID != fileID) {
2591 prevAttr->fileID = 0;
2592 }
2593
2594 out:
2595 return(result);
2596 }
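/*
 * Illustrative sketch (not part of fsck_hfs): the startBlock checks above encode a
 * simple continuity rule for one extent-based attribute — the fork data record's
 * key starts at block 0, and every overflow extents record's key must start exactly
 * where the blocks seen so far for that attribute end.  The helper below is
 * hypothetical; it just walks a list of per-record block counts and verifies that
 * rule, e.g. records covering 8, 8 and 4 blocks must carry key startBlocks of 0, 8
 * and 16.
 */
#if 0 // illustrative sketch only, never compiled
static Boolean SketchAttrStartBlocksAreContiguous( const u_int32_t keyStartBlocks[],
                                                   const u_int32_t recordBlockCounts[],
                                                   int recordCount )
{
	u_int32_t blocksSoFar = 0;
	int i;

	for ( i = 0; i < recordCount; i++ ) {
		if ( keyStartBlocks[i] != blocksSoFar )
			return false;			// reported as E_ABlkSt above
		blocksSoFar += recordBlockCounts[i];
	}
	return true;
}
#endif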
2597
2598 /* Function: RecordXAttrBits
2599 *
2600 * Description:
2601 * This function increments the prime number buckets in the associated
2602 * prime bucket set, selected by the flags and btreetype, so that any
2603 * discrepancy between the attribute btree and the catalog btree for
2604 * extended attribute data can be detected later. The scheme is based on
2605 * the Chinese Remainder Theorem.
2606 *
2607 * Algorithm:
2608 * 1. If neither kHFSHasAttributesMask nor kHFSHasSecurityMask is set,
2609 * return.
2610 * 2. Based on btreetype and the flags, determine which prime number
2611 * buckets should be updated. Initialize pointers accordingly.
2612 * 3. Divide the fileID by pre-defined prime numbers. Store the
2613 * remainders.
2614 * 4. Increment each prime number bucket at the offset of the
2615 * corresponding remainder by one.
2616 *
2617 * Input: 1. GPtr - pointer to global scavenger area
2618 * 2. flags - can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2619 * 3. fileid - fileID for which particular extended attribute is seen
2620 * 4. btreetype - can be kCalculatedCatalogRefNum or kCalculatedAttributesRefNum;
2621 * indicates which btree's prime number buckets should be incremented
2622 *
2623 * Output: nil
2624 */
2625 void RecordXAttrBits(SGlobPtr GPtr, UInt16 flags, HFSCatalogNodeID fileid, UInt16 btreetype)
2626 {
2627 PrimeBuckets *cur_attr = NULL;
2628 PrimeBuckets *cur_sec = NULL;
2629
2630 if ( ((flags & kHFSHasAttributesMask) == 0) &&
2631 ((flags & kHFSHasSecurityMask) == 0) ) {
2632 /* No attributes exist for this fileID */
2633 goto out;
2634 }
2635
2636 /* Determine which buckets we are updating */
2637 if (btreetype == kCalculatedCatalogRefNum) {
2638 /* Catalog BTree buckets */
2639 if (flags & kHFSHasAttributesMask) {
2640 cur_attr = &(GPtr->CBTAttrBucket);
2641 GPtr->cat_ea_count++;
2642 }
2643 if (flags & kHFSHasSecurityMask) {
2644 cur_sec = &(GPtr->CBTSecurityBucket);
2645 GPtr->cat_acl_count++;
2646 }
2647 } else if (btreetype == kCalculatedAttributesRefNum) {
2648 /* Attribute BTree buckets */
2649 if (flags & kHFSHasAttributesMask) {
2650 cur_attr = &(GPtr->ABTAttrBucket);
2651 GPtr->attr_ea_count++;
2652 }
2653 if (flags & kHFSHasSecurityMask) {
2654 cur_sec = &(GPtr->ABTSecurityBucket);
2655 GPtr->attr_acl_count++;
2656 }
2657 } else {
2658 /* Incorrect btreetype found */
2659 goto out;
2660 }
2661
2662 if (cur_attr) {
2663 add_prime_bucket_uint32(cur_attr, fileid);
2664 }
2665
2666 if (cur_sec) {
2667 add_prime_bucket_uint32(cur_sec, fileid);
2668 }
2669
2670 out:
2671 return;
2672 }
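/*
 * Illustrative sketch (not part of fsck_hfs): the prime buckets work like a
 * Chinese-Remainder-style checksum over the set of fileIDs — for every fileID seen,
 * each per-prime histogram is incremented at offset (fileID % prime).  If the
 * catalog btree and the attribute btree saw the same fileIDs the same number of
 * times, the histograms match; any difference shows up in at least one bucket.
 * The primes, array sizes and helper below are hypothetical, chosen only to make
 * the bookkeeping visible; the real buckets live in PrimeBuckets and are filled by
 * add_prime_bucket_uint32.
 */
#if 0 // illustrative sketch only, never compiled
#define kSketchPrimeCount 3
static const u_int32_t kSketchPrimes[kSketchPrimeCount] = { 3, 5, 7 };

typedef struct {
	u_int32_t counts[kSketchPrimeCount][7];		// 7 >= largest sketch prime
} SketchPrimeBuckets;

static void SketchAddToBuckets( SketchPrimeBuckets *b, u_int32_t fileID )
{
	int i;

	for ( i = 0; i < kSketchPrimeCount; i++ )
		b->counts[i][ fileID % kSketchPrimes[i] ]++;	// one histogram per prime
}
#endif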
2673
2674 /* Function: CompareXattrPrimeBuckets
2675 *
2676 * Description:
2677 * This function compares the prime number buckets for catalog btree
2678 * and attribute btree for the given attribute type (normal attribute
2679 * bit or security bit).
2680 *
2681 * Input: 1. GPtr - pointer to global scavenger area
2682 * 2. BitMask - indicates which attribute type should be compared;
2683 * can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2684 * Output: zero - buckets were compared successfully
2685 * non-zero - buckets were not compared
2686 */
2687 static int CompareXattrPrimeBuckets(SGlobPtr GPtr, UInt16 BitMask)
2688 {
2689 int result = 1;
2690 PrimeBuckets *cat; /* Catalog BTree */
2691 PrimeBuckets *attr; /* Attribute BTree */
2692
2693 /* Find the correct PrimeBuckets to compare */
2694 if (BitMask & kHFSHasAttributesMask) {
2695 /* Compare buckets for attribute bit */
2696 cat = &(GPtr->CBTAttrBucket);
2697 attr = &(GPtr->ABTAttrBucket);
2698 } else if (BitMask & kHFSHasSecurityMask) {
2699 /* Compare buckets for security bit */
2700 cat = &(GPtr->CBTSecurityBucket);
2701 attr = &(GPtr->ABTSecurityBucket);
2702 } else {
2703 plog ("%s: Incorrect BitMask found.\n", __FUNCTION__);
2704 goto out;
2705 }
2706
2707 result = compare_prime_buckets(cat, attr);
2708 if (result) {
2709 char catbtree[32], attrbtree[32];
2710 /* Unequal values found, set the error bit in ABTStat */
2711 if (BitMask & kHFSHasAttributesMask) {
2712 fsckPrint(GPtr->context, E_IncorrectAttrCount);
2713 sprintf (catbtree, "%u", GPtr->cat_ea_count);
2714 sprintf (attrbtree, "%u", GPtr->attr_ea_count);
2715 fsckPrint(GPtr->context, E_BadValue, attrbtree, catbtree);
2716 GPtr->ABTStat |= S_AttributeCount;
2717 } else {
2718 fsckPrint(GPtr->context, E_IncorrectSecurityCount);
2719 sprintf (catbtree, "%u", GPtr->cat_acl_count);
2720 sprintf (attrbtree, "%u", GPtr->attr_acl_count);
2721 fsckPrint (GPtr->context, E_BadValue, attrbtree, catbtree);
2722 GPtr->ABTStat |= S_SecurityCount;
2723 }
2724 }
2725
2726 result = 0;
2727
2728 out:
2729 return result;
2730 }
2731
2732 /*------------------------------------------------------------------------------
2733
2734 Function: AttrBTChk - (Attributes BTree Check)
2735
2736 Function: Verifies the attributes BTree structure.
2737
2738 Input: GPtr - pointer to scavenger global area
2739
2740 Output: ExtBTChk - function result:
2741 0 = no error
2742 n = error code
2743 ------------------------------------------------------------------------------*/
2744
2745 OSErr AttrBTChk( SGlobPtr GPtr )
2746 {
2747 OSErr err;
2748
2749 //
2750 // If this volume has no attributes BTree, then skip this check
2751 //
2752 if (GPtr->calculatedVCB->vcbAttributesFile == NULL)
2753 return noErr;
2754
2755 // Write the status message here to avoid potential confusion to user.
2756 fsckPrint(GPtr->context, hfsExtAttrBTCheck);
2757
2758 // Set up
2759 GPtr->TarID = kHFSAttributesFileID; // target = attributes file
2760 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
2761
2762 //
2763 // check out the BTree structure
2764 //
2765
2766 err = BTCheck( GPtr, kCalculatedAttributesRefNum, (CheckLeafRecordProcPtr)CheckAttributeRecord);
2767 ReturnIfError( err ); // invalid attributes file BTree
2768
2769 // check the allocation block information about the last attribute
2770 err = CheckLastAttrAllocation(GPtr);
2771 ReturnIfError(err);
2772
2773 // record the last fileID for Chinese Remainder Theorem comparison
2774 RecordLastAttrBits(GPtr);
2775
2776 // compare the attributes prime buckets calculated from catalog btree and attribute btree
2777 err = CompareXattrPrimeBuckets(GPtr, kHFSHasAttributesMask);
2778 ReturnIfError( err );
2779
2780 // compare the security prime buckets calculated from catalog btree and attribute btree
2781 err = CompareXattrPrimeBuckets(GPtr, kHFSHasSecurityMask);
2782 ReturnIfError( err );
2783
2784 //
2785 // check out the allocation map structure
2786 //
2787
2788 err = BTMapChk( GPtr, kCalculatedAttributesRefNum );
2789 ReturnIfError( err ); // Invalid attributes BTree map
2790
2791 //
2792 // Make sure unused nodes in the B-tree are zero filled.
2793 //
2794 err = BTCheckUnusedNodes(GPtr, kCalculatedAttributesRefNum, &GPtr->ABTStat);
2795 ReturnIfError( err );
2796
2797 //
2798 // compare BTree header record on disk with scavenger's BTree header record
2799 //
2800
2801 err = CmpBTH( GPtr, kCalculatedAttributesRefNum );
2802 ReturnIfError( err );
2803
2804 //
2805 // compare BTree map on disk with scavenger's BTree map
2806 //
2807
2808 err = CmpBTM( GPtr, kCalculatedAttributesRefNum );
2809
2810 return( err );
2811 }
2812
2813
2814 /*------------------------------------------------------------------------------
2815
2816 Name: RcdValErr - (Record Valence Error)
2817
2818 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2819 list, to describe an incorrect valence count for possible repair.
2820
2821 Input: GPtr - ptr to scavenger global data
2822 type - error code (E_xxx), which should be >0
2823 correct - the correct valence, as computed here
2824 incorrect - the incorrect valence as found in volume
2825 parid - the parent id, if S_Valence error
2826
2827 Output: 0 - no error
2828 R_NoMem - not enough mem to allocate record
2829 ------------------------------------------------------------------------------*/
2830
2831 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid ) /* the ParID, if needed */
2832 {
2833 RepairOrderPtr p; /* the new node we compile */
2834 SInt16 n; /* size of node we allocate */
2835 Boolean isHFSPlus;
2836 char goodStr[32], badStr[32];
2837
2838 isHFSPlus = VolumeObjectIsHFSPlus( );
2839 fsckPrint(GPtr->context, type);
2840 sprintf(goodStr, "%u", correct);
2841 sprintf(badStr, "%u", incorrect);
2842 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2843
2844 if (type == E_DirVal) /* if normal directory valence error */
2845 n = CatalogNameSize( &GPtr->CName, isHFSPlus);
2846 else
2847 n = 0; /* other errors don't need the name */
2848
2849 p = AllocMinorRepairOrder( GPtr,n ); /* get the node */
2850 if (p==NULL) /* quit if out of room */
2851 return (R_NoMem);
2852
2853 p->type = type; /* save error info */
2854 p->correct = correct;
2855 p->incorrect = incorrect;
2856 p->parid = parid;
2857
2858 if ( n != 0 ) /* if name needed */
2859 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
2860
2861 GPtr->CatStat |= S_Valence; /* set flag to trigger repair */
2862
2863 return( noErr ); /* successful return */
2864 }
2865
2866 /*------------------------------------------------------------------------------
2867
2868 Name: RcdHsFldCntErr - (Record HasFolderCount)
2869
2870 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2871 list, to describe a folder flag missing the HasFolderCount bit
2872
2873 Input: GPtr - ptr to scavenger global data
2874 type - error code (E_xxx), which should be >0
2875 correct - the folder mask, as computed here
2876 incorrect - the folder mask, as found in volume
2877 fid - the folder id
2878
2879 Output: 0 - no error
2880 R_NoMem - not enough mem to allocate record
2881 ------------------------------------------------------------------------------*/
2882
2883 int RcdHsFldCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2884 {
2885 RepairOrderPtr p; /* the new node we compile */
2886 char goodStr[32], badStr[32];
2887 fsckPrint(GPtr->context, type, fid);
2888 sprintf(goodStr, "%#x", correct);
2889 sprintf(badStr, "%#x", incorrect);
2890 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2891
2892 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2893 if (p==NULL) /* quit if out of room */
2894 return (R_NoMem);
2895
2896 p->type = type; /* save error info */
2897 p->correct = correct;
2898 p->incorrect = incorrect;
2899 p->parid = fid;
2900
2901 return( noErr ); /* successful return */
2902 }
2903 /*------------------------------------------------------------------------------
2904
2905 Name: RcdFCntErr - (Record Folder Count)
2906
2907 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2908 list, to describe an incorrect folder count for possible repair.
2909
2910 Input: GPtr - ptr to scavenger global data
2911 type - error code (E_xxx), which should be >0
2912 correct - the correct folder count, as computed here
2913 incorrect - the incorrect folder count as found in volume
2914 fid - the folder id
2915
2916 Output: 0 - no error
2917 R_NoMem - not enough mem to allocate record
2918 ------------------------------------------------------------------------------*/
2919
2920 int RcdFCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2921 {
2922 RepairOrderPtr p; /* the new node we compile */
2923 char goodStr[32], badStr[32];
2924
2925 fsckPrint(GPtr->context, type, fid);
2926 sprintf(goodStr, "%u", correct);
2927 sprintf(badStr, "%u", incorrect);
2928 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2929
2930 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2931 if (p==NULL) /* quit if out of room */
2932 return (R_NoMem);
2933
2934 p->type = type; /* save error info */
2935 p->correct = correct;
2936 p->incorrect = incorrect;
2937 p->parid = fid;
2938
2939 return( noErr ); /* successful return */
2940 }
2941
2942 /*------------------------------------------------------------------------------
2943
2944 Name: RcdMDBEmbededVolDescriptionErr - (Record MDB Embedded Volume Description Error)
2945
2946 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2947 list, to describe the error for possible repair.
2948
2949 Input: GPtr - ptr to scavenger global data
2950 type - error code (E_xxx), which should be >0
2951 mdb - pointer to the HFS master directory block whose embedded
2952 volume description is recorded for possible repair
2953
2954 Output: 0 - no error
2955 R_NoMem - not enough mem to allocate record
2956 ------------------------------------------------------------------------------*/
2957
2958 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb )
2959 {
2960 RepairOrderPtr p; // the new node we compile
2961 EmbededVolDescription *desc;
2962
2963 RcdError( GPtr, type ); // first, record the error
2964
2965 p = AllocMinorRepairOrder( GPtr, sizeof(EmbededVolDescription) ); // get the node
2966 if ( p == nil ) return( R_NoMem );
2967
2968 p->type = type; // save error info
2969 desc = (EmbededVolDescription *) &(p->name);
2970 desc->drAlBlSt = mdb->drAlBlSt;
2971 desc->drEmbedSigWord = mdb->drEmbedSigWord;
2972 desc->drEmbedExtent.startBlock = mdb->drEmbedExtent.startBlock;
2973 desc->drEmbedExtent.blockCount = mdb->drEmbedExtent.blockCount;
2974
2975 GPtr->VIStat |= S_InvalidWrapperExtents; // set flag to trigger repair
2976
2977 return( noErr ); // successful return
2978 }
2979
2980
2981 #if 0 // not used at this time
2982 /*------------------------------------------------------------------------------
2983
2984 Name: RcdInvalidWrapperExtents - (Record Invalid Wrapper Extents)
2985
2986 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2987 list, to describe the error for possible repair.
2988
2989 Input: GPtr - ptr to scavenger global data
2990 type - error code (E_xxx), which should be >0
2991 correct - the correct valence, as computed here
2992 incorrect - the incorrect valence as found in volume
2993
2994 Output: 0 - no error
2995 R_NoMem - not enough mem to allocate record
2996 ------------------------------------------------------------------------------*/
2997
2998 static OSErr RcdInvalidWrapperExtents( SGlobPtr GPtr, OSErr type )
2999 {
3000 RepairOrderPtr p; // the new node we compile
3001
3002 RcdError( GPtr, type ); // first, record the error
3003
3004 p = AllocMinorRepairOrder( GPtr, 0 ); // get the node
3005 if ( p == nil ) return( R_NoMem );
3006
3007 p->type = type; // save error info
3008
3009 GPtr->VIStat |= S_BadMDBdrAlBlSt; // set flag to trigger repair
3010
3011 return( noErr ); // successful return
3012 }
3013 #endif
3014
3015
3016 #if 0 // We just check and fix them in SRepair.c
3017 /*------------------------------------------------------------------------------
3018
3019 Name: RcdOrphanedExtentErr
3020
3021 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3022 list, to describe an orphaned extent for possible repair.
3023
3024 Input: GPtr - ptr to scavenger global data
3025 type - error code (E_xxx), which should be >0
3026 theKey - the extent key of the orphaned extent
3027
3028 Output: 0 - no error
3029 R_NoMem - not enough mem to allocate record
3030 ------------------------------------------------------------------------------*/
3031
3032 static OSErr RcdOrphanedExtentErr ( SGlobPtr GPtr, SInt16 type, void *theKey )
3033 {
3034 RepairOrderPtr p; /* the new node we compile */
3035 SInt16 n; /* size of node we allocate */
3036 Boolean isHFSPlus;
3037
3038 isHFSPlus = VolumeObjectIsHFSPlus( );
3039 RcdError( GPtr,type ); /* first, record the error */
3040
3041 if ( isHFSPlus )
3042 n = sizeof( HFSPlusExtentKey );
3043 else
3044 n = sizeof( HFSExtentKey );
3045
3046 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3047 if ( p == NULL ) /* quit if out of room */
3048 return( R_NoMem );
3049
3050 CopyMemory( theKey, p->name, n ); /* copy in the key */
3051
3052 p->type = type; /* save error info */
3053
3054 GPtr->EBTStat |= S_OrphanedExtent; /* set flag to trigger repair */
3055
3056 return( noErr ); /* successful return */
3057 }
3058 #endif
3059
3060
3061 /*------------------------------------------------------------------------------
3062
3063 Function: VInfoChk - (Volume Info Check)
3064
3065 Function: Verifies volume level information.
3066
3067 Input: GPtr - pointer to scavenger global area
3068
3069 Output: VInfoChk - function result:
3070 0 = no error
3071 n = error code
3072 ------------------------------------------------------------------------------*/
3073
3074 OSErr VInfoChk( SGlobPtr GPtr )
3075 {
3076 OSErr result;
3077 UInt16 recSize;
3078 Boolean isHFSPlus;
3079 UInt32 hint;
3080 UInt64 maxClump;
3081 SVCB *vcb;
3082 VolumeObjectPtr myVOPtr;
3083 CatalogRecord record;
3084 CatalogKey foundKey;
3085 BlockDescriptor altBlock;
3086 BlockDescriptor priBlock;
3087
3088 vcb = GPtr->calculatedVCB;
3089 altBlock.buffer = priBlock.buffer = NULL;
3090 isHFSPlus = VolumeObjectIsHFSPlus( );
3091 myVOPtr = GetVolumeObjectPtr( );
3092
3093 // locate the catalog record for the root directory…
3094 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3095 GPtr->TarID = kHFSCatalogFileID; /* target = catalog */
3096 GPtr->TarBlock = hint; /* target block = returned hint */
3097 if ( result != noErr )
3098 {
3099 result = IntError( GPtr, result );
3100 return( result );
3101 }
3102
3103 GPtr->TarID = AMDB_FNum; // target = alternate MDB or VHB
3104 GetVolumeObjectAlternateBlockNum( &GPtr->TarBlock );
3105 result = GetVolumeObjectAlternateBlock( &altBlock );
3106
3107 // invalidate if we have not marked the alternate as OK
3108 if ( isHFSPlus ) {
3109 if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 )
3110 result = badMDBErr;
3111 }
3112 else if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) {
3113 result = badMDBErr;
3114 }
3115 if ( result != noErr ) {
3116 GPtr->VIStat = GPtr->VIStat | S_MDB;
3117 if ( VolumeObjectIsHFS( ) ) {
3118 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3119 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3120 plog("\tinvalid alternate MDB at %qd result %d \n", GPtr->TarBlock, result);
3121 }
3122 else {
3123 WriteError( GPtr, E_VolumeHeaderDamaged, 0, 0 );
3124 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3125 plog("\tinvalid alternate VHB at %qd result %d \n", GPtr->TarBlock, result);
3126 }
3127 result = noErr;
3128 goto exit;
3129 }
3130
3131 GPtr->TarID = MDB_FNum; // target = primary MDB or VHB
3132 GetVolumeObjectPrimaryBlockNum( &GPtr->TarBlock );
3133 result = GetVolumeObjectPrimaryBlock( &priBlock );
3134
3135 // invalidate if we have not marked the primary as OK
3136 if ( isHFSPlus ) {
3137 if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 )
3138 result = badMDBErr;
3139 }
3140 else if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) {
3141 result = badMDBErr;
3142 }
3143 if ( result != noErr ) {
3144 GPtr->VIStat = GPtr->VIStat | S_MDB;
3145 if ( VolumeObjectIsHFS( ) ) {
3146 WriteError( GPtr, E_MDBDamaged, 1, 0 );
3147 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3148 plog("\tinvalid primary MDB at %qd result %d \n", GPtr->TarBlock, result);
3149 }
3150 else {
3151 WriteError( GPtr, E_VolumeHeaderDamaged, 1, 0 );
3152 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3153 plog("\tinvalid primary VHB at %qd result %d \n", GPtr->TarBlock, result);
3154 }
3155 result = noErr;
3156 goto exit;
3157 }
3158
3159 // check to see that embedded HFS plus volumes still have both (alternate and primary) MDBs
3160 if ( VolumeObjectIsEmbeddedHFSPlus( ) &&
3161 ( (myVOPtr->flags & kVO_PriMDBOK) == 0 || (myVOPtr->flags & kVO_AltMDBOK) == 0 ) )
3162 {
3163 GPtr->VIStat |= S_WMDB;
3164 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3165 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3166 plog("\tinvalid wrapper MDB \n");
3167 }
3168
3169 if ( isHFSPlus )
3170 {
3171 HFSPlusVolumeHeader * volumeHeader;
3172 HFSPlusVolumeHeader * alternateVolumeHeader;
3173
3174 alternateVolumeHeader = (HFSPlusVolumeHeader *) altBlock.buffer;
3175 volumeHeader = (HFSPlusVolumeHeader *) priBlock.buffer;
3176
3177 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3178
3179 // check out creation and last mod dates
3180 vcb->vcbCreateDate = alternateVolumeHeader->createDate; // use creation date in alt MDB
3181 vcb->vcbModifyDate = volumeHeader->modifyDate; // don't change last mod date
3182 vcb->vcbCheckedDate = volumeHeader->checkedDate; // don't change checked date
3183
3184 // 3882639: Removed check for volume attributes in HFS Plus
3185 vcb->vcbAttributes = volumeHeader->attributes;
3186
3187 // verify allocation map ptr
3188 if ( volumeHeader->nextAllocation < vcb->vcbTotalBlocks )
3189 vcb->vcbNextAllocation = volumeHeader->nextAllocation;
3190 else
3191 vcb->vcbNextAllocation = 0;
3192
3193 // verify default clump sizes
3194 if ( (volumeHeader->rsrcClumpSize > 0) &&
3195 (volumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3196 ((volumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3197 vcb->vcbRsrcClumpSize = volumeHeader->rsrcClumpSize;
3198 else if ( (alternateVolumeHeader->rsrcClumpSize > 0) &&
3199 (alternateVolumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3200 ((alternateVolumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3201 vcb->vcbRsrcClumpSize = alternateVolumeHeader->rsrcClumpSize;
3202 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3203 vcb->vcbRsrcClumpSize = 4 * vcb->vcbBlockSize;
3204 else
3205 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3206
3207 if ( vcb->vcbRsrcClumpSize > kMaxClumpSize )
3208 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3209
3210 if ( (volumeHeader->dataClumpSize > 0) && (volumeHeader->dataClumpSize <= kMaxClumpSize) &&
3211 ((volumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3212 vcb->vcbDataClumpSize = volumeHeader->dataClumpSize;
3213 else if ( (alternateVolumeHeader->dataClumpSize > 0) &&
3214 (alternateVolumeHeader->dataClumpSize <= kMaxClumpSize) &&
3215 ((alternateVolumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3216 vcb->vcbDataClumpSize = alternateVolumeHeader->dataClumpSize;
3217 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3218 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3219 else
3220 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3221
3222 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3223 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3224
3225 /* Verify next CNode ID.
3226 * If volumeHeader->nextCatalogID < vcb->vcbNextCatalogID, probably
3227 * nextCatalogID has wrapped around.
3228 * If volumeHeader->nextCatalogID > vcb->vcbNextCatalogID, probably
3229 * many files were created and deleted, followed by no new file
3230 * creation.
3231 */
3232 if ( (volumeHeader->nextCatalogID > vcb->vcbNextCatalogID) )
3233 vcb->vcbNextCatalogID = volumeHeader->nextCatalogID;
3234
3235 //••TBD location and unicode? volumename
3236 // verify the volume name
3237 result = ChkCName( GPtr, (const CatalogName*) &foundKey.hfsPlus.nodeName, isHFSPlus );
3238
3239 // verify last backup date and backup sequence number
3240 vcb->vcbBackupDate = volumeHeader->backupDate; /* don't change last backup date */
3241
3242 // verify write count
3243 vcb->vcbWriteCount = volumeHeader->writeCount; /* don't change write count */
3244
3245 // check out extent file clump size
3246 if ( ((volumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3247 (volumeHeader->extentsFile.clumpSize <= maxClump) )
3248 vcb->vcbExtentsFile->fcbClumpSize = volumeHeader->extentsFile.clumpSize;
3249 else if ( ((alternateVolumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3250 (alternateVolumeHeader->extentsFile.clumpSize <= maxClump) )
3251 vcb->vcbExtentsFile->fcbClumpSize = alternateVolumeHeader->extentsFile.clumpSize;
3252 else
3253 vcb->vcbExtentsFile->fcbClumpSize =
3254 (alternateVolumeHeader->extentsFile.extents[0].blockCount * vcb->vcbBlockSize);
3255
3256 // check out catalog file clump size
3257 if ( ((volumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3258 (volumeHeader->catalogFile.clumpSize <= maxClump) )
3259 vcb->vcbCatalogFile->fcbClumpSize = volumeHeader->catalogFile.clumpSize;
3260 else if ( ((alternateVolumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3261 (alternateVolumeHeader->catalogFile.clumpSize <= maxClump) )
3262 vcb->vcbCatalogFile->fcbClumpSize = alternateVolumeHeader->catalogFile.clumpSize;
3263 else
3264 vcb->vcbCatalogFile->fcbClumpSize =
3265 (alternateVolumeHeader->catalogFile.extents[0].blockCount * vcb->vcbBlockSize);
3266
3267 // check out allocations file clump size
3268 if ( ((volumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3269 (volumeHeader->allocationFile.clumpSize <= maxClump) )
3270 vcb->vcbAllocationFile->fcbClumpSize = volumeHeader->allocationFile.clumpSize;
3271 else if ( ((alternateVolumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3272 (alternateVolumeHeader->allocationFile.clumpSize <= maxClump) )
3273 vcb->vcbAllocationFile->fcbClumpSize = alternateVolumeHeader->allocationFile.clumpSize;
3274 else
3275 vcb->vcbAllocationFile->fcbClumpSize =
3276 (alternateVolumeHeader->allocationFile.extents[0].blockCount * vcb->vcbBlockSize);
3277
3278 // check out attribute file clump size
3279 if (vcb->vcbAttributesFile) {
3280 if ( ((volumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3281 (volumeHeader->attributesFile.clumpSize <= maxClump) &&
3282 (volumeHeader->attributesFile.clumpSize != 0))
3283 vcb->vcbAttributesFile->fcbClumpSize = volumeHeader->attributesFile.clumpSize;
3284 else if ( ((alternateVolumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3285 (alternateVolumeHeader->attributesFile.clumpSize <= maxClump) &&
3286 (alternateVolumeHeader->attributesFile.clumpSize != 0))
3287 vcb->vcbAttributesFile->fcbClumpSize = alternateVolumeHeader->attributesFile.clumpSize;
3288 else if (vcb->vcbCatalogFile->fcbClumpSize != 0)
3289 // The original attribute clump may be too small, use catalog's
3290 vcb->vcbAttributesFile->fcbClumpSize = vcb->vcbCatalogFile->fcbClumpSize;
3291 else
3292 vcb->vcbAttributesFile->fcbClumpSize =
3293 alternateVolumeHeader->attributesFile.extents[0].blockCount * vcb->vcbBlockSize;
3294 }
3295
3296 CopyMemory( volumeHeader->finderInfo, vcb->vcbFinderInfo, sizeof(vcb->vcbFinderInfo) );
3297
3298 // Now compare verified Volume Header info (in the form of a vcb) with Volume Header info on disk
3299 result = CompareVolumeHeader( GPtr, volumeHeader );
3300
3301 // check to see that embedded volume info is correct in both wrapper MDBs
3302 CheckEmbeddedVolInfoInMDBs( GPtr );
3303
3304 }
3305 else // HFS
3306 {
3307 HFSMasterDirectoryBlock *mdbP;
3308 HFSMasterDirectoryBlock *alternateMDB;
3309
3310 //
3311 // get volume name from BTree Key
3312 //
3313
3314 alternateMDB = (HFSMasterDirectoryBlock *) altBlock.buffer;
3315 mdbP = (HFSMasterDirectoryBlock *) priBlock.buffer;
3316
3317 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3318
3319 // check out creation and last mod dates
3320 vcb->vcbCreateDate = alternateMDB->drCrDate; /* use creation date in alt MDB */
3321 vcb->vcbModifyDate = mdbP->drLsMod; /* don't change last mod date */
3322
3323 // verify volume attribute flags
3324 if ( (mdbP->drAtrb & VAtrb_Msk) == 0 )
3325 vcb->vcbAttributes = mdbP->drAtrb;
3326 else
3327 vcb->vcbAttributes = VAtrb_DFlt;
3328
3329 // verify allocation map ptr
3330 if ( mdbP->drAllocPtr < vcb->vcbTotalBlocks )
3331 vcb->vcbNextAllocation = mdbP->drAllocPtr;
3332 else
3333 vcb->vcbNextAllocation = 0;
3334
3335 // verify default clump size
3336 if ( (mdbP->drClpSiz > 0) &&
3337 (mdbP->drClpSiz <= maxClump) &&
3338 ((mdbP->drClpSiz % vcb->vcbBlockSize) == 0) )
3339 vcb->vcbDataClumpSize = mdbP->drClpSiz;
3340 else if ( (alternateMDB->drClpSiz > 0) &&
3341 (alternateMDB->drClpSiz <= maxClump) &&
3342 ((alternateMDB->drClpSiz % vcb->vcbBlockSize) == 0) )
3343 vcb->vcbDataClumpSize = alternateMDB->drClpSiz;
3344 else
3345 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3346
3347 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3348 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3349
3350 // verify next CNode ID
3351 if ( (mdbP->drNxtCNID > vcb->vcbNextCatalogID) && (mdbP->drNxtCNID <= (vcb->vcbNextCatalogID + 4096)) )
3352 vcb->vcbNextCatalogID = mdbP->drNxtCNID;
3353
3354 // verify the volume name
3355 result = ChkCName( GPtr, (const CatalogName*) &vcb->vcbVN, isHFSPlus );
3356 if ( result == noErr )
3357 if ( CmpBlock( mdbP->drVN, vcb->vcbVN, vcb->vcbVN[0] + 1 ) == 0 )
3358 CopyMemory( mdbP->drVN, vcb->vcbVN, kHFSMaxVolumeNameChars + 1 ); /* ...we have a good one */
3359
3360 // verify last backup date and backup sequence number
3361 vcb->vcbBackupDate = mdbP->drVolBkUp; /* don't change last backup date */
3362 vcb->vcbVSeqNum = mdbP->drVSeqNum; /* don't change last backup sequence # */
3363
3364 // verify write count
3365 vcb->vcbWriteCount = mdbP->drWrCnt; /* don't change write count */
3366
3367 // check out extent file and catalog clump sizes
3368 if ( ((mdbP->drXTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drXTClpSiz <= maxClump) )
3369 vcb->vcbExtentsFile->fcbClumpSize = mdbP->drXTClpSiz;
3370 else if ( ((alternateMDB->drXTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drXTClpSiz <= maxClump) )
3371 vcb->vcbExtentsFile->fcbClumpSize = alternateMDB->drXTClpSiz;
3372 else
3373 vcb->vcbExtentsFile->fcbClumpSize = (alternateMDB->drXTExtRec[0].blockCount * vcb->vcbBlockSize);
3374
3375 if ( ((mdbP->drCTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drCTClpSiz <= maxClump) )
3376 vcb->vcbCatalogFile->fcbClumpSize = mdbP->drCTClpSiz;
3377 else if ( ((alternateMDB->drCTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drCTClpSiz <= maxClump) )
3378 vcb->vcbCatalogFile->fcbClumpSize = alternateMDB->drCTClpSiz;
3379 else
3380 vcb->vcbCatalogFile->fcbClumpSize = (alternateMDB->drCTExtRec[0].blockCount * vcb->vcbBlockSize);
3381
3382 // just copy Finder info for now
3383 CopyMemory(mdbP->drFndrInfo, vcb->vcbFinderInfo, sizeof(mdbP->drFndrInfo));
3384
3385 // now compare verified MDB info with MDB info on disk
3386 result = CmpMDB( GPtr, mdbP);
3387 }
3388
3389 exit:
3390 if (priBlock.buffer)
3391 (void) ReleaseVolumeBlock(vcb, &priBlock, kReleaseBlock);
3392 if (altBlock.buffer)
3393 (void) ReleaseVolumeBlock(vcb, &altBlock, kReleaseBlock);
3394
3395 return (result);
3396
3397 } /* end of VInfoChk */
3398
3399
3400 /*------------------------------------------------------------------------------
3401
3402 Function: VLockedChk - (Volume Name Locked Check)
3403
3404 Function: Makes sure the volume name isn't locked. If it is locked, generate a repair order.
3405
3406 This function is not called if file sharing is operating.
3407
3408 Input: GPtr - pointer to scavenger global area
3409
3410 Output: VLockedChk - function result:
3411 0 = no error
3412 n = error code
3413 ------------------------------------------------------------------------------*/
3414
3415 OSErr VLockedChk( SGlobPtr GPtr )
3416 {
3417 UInt32 hint;
3418 CatalogKey foundKey;
3419 CatalogRecord record;
3420 UInt16 recSize;
3421 OSErr result;
3422 UInt16 frFlags;
3423 Boolean isHFSPlus;
3424 SVCB *calculatedVCB = GPtr->calculatedVCB;
3425 VolumeObjectPtr myVOPtr;
3426
3427 myVOPtr = GetVolumeObjectPtr( );
3428 isHFSPlus = VolumeObjectIsHFSPlus( );
3429 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
3430 GPtr->TarBlock = 0; /* no target block yet */
3431
3432 //
3433 // locate the catalog record for the root directory
3434 //
3435 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3436
3437 if ( result)
3438 {
3439 RcdError( GPtr, E_EntryNotFound );
3440 return( E_EntryNotFound );
3441 }
3442
3443 // put the volume name in the VCB
3444 if ( isHFSPlus == false )
3445 {
3446 CopyMemory( foundKey.hfs.nodeName, calculatedVCB->vcbVN, sizeof(calculatedVCB->vcbVN) );
3447 }
3448 else if ( myVOPtr->volumeType != kPureHFSPlusVolumeType )
3449 {
3450 HFSMasterDirectoryBlock *mdbP;
3451 BlockDescriptor block;
3452
3453 block.buffer = NULL;
3454 if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 )
3455 result = GetVolumeObjectPrimaryMDB( &block );
3456 else
3457 result = GetVolumeObjectAlternateMDB( &block );
3458 if ( result == noErr ) {
3459 mdbP = (HFSMasterDirectoryBlock *) block.buffer;
3460 CopyMemory( mdbP->drVN, calculatedVCB->vcbVN, sizeof(mdbP->drVN) );
3461 }
3462 if ( block.buffer != NULL )
3463 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock );
3464 ReturnIfError(result);
3465 }
3466 else // Because we don't have the unicode converters, just fill it with a dummy name.
3467 {
3468 CopyMemory( "\x0dPure HFS Plus", calculatedVCB->vcbVN, sizeof(Str27) );
3469 }
3470
3471 GPtr->TarBlock = hint;
3472 if ( isHFSPlus )
3473 CopyCatalogName( (const CatalogName *)&foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
3474 else
3475 CopyCatalogName( (const CatalogName *)&foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus );
3476
3477 if ( (record.recordType == kHFSPlusFolderRecord) || (record.recordType == kHFSFolderRecord) )
3478 {
3479 frFlags = record.recordType == kHFSPlusFolderRecord ?
3480 record.hfsPlusFolder.userInfo.frFlags :
3481 record.hfsFolder.userInfo.frFlags;
3482
3483 if ( frFlags & fNameLocked ) // name locked bit set?
3484 RcdNameLockedErr( GPtr, E_LockedDirName, frFlags );
3485 }
3486
3487 return( noErr );
3488 }
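
/*
 * Illustrative sketch, not part of the original fsck_hfs sources: the
 * Finder-flags test that VLockedChk() applies above, factored into a
 * standalone predicate.  The helper name is hypothetical; fNameLocked is
 * the Finder flag already used by the check above.
 */
__unused static Boolean
FolderNameIsLocked( const CatalogRecord *record )
{
	UInt16 frFlags;

	if ( (record->recordType != kHFSPlusFolderRecord) &&
	     (record->recordType != kHFSFolderRecord) )
		return false;

	frFlags = (record->recordType == kHFSPlusFolderRecord) ?
			record->hfsPlusFolder.userInfo.frFlags :
			record->hfsFolder.userInfo.frFlags;

	return ( (frFlags & fNameLocked) != 0 );
}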
3489
3490
3491 /*------------------------------------------------------------------------------
3492
3493 Name: RcdNameLockedErr
3494
3495 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3496 list, to describe a locked volume name for possible repair.
3497
3498 Input: GPtr - ptr to scavenger global data
3499 type - error code (E_xxx), which should be >0
3500 incorrect - the incorrect file flags as found in file record
3501
3502 Output: 0 - no error
3503 R_NoMem - not enough mem to allocate record
3504 ------------------------------------------------------------------------------*/
3505
3506 static int RcdNameLockedErr( SGlobPtr GPtr, SInt16 type, UInt32 incorrect ) /* for a consistency check */
3507 {
3508 RepairOrderPtr p; /* the new node we compile */
3509 int n; /* size of node we allocate */
3510 Boolean isHFSPlus;
3511
3512 isHFSPlus = VolumeObjectIsHFSPlus( );
3513 RcdError( GPtr, type ); /* first, record the error */
3514
3515 n = CatalogNameSize( &GPtr->CName, isHFSPlus );
3516
3517 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3518 if ( p==NULL ) /* quit if out of room */
3519 return ( R_NoMem );
3520
3521 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
3522
3523 p->type = type; /* save error info */
3524 p->correct = incorrect & ~fNameLocked; /* mask off the name locked bit */
3525 p->incorrect = incorrect;
3526 p->maskBit = (UInt16)fNameLocked;
3527 p->parid = 1;
3528
3529 GPtr->CatStat |= S_LockedDirName; /* set flag to trigger repair */
3530
3531 return( noErr ); /* successful return */
3532 }
3533
3534 /*------------------------------------------------------------------------------
3535
3536 Name: RecordBadExtent
3537
3538 Function: Allocates a RepairOrder for repairing bad extent.
3539
3540 Input: GPtr - ptr to scavenger global data
3541 fileID - fileID of the file with bad extent
3542 forkType - bad extent's fork type
3543 startBlock - start block of the bad extent record
3544 badExtentIndex - index of bad extent entry in the extent record
3545
3546 Output: 0 - no error
3547 R_NoMem - not enough mem to allocate record
3548 ------------------------------------------------------------------------------*/
3549
3550 static int RecordBadExtent(SGlobPtr GPtr, UInt32 fileID, UInt8 forkType,
3551 UInt32 startBlock, UInt32 badExtentIndex)
3552 {
3553 RepairOrderPtr p;
3554 Boolean isHFSPlus;
3555
3556 isHFSPlus = VolumeObjectIsHFSPlus();
3557
3558 p = AllocMinorRepairOrder(GPtr, 0);
3559 if (p == NULL) {
3560 return(R_NoMem);
3561 }
3562
3563 p->type = E_ExtEnt;
3564 p->forkType = forkType;
3565 p->correct = badExtentIndex;
3566 p->hint = startBlock;
3567 p->parid = fileID;
3568
3569 GPtr->CatStat |= S_BadExtent;
3570 return (0);
3571 }
3572
3573 /*
3574 * Build a catalog node thread key.
3575 */
3576 __unused static void
3577 buildthreadkey(UInt32 parentID, int std_hfs, CatalogKey *key)
3578 {
3579 if (std_hfs) {
3580 key->hfs.keyLength = kHFSCatalogKeyMinimumLength;
3581 key->hfs.reserved = 0;
3582 key->hfs.parentID = parentID;
3583 key->hfs.nodeName[0] = 0;
3584 } else {
3585 key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength;
3586 key->hfsPlus.parentID = parentID;
3587 key->hfsPlus.nodeName.length = 0;
3588 }
3589 }
3590
3591
3592 static void
3593 printpath(SGlobPtr GPtr, UInt32 fileID)
3594 {
3595 int result;
3596 char path[PATH_MAX * 4];
3597 unsigned int pathlen = PATH_MAX * 4;
3598
3599 if (fileID < kHFSFirstUserCatalogNodeID) {
3600 switch(fileID) {
3601 case kHFSExtentsFileID:
3602 printf("$Extents_Overflow_File\n");
3603 return;
3604 case kHFSCatalogFileID:
3605 printf("$Catalog_File\n");
3606 return;
3607 case kHFSAllocationFileID:
3608 printf("$Allocation_Bitmap_File\n");
3609 return;
3610 case kHFSAttributesFileID:
3611 printf("$Attributes_File\n");
3612 return;
3613 default:
3614 printf("$File_ID_%d\n", fileID);
3615 return;
3616 }
3617 }
3618
3619 result = GetFileNamePathByID(GPtr, fileID, path, &pathlen, NULL, NULL, NULL);
3620 if (result) {
3621 printf ("error %d getting path for id=%u\n", result, fileID);
3622 }
3623
3624 printf("\"ROOT_OF_VOLUME%s\" (file id=%u)\n", path, fileID);
3625 }
3626
3627 void
3628 CheckPhysicalMatch(SVCB *vcb, UInt32 startblk, UInt32 blkcount, UInt32 fileNumber, UInt8 forkType)
3629 {
3630 int i;
3631 u_int64_t blk, blk1, blk2;
3632 u_int64_t offset;
3633
3634 offset = (u_int64_t) startblk * (u_int64_t) vcb->vcbBlockSize;
3635
3636 if (vcb->vcbSignature == kHFSPlusSigWord)
3637 offset += vcb->vcbEmbeddedOffset; // offset into the wrapper
3638 else
3639 offset += vcb->vcbAlBlSt * 512ULL; // offset to start of volume
3640
3641 blk1 = offset / gBlockSize;
3642 blk2 = blk1 + ((blkcount * vcb->vcbBlockSize) / gBlockSize);
3643
3644 for (i = 0; i < gBlkListEntries; ++i) {
3645 blk = gBlockList[i];
3646
3647 if (blk >= blk1 && blk < blk2) {
3648 // printf("block %d is in file %d\n", blk, fileNumber);
3649 /* Do we need to grow the found blocks list? */
3650 if (gFoundBlockEntries % FOUND_BLOCKS_QUANTUM == 0) {
3651 struct found_blocks *new_blocks;
3652 new_blocks = realloc(gFoundBlocksList, (gFoundBlockEntries + FOUND_BLOCKS_QUANTUM) * sizeof(struct found_blocks));
3653 if (new_blocks == NULL) {
3654 fprintf(stderr, "CheckPhysicalMatch: Out of memory!\n");
3655 return;
3656 }
3657 gFoundBlocksList = new_blocks;
3658 }
3659 gFoundBlocksList[gFoundBlockEntries].block = blk;
3660 gFoundBlocksList[gFoundBlockEntries].fileID = fileNumber;
3661 ++gFoundBlockEntries;
3662 }
3663 }
3664 }
3665
3666 static int compare_found_blocks(const void *x1_arg, const void *x2_arg)
3667 {
3668 const struct found_blocks *x1 = x1_arg;
3669 const struct found_blocks *x2 = x2_arg;
3670
3671 if (x1->block < x2->block)
3672 return -1;
3673 else if (x1->block > x2->block)
3674 return 1;
3675 else {
3676 if (x1->fileID < x2->fileID)
3677 return -1;
3678 else if (x1->fileID > x2->fileID)
3679 return 1;
3680 }
3681
3682 return 0;
3683 }
3684
3685 void
3686 dumpblocklist(SGlobPtr GPtr)
3687 {
3688 int i, j;
3689 u_int64_t block;
3690
3691 /* Sort the found blocks */
3692 qsort(gFoundBlocksList, gFoundBlockEntries, sizeof(struct found_blocks), compare_found_blocks);
3693
3694 /*
3695 * Print out the blocks with matching files. In the case of overlapped
3696 * extents, the same block number will be printed multiple times, with
3697 * each file containing an overlapping extent. If overlapping extents
3698 * come from the same file, then that path will be printed multiple times.
3699 */
3700 for (i = 0; i < gFoundBlockEntries; ++i) {
3701 block = gFoundBlocksList[i].block;
3702
3703 printf("block %llu:\t", (unsigned long long) block);
3704 printpath(GPtr, gFoundBlocksList[i].fileID);
3705
3706 /* Remove block from the gBlockList */
3707 for (j = 0; j < gBlkListEntries; ++j) {
3708 if (gBlockList[j] == block) {
3709 gBlockList[j] = gBlockList[--gBlkListEntries];
3710 break;
3711 }
3712 }
3713 }
3714
3715 /* Print out the blocks without matching files */
3716 for (j = 0; j < gBlkListEntries; ++j) {
3717 printf("block %llu:\t*** NO MATCH ***\n", (unsigned long long) gBlockList[j]);
3718 }
3719 }
3720
3721 /*------------------------------------------------------------------------------
3722
3723 Function: CheckFileExtents - (Check File Extents)
3724
3725 Description:
3726 Verifies the extent information for file data or extended attribute data.
3727 It checks the correctness of the extent data. If the extent information is
3728 correct/valid, it updates the in-memory volume bitmap and the total number
3729 of valid blocks for the given file, and if overlapping extents exist, adds
3730 them to the overlap extents list. If the extent information is not correct,
3731 it considers the file truncated beyond the bad extent entry, reports only
3732 the total number of good blocks seen, and records the bad extent in a
3733 repair order so that the truncation can be detected and repaired. It does
3734 not include the invalid extent, or any extents after it, when checking the
3735 volume bitmap and hence overlapping extents. Note that currently the
3736 function returns an error if an invalid extent is found for system files
3737 or for extended attributes.
3738
3739 For data fork and resource fork of file - This function checks extent
3740 record present in catalog record as well as extent overflow records, if
3741 any, for given fileID.
3742
3743 For extended attribute data - This function only checks the extent record
3744 passed as parameter. If any extended attribute has overflow extents in
3745 the attribute btree, this function does not look them up. It is left
3746 to the caller to check the remaining extents for the given file's extended attribute.
3747
3748 Input:
3749 GPtr - pointer to scavenger global area
3750 fileNumber - file number for fork/extended attribute
3751 forkType - fork type
3752 00 - kDataFork - data fork
3753 01 - kEAData - extended attribute data extent
3754 ff - kRsrcFork - resource fork
3755 attrname - if fork type is kEAData, attrname contains a pointer to the
3756 name of the extended attribute whose extent is being checked; else
3757 it should be NULL. Note that the function assumes that this is
3758 a NULL-terminated string.
3759 extents - ptr to 1st extent record for the file
3760
3761 Output:
3762 CheckFileExtents - function result:
3763 noErr = no error
3764 n = error code
3765 blocksUsed - number of allocation blocks allocated to the file
3766 ------------------------------------------------------------------------------*/
3767
3768 OSErr CheckFileExtents( SGlobPtr GPtr, UInt32 fileNumber, UInt8 forkType,
3769 const unsigned char *attrname, const void *extents,
3770 UInt32 *blocksUsed)
3771 {
3772 UInt32 blockCount = 0;
3773 UInt32 extentBlockCount;
3774 UInt32 extentStartBlock;
3775 UInt32 hint;
3776 HFSPlusExtentKey key;
3777 HFSPlusExtentKey extentKey;
3778 HFSPlusExtentRecord extentRecord;
3779 UInt16 recSize;
3780 OSErr err = noErr;
3781 SInt16 i;
3782 Boolean firstRecord;
3783 Boolean isHFSPlus;
3784 unsigned int lastExtentIndex;
3785 Boolean foundBadExtent;
3786
3787 /* For all extended attribute extents, the attrname should not be NULL */
3788 if (forkType == kEAData) {
3789 assert(attrname != NULL);
3790 }
3791
3792 isHFSPlus = VolumeObjectIsHFSPlus( );
3793 firstRecord = true;
3794 foundBadExtent = false;
3795 lastExtentIndex = GPtr->numExtents;
3796
3797 while ( (extents != nil) && (err == noErr) )
3798 {
3799 // checkout the extent record first
3800 err = ChkExtRec( GPtr, fileNumber, extents, &lastExtentIndex );
3801 if (err != noErr) {
3802 DPRINTF (d_info, "%s: Bad extent for fileID %u in extent %u for startblock %u\n", __FUNCTION__, fileNumber, lastExtentIndex, blockCount);
3803 if (cur_debug_level & d_dump_record)
3804 {
3805 plog("Extents:\n");
3806 HexDump(extents, sizeof(HFSPlusExtentRecord), FALSE);
3807 plog("\n");
3808 }
3809
3810 /* Stop verification if bad extent is found for system file or EA */
3811 if ((fileNumber < kHFSFirstUserCatalogNodeID) ||
3812 (forkType == kEAData)) {
3813 break;
3814 }
3815
3816 /* store information about bad extent in repair order */
3817 (void) RecordBadExtent(GPtr, fileNumber, forkType, blockCount, lastExtentIndex);
3818 foundBadExtent = true;
3819 err = noErr;
3820 }
3821
3822 /* Check only till the last valid extent entry reported by ChkExtRec */
3823 for ( i=0 ; i<lastExtentIndex ; i++ ) // now checkout the extents
3824 {
3825 // HFS+/HFS moving extent fields into local variables for evaluation
3826 if ( isHFSPlus == true )
3827 {
3828 extentBlockCount = ((HFSPlusExtentDescriptor *)extents)[i].blockCount;
3829 extentStartBlock = ((HFSPlusExtentDescriptor *)extents)[i].startBlock;
3830 }
3831 else
3832 {
3833 extentBlockCount = ((HFSExtentDescriptor *)extents)[i].blockCount;
3834 extentStartBlock = ((HFSExtentDescriptor *)extents)[i].startBlock;
3835 }
3836
3837 if ( extentBlockCount == 0 )
3838 break;
3839
3840 if (gBlkListEntries != 0)
3841 CheckPhysicalMatch(GPtr->calculatedVCB, extentStartBlock, extentBlockCount, fileNumber, forkType);
3842
3843 err = CaptureBitmapBits(extentStartBlock, extentBlockCount);
3844 if (err == E_OvlExt) {
3845 err = AddExtentToOverlapList(GPtr, fileNumber, (char *)attrname, extentStartBlock, extentBlockCount, forkType);
3846 }
3847
3848 blockCount += extentBlockCount;
3849 }
3850
3851 if ( fileNumber == kHFSExtentsFileID ) // Extents file has no overflow extents
3852 break;
3853
3854 /* Found a bad extent for this file, so do not look up any extents
3855 * after the current extent. We assume that the file is truncated
3856 * at the bad extent entry.
3857 */
3858 if (foundBadExtent == true) {
3859 break;
3860 }
3861
3862 /* For extended attributes, only check the extent passed as parameter. The
3863 * caller will take care of checking other extents, if any, for given
3864 * extended attribute.
3865 */
3866 if (forkType == kEAData) {
3867 break;
3868 }
3869
3870 if ( firstRecord == true )
3871 {
3872 firstRecord = false;
3873
3874 // Set up the extent key
3875 BuildExtentKey( isHFSPlus, forkType, fileNumber, blockCount, (void *)&key );
3876
3877 err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, &key, kNoHint, (void *) &extentKey, (void *) &extentRecord, &recSize, &hint );
3878
3879 if ( err == btNotFound )
3880 {
3881 err = noErr; // no more extent records
3882 extents = nil;
3883 break;
3884 }
3885 else if ( err != noErr )
3886 {
3887 err = IntError( GPtr, err ); // error from SearchBTreeRecord
3888 return( err );
3889 }
3890 }
3891 else
3892 {
3893 err = GetBTreeRecord( GPtr->calculatedExtentsFCB, 1, &extentKey, extentRecord, &recSize, &hint );
3894
3895 if ( err == btNotFound )
3896 {
3897 err = noErr; // no more extent records
3898 extents = nil;
3899 break;
3900 }
3901 else if ( err != noErr )
3902 {
3903 err = IntError( GPtr, err ); /* error from BTGetRecord */
3904 return( err );
3905 }
3906
3907 // Check same file and fork
3908 if ( isHFSPlus )
3909 {
3910 if ( (extentKey.fileID != fileNumber) || (extentKey.forkType != forkType) )
3911 break;
3912 }
3913 else
3914 {
3915 if ( (((HFSExtentKey *) &extentKey)->fileID != fileNumber) || (((HFSExtentKey *) &extentKey)->forkType != forkType) )
3916 break;
3917 }
3918 }
3919
3920 extents = (void *) &extentRecord;
3921 }
3922
3923 *blocksUsed = blockCount;
3924
3925 return( err );
3926 }
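
/*
 * A minimal usage sketch, not part of the original fsck_hfs sources, for
 * CheckFileExtents() above: total the allocation blocks of a file's data
 * fork, given an HFS Plus catalog file record that has already been read
 * from the catalog B-tree.  The helper name is hypothetical.
 */
__unused static OSErr
CountDataForkBlocks( SGlobPtr GPtr, const HFSPlusCatalogFile *fileRec, UInt32 *blocksUsed )
{
	/* kDataFork with a NULL attribute name selects the plain data fork */
	return CheckFileExtents( GPtr, fileRec->fileID, kDataFork, NULL,
				fileRec->dataFork.extents, blocksUsed );
}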
3927
3928
3929 void BuildExtentKey( Boolean isHFSPlus, UInt8 forkType, HFSCatalogNodeID fileNumber, UInt32 blockNumber, void * key )
3930 {
3931 if ( isHFSPlus )
3932 {
3933 HFSPlusExtentKey *hfsPlusKey = (HFSPlusExtentKey*) key;
3934
3935 hfsPlusKey->keyLength = kHFSPlusExtentKeyMaximumLength;
3936 hfsPlusKey->forkType = forkType;
3937 hfsPlusKey->pad = 0;
3938 hfsPlusKey->fileID = fileNumber;
3939 hfsPlusKey->startBlock = blockNumber;
3940 }
3941 else
3942 {
3943 HFSExtentKey *hfsKey = (HFSExtentKey*) key;
3944
3945 hfsKey->keyLength = kHFSExtentKeyMaximumLength;
3946 hfsKey->forkType = forkType;
3947 hfsKey->fileID = fileNumber;
3948 hfsKey->startBlock = (UInt16) blockNumber;
3949 }
3950 }
3951
3952
3953
3954 //
3955 // Adds this extent to our OverlappedExtentList for later repair.
3956 //
3957 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrname, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType )
3958 {
3959 size_t newHandleSize;
3960 ExtentInfo extentInfo;
3961 ExtentsTable **extentsTableH;
3962 size_t attrlen;
3963
3964 ClearMemory(&extentInfo, sizeof(extentInfo));
3965 extentInfo.fileID = fileNumber;
3966 extentInfo.startBlock = extentStartBlock;
3967 extentInfo.blockCount = extentBlockCount;
3968 extentInfo.forkType = forkType;
3969 /* store the name of extended attribute */
3970 if (forkType == kEAData) {
3971 assert(attrname != NULL);
3972
3973 attrlen = strlen(attrname) + 1;
3974 extentInfo.attrname = malloc(attrlen);
3975 if (extentInfo.attrname == NULL) {
3976 return(memFullErr);
3977 }
3978 strlcpy(extentInfo.attrname, attrname, attrlen);
3979 }
3980
3981 // If it's uninitialized
3982 if ( GPtr->overlappedExtents == nil )
3983 {
3984 GPtr->overlappedExtents = (ExtentsTable **) NewHandleClear( sizeof(ExtentsTable) );
3985 extentsTableH = GPtr->overlappedExtents;
3986 }
3987 else
3988 {
3989 extentsTableH = GPtr->overlappedExtents;
3990
3991 if ( ExtentInfoExists( extentsTableH, &extentInfo) == true )
3992 return( noErr );
3993
3994 // Grow the Extents table for a new entry.
3995 newHandleSize = ( sizeof(ExtentInfo) ) + ( GetHandleSize( (Handle)extentsTableH ) );
3996 SetHandleSize( (Handle)extentsTableH, newHandleSize );
3997 }
3998
3999 // Copy the new extents into the end of the table
4000 CopyMemory( &extentInfo, &((**extentsTableH).extentInfo[(**extentsTableH).count]), sizeof(ExtentInfo) );
4001
4002 // Update the overlap extent bit
4003 GPtr->VIStat |= S_OverlappingExtents;
4004
4005 // Update the extent table count
4006 (**extentsTableH).count++;
4007
4008 return( noErr );
4009 }
4010
4011
4012 /* Check whether the given extentInfo exists in the extents table */
4013 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo)
4014 {
4015 UInt32 i;
4016 ExtentInfo *aryExtentInfo;
4017
4018
4019 for ( i = 0 ; i < (**extentsTableH).count ; i++ )
4020 {
4021 aryExtentInfo = &((**extentsTableH).extentInfo[i]);
4022
4023 if ( extentInfo->fileID == aryExtentInfo->fileID )
4024 {
4025 if ( (extentInfo->startBlock == aryExtentInfo->startBlock) &&
4026 (extentInfo->blockCount == aryExtentInfo->blockCount) &&
4027 (extentInfo->forkType == aryExtentInfo->forkType) )
4028 {
4029 /* startBlock, blockCount, forkType are the same.
4030 * Compare the extended attribute names, if they exist.
4031 */
4032
4033 /* If no attribute name exists, the two extents are the same */
4034 if ((extentInfo->attrname == NULL) &&
4035 (aryExtentInfo->attrname == NULL)) {
4036 return(true);
4037 }
4038
4039 /* If only one attribute name exists, the two extents are not the same */
4040 if (((extentInfo->attrname != NULL) && (aryExtentInfo->attrname == NULL)) ||
4041 ((extentInfo->attrname == NULL) && (aryExtentInfo->attrname != NULL))) {
4042 return(false);
4043 }
4044
4045 /* Both attribute names exist. Compare the names */
4046 if (!strcmp(extentInfo->attrname, aryExtentInfo->attrname)) {
4047 return (true);
4048 } else {
4049 return (false);
4050 }
4051
4052 }
4053 }
4054 }
4055
4056 return( false );
4057 }
4058
4059 /* Function : DoesOverlap
4060 *
4061 * Description:
4062 * This function takes a start block and the count of blocks in a
4063 * given extent and compares it against the list of overlapped
4064 * extents in the global structure.
4065 * This is useful in finding the original files that overlap with
4066 * the files found in catalog btree check. If a file is found
4067 * overlapping, it is added to the overlap list.
4068 *
4069 * Input:
4070 * 1. GPtr - global scavenger pointer.
4071 * 2. fileID - file ID being checked.
4072 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4073 * 4. startBlock - start block in extent.
4074 * 5. blockCount - total number of blocks in extent.
4075 * 6. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
4076 *
4077 * Output: isOverlapped - Boolean value of true or false.
4078 */
4079 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType)
4080 {
4081 int i;
4082 Boolean isOverlapped = false;
4083 ExtentInfo *curExtentInfo;
4084 ExtentsTable **extentsTableH = GPtr->overlappedExtents;
4085
4086 for (i = 0; i < (**extentsTableH).count; i++) {
4087 curExtentInfo = &((**extentsTableH).extentInfo[i]);
4088 /* Check extents */
4089 if (curExtentInfo->startBlock < startBlock) {
4090 if ((curExtentInfo->startBlock + curExtentInfo->blockCount) > startBlock) {
4091 isOverlapped = true;
4092 break;
4093 }
4094 } else { /* curExtentInfo->startBlock >= startBlock */
4095 if (curExtentInfo->startBlock < (startBlock + blockCount)) {
4096 isOverlapped = true;
4097 break;
4098 }
4099 }
4100 } /* for loop Extents Table */
4101
4102 /* Add this extent to overlap list */
4103 if (isOverlapped) {
4104 AddExtentToOverlapList(GPtr, fileID, attrname, startBlock, blockCount, forkType);
4105 }
4106
4107 return isOverlapped;
4108 } /* DoesOverlap */
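
/*
 * Illustrative sketch, not part of the original fsck_hfs sources: the core
 * interval test performed by DoesOverlap() above, reduced to a pure
 * predicate.  Two extents [startA, startA+countA) and [startB, startB+countB)
 * overlap exactly when each one starts before the other ends.  The helper
 * name is hypothetical.
 */
__unused static Boolean
ExtentsIntersect( UInt32 startA, UInt32 countA, UInt32 startB, UInt32 countB )
{
	return ( (startA < (startB + countB)) && (startB < (startA + countA)) );
}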
4109
4110 /* Function : CheckHFSPlusExtentRecords
4111 *
4112 * Description:
4113 * For all valid extents, this function calls DoesOverlap to find
4114 * if a given extent is overlapping with another extent existing
4115 * in the overlap list.
4116 *
4117 * Input:
4118 * 1. GPtr - global scavenger pointer.
4119 * 2. fileID - file ID being checked.
4120 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4121 * 4. extent - extent information to check.
4122 * 5. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
4123 *
4124 * Output: None.
4125 */
4126 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType)
4127 {
4128 int i;
4129
4130 /* Check for overlapping extents for all extents in given extent data */
4131 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4132 if (extent[i].startBlock == 0) {
4133 break;
4134 }
4135 DoesOverlap(GPtr, fileID, attrname, extent[i].startBlock, extent[i].blockCount, forkType);
4136 }
4137 return;
4138 } /* CheckHFSPlusExtentRecords */
4139
4140 /* Function : CheckHFSExtentRecords
4141 *
4142 * Description:
4143 * For all valid extents, this function calls DoesOverlap to find
4144 * if a given extent is overlapping with another extent existing
4145 * in the overlap list.
4146 *
4147 * Input:
4148 * 1. GPtr - global scavenger pointer.
4149 * 2. fileID - file ID being checked.
4150 * 3. extent - extent information to check.
4151 * 4. forkType - type of fork being checked (kDataFork, kRsrcFork).
4152 *
4153 * Output: None.
4154 */
4155 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType)
4156 {
4157 int i;
4158
4159 /* Check for overlapping extents for all extents in given extents */
4160 for (i = 0; i < kHFSExtentDensity; i++) {
4161 if (extent[i].startBlock == 0) {
4162 break;
4163 }
4164 DoesOverlap(GPtr, fileID, NULL, extent[i].startBlock, extent[i].blockCount, forkType);
4165 }
4166 return;
4167 } /* CheckHFSExtentRecords */
4168
4169 /* Function: FindOrigOverlapFiles
4170 *
4171 * Description:
4172 * This function is called only if the btree check results in
4173 * overlapped extent errors. The btree checks do not find
4174 * out the original files whose extents overlap with the one
4175 * being reported in their check. This function finds all the
4176 * original files whose extents are being overlapped.
4177 *
4178 * This function relies on comparison of extents with Overlap list
4179 * created in verify stage. The list is also updated with the
4180 * overlapped extents found in this function.
4181 *
4182 * 1. Compare extents for all the files located in volume header.
4183 * 2. Traverse catalog btree and compare extents of all files.
4184 * 3. Traverse extents btree and compare extents for all entries.
4185 *
4186 * Input: GPtr - pointer to global scavenger area.
4187 *
4188 * Output: err - function result
4189 * zero means success
4190 * non-zero means failure
4191 */
4192 int FindOrigOverlapFiles(SGlobPtr GPtr)
4193 {
4194 OSErr err = noErr;
4195 Boolean isHFSPlus;
4196
4197 UInt16 selCode; /* select access pattern for BTree */
4198 UInt16 recordSize;
4199 UInt32 hint;
4200
4201 CatalogRecord catRecord;
4202 CatalogKey catKey;
4203
4204 ExtentRecord extentRecord;
4205 ExtentKey extentKey;
4206
4207 HFSPlusAttrRecord attrRecord;
4208 HFSPlusAttrKey attrKey;
4209 char attrName[XATTR_MAXNAMELEN];
4210 size_t len;
4211
4212 SVCB *calculatedVCB = GPtr->calculatedVCB;
4213
4214 isHFSPlus = VolumeObjectIsHFSPlus();
4215
4216 /* Check file extents from volume header */
4217 if (isHFSPlus) {
4218 /* allocation file */
4219 if (calculatedVCB->vcbAllocationFile) {
4220 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAllocationFile->fcbFileID, NULL,
4221 calculatedVCB->vcbAllocationFile->fcbExtents32, kDataFork);
4222 }
4223
4224 /* extents file */
4225 if (calculatedVCB->vcbExtentsFile) {
4226 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID, NULL,
4227 calculatedVCB->vcbExtentsFile->fcbExtents32, kDataFork);
4228 }
4229
4230 /* catalog file */
4231 if (calculatedVCB->vcbCatalogFile) {
4232 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID, NULL,
4233 calculatedVCB->vcbCatalogFile->fcbExtents32, kDataFork);
4234 }
4235
4236 /* attributes file */
4237 if (calculatedVCB->vcbAttributesFile) {
4238 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAttributesFile->fcbFileID, NULL,
4239 calculatedVCB->vcbAttributesFile->fcbExtents32, kDataFork);
4240 }
4241
4242 /* startup file */
4243 if (calculatedVCB->vcbStartupFile) {
4244 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbStartupFile->fcbFileID, NULL,
4245 calculatedVCB->vcbStartupFile->fcbExtents32, kDataFork);
4246 }
4247 } else {
4248 /* extents file */
4249 if (calculatedVCB->vcbExtentsFile) {
4250 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID,
4251 calculatedVCB->vcbExtentsFile->fcbExtents16, kDataFork);
4252 }
4253
4254 /* catalog file */
4255 if (calculatedVCB->vcbCatalogFile) {
4256 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID,
4257 calculatedVCB->vcbCatalogFile->fcbExtents16, kDataFork);
4258 }
4259 }
4260
4261 /* Traverse the catalog btree */
4262 selCode = 0x8001; /* Get first record from BTree */
4263 err = GetBTreeRecord(GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint);
4264 if (err != noErr) {
4265 goto traverseExtents;
4266 }
4267 selCode = 1; /* Get next record */
4268 do {
4269 if ((catRecord.recordType == kHFSPlusFileRecord) ||
4270 (catRecord.recordType == kHFSFileRecord)) {
4271
4272 if (isHFSPlus) {
4273 /* HFSPlus data fork */
4274 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4275 catRecord.hfsPlusFile.dataFork.extents, kDataFork);
4276
4277 /* HFSPlus resource fork */
4278 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4279 catRecord.hfsPlusFile.resourceFork.extents, kRsrcFork);
4280 } else {
4281 /* HFS data extent */
4282 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4283 catRecord.hfsFile.dataExtents, kDataFork);
4284
4285 /* HFS resource extent */
4286 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4287 catRecord.hfsFile.rsrcExtents, kRsrcFork);
4288 }
4289 }
4290
4291 /* Access the next record */
4292 err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint );
4293 } while (err == noErr);
4294
4295 traverseExtents:
4296 /* Traverse the extents btree */
4297 selCode = 0x8001; /* Get first record from BTree */
4298 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4299 if (err != noErr) {
4300 goto traverseAttribute;
4301 }
4302 selCode = 1; /* Get next record */
4303 do {
4304 if (isHFSPlus) {
4305 CheckHFSPlusExtentRecords(GPtr, extentKey.hfsPlus.fileID, NULL,
4306 extentRecord.hfsPlus, extentKey.hfsPlus.forkType);
4307 } else {
4308 CheckHFSExtentRecords(GPtr, extentKey.hfs.fileID, extentRecord.hfs,
4309 extentKey.hfs.forkType);
4310 }
4311
4312 /* Access the next record */
4313 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4314 } while (err == noErr);
4315
4316 traverseAttribute:
4317 /* Extended attributes are only supported in HFS Plus */
4318 if (!isHFSPlus) {
4319 goto out;
4320 }
4321
4322 /* Traverse the attribute btree */
4323 selCode = 0x8001; /* Get first record from BTree */
4324 /* Warning: Attribute record of type kHFSPlusAttrInlineData may be
4325 * truncated on read! (4425232). This function only uses recordType
4326 * field from inline attribute record.
4327 */
4328 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4329 if (err != noErr) {
4330 goto out;
4331 }
4332 selCode = 1; /* Get next record */
4333 do {
4334 if (attrRecord.recordType == kHFSPlusAttrForkData) {
4335 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4336 attrName[len] = '\0';
4337
4338 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.forkData.theFork.extents, kEAData);
4339 } else if (attrRecord.recordType == kHFSPlusAttrExtents) {
4340 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4341 attrName[len] = '\0';
4342
4343 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.overflowExtents.extents, kEAData);
4344 }
4345
4346 /* Access the next record
4347 * Warning: Attribute record of type kHFSPlusAttrInlineData may be
4348 * truncated on read! (4425232). This function only uses recordType
4349 * field from inline attribute record.
4350 */
4351 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4352 } while (err == noErr);
4353
4354 out:
4355 if (err == btNotFound) {
4356 err = noErr;
4357 }
4358 return err;
4359 } /* FindOrigOverlapFiles */
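
/*
 * Illustrative sketch, not part of the original fsck_hfs sources, of the
 * B-tree walk used by FindOrigOverlapFiles() above: a selection code of
 * 0x8001 fetches the first leaf record, and a selection code of 1 then
 * steps to each following record until GetBTreeRecord() returns btNotFound.
 * The helper name is hypothetical.
 */
__unused static int
CountCatalogLeafRecords( SGlobPtr GPtr )
{
	CatalogKey key;
	CatalogRecord record;
	UInt16 recSize;
	UInt32 hint;
	int count = 0;
	OSErr err;

	err = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &key, &record, &recSize, &hint );
	while ( err == noErr ) {
		++count;
		err = GetBTreeRecord( GPtr->calculatedCatalogFCB, 1, &key, &record, &recSize, &hint );
	}
	return( count );
}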
4360
4361 /* Function: PrintOverlapFiles
4362 *
4363 * Description: Print the information about all unique overlapping files.
4364 * 1. Sort the overlap extent in increasing order of fileID
4365 * 2. For every unique fileID, prefix the string with fileID and find the
4366 * filename/path based on fileID.
4367 * If fileID > kHFSFirstUserCatalogNodeID, find path to file
4368 * Else, find name of the system file.
4369 * 3. Print the new string.
4370 * Note that the path is printed only for HFS Plus volumes and not for
4371 * plain HFS volumes. This is done by not allocating a buffer for finding
4372 * the file path.
4373 *
4374 * Input:
4375 * GPtr - Global scavenger structure pointer.
4376 *
4377 * Output:
4378 * nothing (void)
4379 */
4380 void PrintOverlapFiles (SGlobPtr GPtr)
4381 {
4382 OSErr err;
4383 ExtentsTable **extentsTableH;
4384 ExtentInfo *extentInfo;
4385 unsigned int numOverlapExtents;
4386 unsigned int buflen, filepathlen;
4387 char *filepath = NULL;
4388 UInt32 lastID = 0;
4389 Boolean printMsg;
4390 Boolean isHFSPlus;
4391 int i;
4392
4393 isHFSPlus = VolumeObjectIsHFSPlus();
4394
4395 extentsTableH = GPtr->overlappedExtents;
4396 numOverlapExtents = (**extentsTableH).count;
4397
4398 /* Sort the list according to file ID */
4399 qsort((**extentsTableH).extentInfo, numOverlapExtents, sizeof(ExtentInfo),
4400 CompareExtentFileID);
4401
4402 buflen = PATH_MAX * 4;
4403 /* Allocate buffer for the file path */
4404 if (isHFSPlus) {
4405 filepath = malloc (buflen);
4406 }
4407
4408 for (i = 0; i < numOverlapExtents; i++) {
4409 extentInfo = &((**extentsTableH).extentInfo[i]);
4410
4411 /* Skip the same fileID */
4412 if (lastID == extentInfo->fileID) {
4413 continue;
4414 }
4415
4416 lastID = extentInfo->fileID;
4417 printMsg = false;
4418
4419 if (filepath) {
4420 filepathlen = buflen;
4421 if (extentInfo->fileID >= kHFSFirstUserCatalogNodeID) {
4422 /* Lookup the file path */
4423 err = GetFileNamePathByID (GPtr, extentInfo->fileID, filepath, &filepathlen, NULL, NULL, NULL);
4424 } else {
4425 /* Get system filename */
4426 err = GetSystemFileName (extentInfo->fileID, filepath, &filepathlen);
4427 }
4428
4429 if (err == noErr) {
4430 /* print fileID, filepath */
4431 fsckPrint(GPtr->context, E_OvlExt, extentInfo->fileID, filepath);
4432 printMsg = true;
4433 }
4434
4435 if (fsckGetVerbosity(GPtr->context) >= kDebugLog) {
4436 plog ("\textentType=0x%x, startBlock=0x%x, blockCount=0x%x, attrName=%s\n",
4437 extentInfo->forkType, extentInfo->startBlock, extentInfo->blockCount, extentInfo->attrname);
4438 }
4439 }
4440
4441 if (printMsg == false) {
4442 /* print only fileID */
4443 fsckPrint(GPtr->context, E_OvlExtID, extentInfo->fileID);
4444 }
4445 }
4446
4447 if (filepath) {
4448 free (filepath);
4449 }
4450
4451 return;
4452 } /* PrintOverlapFiles */
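
/*
 * Illustrative sketch, not part of the original fsck_hfs sources, of the
 * de-duplication pattern PrintOverlapFiles() above relies on: once the
 * overlap table is sorted by fileID, each unique file is visited exactly
 * once by skipping entries whose fileID matches the previous entry.  The
 * helper name is hypothetical.
 */
__unused static unsigned int
CountUniqueOverlapFileIDs( ExtentsTable **extentsTableH )
{
	unsigned int i;
	unsigned int unique = 0;
	UInt32 lastID = 0;
	UInt32 curID;

	for ( i = 0; i < (**extentsTableH).count; i++ ) {
		curID = (**extentsTableH).extentInfo[i].fileID;
		if ( (i == 0) || (curID != lastID) ) {
			unique++;
			lastID = curID;
		}
	}
	return( unique );
}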
4453
4454 /* Function: CompareExtentFileID
4455 *
4456 * Description: Compares the fileIDs from two ExtentInfo structures and returns
4457 * the comparison result (used to sort in ascending order of fileID).
4458 *
4459 * Input:
4460 * first and second - void pointers to ExtentInfo structure.
4461 *
4462 * Output:
4463 * >0 if first > second
4464 * =0 if first == second
4465 * <0 if first < second
4466 */
4467 static int CompareExtentFileID(const void *first, const void *second)
4468 {
4469 return (((ExtentInfo *)first)->fileID -
4470 ((ExtentInfo *)second)->fileID);
4471 } /* CompareExtentFileID */
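
/*
 * Illustrative alternative, not part of the original fsck_hfs sources: the
 * unsigned subtraction in CompareExtentFileID() above can wrap for widely
 * separated fileID values, so an explicit comparison is the safer general
 * pattern for qsort callbacks.  The function name is hypothetical.
 */
__unused static int
CompareExtentFileIDSafe( const void *first, const void *second )
{
	UInt32 a = ((const ExtentInfo *)first)->fileID;
	UInt32 b = ((const ExtentInfo *)second)->fileID;

	if ( a < b )
		return( -1 );
	if ( a > b )
		return( 1 );
	return( 0 );
}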
4472
4473 /* Function: journal_replay
4474 *
4475 * Description: Replay journal on a journaled HFS+ volume. This function
4476 * returns success if the volume is not journaled or the journal was not
4477 * dirty. If there was any error in replaying the journal, a non-zero value
4478 * is returned.
4479 *
4480 * Output:
4481 * 0 - success, non-zero - failure.
4482 */
4483 //int journal_replay(SGlobPtr gptr)
4484 int journal_replay(const char *block_device)
4485 {
4486 int retval = 0;
4487 struct vfsconf vfc;
4488 int mib[4];
4489 int jfd;
4490
4491 jfd = open(block_device, O_RDWR);
4492 if (jfd == -1) {
4493 retval = errno;
4494 if (debug)
4495 fplog(stderr, "Unable to open block device %s: %s", block_device, strerror(errno));
4496 goto out;
4497 }
4498
4499 retval = getvfsbyname("hfs", &vfc);
4500 if (retval) {
4501 close(jfd);
4502 goto out;
4503 }
4504
4505 mib[0] = CTL_VFS;
4506 mib[1] = vfc.vfc_typenum;
4507 mib[2] = HFS_REPLAY_JOURNAL;
4508 mib[3] = jfd;
4509 retval = sysctl(mib, 4, NULL, NULL, NULL, 0);
4510 if (retval) {
4511 retval = errno;
4512 }
4513 (void)close(jfd);
4514
4515 out:
4516 return retval;
4517 }
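
/*
 * A minimal usage sketch, not part of the original fsck_hfs sources:
 * replay the journal on a block device before running further checks.
 * The device path below is a hypothetical example, as is the helper name.
 */
__unused static int
ReplayJournalExample( void )
{
	const char *device = "/dev/disk2s1";	/* hypothetical device path */
	int err;

	err = journal_replay( device );
	if ( err != 0 )
		fprintf( stderr, "journal replay failed for %s: %s\n", device, strerror( err ) );

	return( err );
}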
4518