1 /*
2 * Copyright (c) 1999-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 File: SVerify1.c
25
26 Contains: Volume verification routines used by the fsck_hfs scavenger.
27
28 Version: xxx put version here xxx
29
30 Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved.
31
32 */
33
34 #include "Scavenger.h"
35 #include "../cache.h"
36 #include <stdlib.h>
37 #include <stddef.h>
38 #include <unistd.h>
39 #include <errno.h>
40 #include <fcntl.h>
41 #include <limits.h>
42
43 #include <libkern/OSByteOrder.h>
44 #define SW16(x) OSSwapBigToHostInt16(x)
45 #define SW32(x) OSSwapBigToHostInt32(x)
46 #define SW64(x) OSSwapBigToHostInt64(x)
47
48 extern int OpenDeviceByUUID(void *uuidp, char **nameptr);
49
50 // internal routine prototypes
51
52 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid );
53
54 static int RcdNameLockedErr( SGlobPtr GPtr, OSErr type, UInt32 incorrect );
55
56 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb );
57
58 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb );
59
60 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType );
61 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector );
62
63 /* overlapping extents verification functions prototype */
64 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrName, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType );
65
66 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo);
67
68 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType);
69
70 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType);
71
72 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType);
73
74 static int CompareExtentFileID(const void *first, const void *second);
75
76 /*
77 * Check if a volume is journaled.
78 *
79 * If journal_bit_only is true, the function only checks
80 * whether kHFSVolumeJournaledBit is set. If the bit
81 * is set, the function returns 1, otherwise 0.
82 *
83 * If journal_bit_only is false then, in addition to checking
84 * kHFSVolumeJournaledBit, the function also checks whether the
85 * last mounted version indicates a failed journal replay,
86 * whether runtime corruption was detected, or whether the volume
87 * was last mounted by a non-journaling implementation and was
88 * not unmounted cleanly.
89 * If all of the above conditions are false and the journal
90 * bit is set, the function returns 1 to indicate that the
91 * volume is truly journaled; otherwise it returns 0.
92 *
93 * returns: 0 not journaled or any of the above conditions are true
94 * 1 journaled
95 *
96 */
97 int
98 CheckIfJournaled(SGlobPtr GPtr, Boolean journal_bit_only)
99 {
100 #define kIDSector 2
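/* Sector 2 (i.e. byte offset 1024) holds the HFS MDB or the HFS+ Volume Header. */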
101
102 OSErr err;
103 int result;
104 HFSMasterDirectoryBlock *mdbp;
105 HFSPlusVolumeHeader *vhp;
106 SVCB *vcb = GPtr->calculatedVCB;
107 ReleaseBlockOptions rbOptions;
108 BlockDescriptor block;
109
110 vhp = (HFSPlusVolumeHeader *) NULL;
111 rbOptions = kReleaseBlock;
112
113 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
114 if (err) return (0);
115
116 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
117
118 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
119 vhp = (HFSPlusVolumeHeader *) block.buffer;
120
121 } else if (mdbp->drSigWord == kHFSSigWord) {
122
123 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
124 UInt32 vhSector;
125 UInt32 blkSectors;
126
127 blkSectors = mdbp->drAlBlkSiz / 512;
128 vhSector = mdbp->drAlBlSt;
129 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
130 vhSector += kIDSector;
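// vhSector now addresses sector 2 of the embedded HFS+ volume: drAlBlSt and
// kIDSector are in 512-byte sectors, while drEmbedExtent.startBlock is in
// HFS allocation blocks of blkSectors sectors each.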
131
132 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
133 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
134 if (err) return (0);
135
136 vhp = (HFSPlusVolumeHeader *) block.buffer;
137 mdbp = (HFSMasterDirectoryBlock *) NULL;
138
139 }
140 }
141
142 if ((vhp != NULL) && (ValidVolumeHeader(vhp) == noErr)) {
143 result = ((vhp->attributes & kHFSVolumeJournaledMask) != 0);
144 if (journal_bit_only == true) {
145 goto out;
146 }
147
148 // even if journaling is enabled for this volume, we'll return
149 // false if it wasn't unmounted cleanly and it was previously
150 // mounted by someone that doesn't know about journaling.
151 // or if lastMountedVersion is kFSKMountVersion
152 if ( vhp->lastMountedVersion == kFSKMountVersion ||
153 (vhp->attributes & kHFSVolumeInconsistentMask) ||
154 ((vhp->lastMountedVersion != kHFSJMountVersion) &&
155 (vhp->attributes & kHFSVolumeUnmountedMask) == 0)) {
156 result = 0;
157 }
158 } else {
159 result = 0;
160 }
161
162 out:
163 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
164
165 return (result);
166 }
167
168 /*
169 * Get the JournalInfoBlock from a volume.
170 *
171 * It borrows code to get the volume header. Note that it
172 * uses the primary volume header, not the alternate one.
173 * It returns 0 on success, or an error value.
174 * If requested, it will also set the block size (as a 32-bit
175 * value), via bsizep -- this is useful because the journal code
176 * needs to know the volume blocksize, but it doesn't necessarily
177 * have the header.
178 *
179 * Note also that it does direct reads, rather than going through
180 * the cache code. This simplifies getting the JIB.
181 */
182
183 static OSErr
184 GetJournalInfoBlock(SGlobPtr GPtr, JournalInfoBlock *jibp, UInt32 *bsizep)
185 {
186 #define kIDSector 2
187
188 OSErr err;
189 int result = 0;
190 UInt32 jiBlk = 0;
191 HFSMasterDirectoryBlock *mdbp;
192 HFSPlusVolumeHeader *vhp;
193 SVCB *vcb = GPtr->calculatedVCB;
194 ReleaseBlockOptions rbOptions;
195 BlockDescriptor block;
196 size_t blockSize = 0;
197 off_t embeddedOffset = 0;
198
199 vhp = (HFSPlusVolumeHeader *) NULL;
200 rbOptions = kReleaseBlock;
201
202 if (jibp == NULL)
203 return paramErr;
204
205 err = GetVolumeBlock(vcb, kIDSector, kGetBlock, &block);
206 if (err) return (err);
207
208 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
209
210 if (mdbp->drSigWord == kHFSPlusSigWord || mdbp->drSigWord == kHFSXSigWord) {
211 vhp = (HFSPlusVolumeHeader *) block.buffer;
212
213 } else if (mdbp->drSigWord == kHFSSigWord) {
214
215 if (mdbp->drEmbedSigWord == kHFSPlusSigWord) {
216 UInt32 vhSector;
217 UInt32 blkSectors;
218
219 blkSectors = mdbp->drAlBlkSiz / 512;
220 vhSector = mdbp->drAlBlSt;
221 vhSector += blkSectors * mdbp->drEmbedExtent.startBlock;
222 vhSector += kIDSector;
223
224 embeddedOffset = (mdbp->drEmbedExtent.startBlock * mdbp->drAlBlkSiz) + (mdbp->drAlBlSt * Blk_Size);
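// embeddedOffset is the byte offset of the embedded HFS+ volume from the
// start of the device; it is added to the journal info block offset when
// the block is read with pread() below.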
225 if (debug)
226 plog("Embedded offset is %lld\n", embeddedOffset);
227
228 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
229 err = GetVolumeBlock(vcb, vhSector, kGetBlock, &block);
230 if (err) return (err);
231
232 vhp = (HFSPlusVolumeHeader *) block.buffer;
233 mdbp = (HFSMasterDirectoryBlock *) NULL;
234
235 }
236 }
237
238 if (vhp == NULL) {
239 result = paramErr;
240 goto out;
241 }
242 if ((err = ValidVolumeHeader(vhp)) != noErr) {
243 result = err;
244 goto out;
245 }
246
247 // journalInfoBlock is not automatically swapped
248 jiBlk = SW32(vhp->journalInfoBlock);
249 blockSize = vhp->blockSize;
250 (void)ReleaseVolumeBlock(vcb, &block, rbOptions);
251
252 if (jiBlk) {
253 int jfd = GPtr->DrvNum;
254 uint8_t block[blockSize];
255 ssize_t nread;
256
257 nread = pread(jfd, block, blockSize, (off_t)jiBlk * blockSize + embeddedOffset);
258 if (nread == blockSize) {
259 if (jibp)
260 memcpy(jibp, block, sizeof(JournalInfoBlock));
261 if (bsizep)
262 *bsizep = blockSize;
263 result = 0;
264 } else {
265 if (debug)
266 plog("%s: Tried to read JIB, got %zd\n", __FUNCTION__, nread);
267 /* Report the failed or short read even when debug is off. */
268 result = EINVAL;
269 }
270 }
271
272 out:
273 return (result);
274 }
275
276 /*
277 * Journal checksum calculation, taken directly from TN1150.
278 */
279 static int
280 calc_checksum(unsigned char *ptr, int len)
281 {
282 int i, cksum=0;
283
284 for(i=0; i < len; i++, ptr++) {
285 cksum = (cksum << 8) ^ (cksum + *ptr);
286 }
287
288 return (~cksum);
289 }
290
291 /*
292 * The journal_header structure is not defined in <hfs/hfs_format.h>;
293 * it's described in TN1150. It is on disk in the endian mode that was
294 * used to write it, so we may or may not need to swap the fields.
295 */
296 typedef struct journal_header {
297 UInt32 magic;
298 UInt32 endian;
299 UInt64 start;
300 UInt64 end;
301 UInt64 size;
302 UInt32 blhdr_size;
303 UInt32 checksum;
304 UInt32 jhdr_size;
305 UInt32 sequence_num;
306 } journal_header;
307
308 #define JOURNAL_HEADER_MAGIC 0x4a4e4c78
309 #define ENDIAN_MAGIC 0x12345678
310 #define JOURNAL_HEADER_CKSUM_SIZE (offsetof(struct journal_header, sequence_num))
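/* The checksum covers the header fields from magic through jhdr_size;
 * sequence_num and anything after it are excluded. */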
311
312 /*
313 * Determine if a journal is empty.
314 * This code can use an in-filesystem, or external, journal.
315 * In general, it returns 0 if the journal exists, and appears to
316 * be non-empty (that is, start and end in the journal header
317 * differ); it will return 1 if it exists and is empty, or if
318 * there was a problem getting the journal. (This behaviour was
319 * chosen because it mimics the existing behaviour of fsck_hfs,
320 * which has traditionally done nothing with the journal. Future
321 * versions may be more demanding.)
322 *
323 * <jp> is an OUT parameter: the contents of the structure it points
324 * to are filled in by this routine. (The reasoning for doing this
325 * is because this routine has to open the journal info block, and read
326 * from the journal device, so putting this in another function was
327 * duplicative and error-prone. By making it a structure instead of
328 * discrete arguments, it can also be extended in the future if necessary.)
329 */
330 int
331 IsJournalEmpty(SGlobPtr GPtr, fsckJournalInfo_t *jp)
332 {
333 int retval = 1;
334 OSErr result;
335 OSErr err = 0;
336 JournalInfoBlock jib;
337 UInt32 bsize;
338
339 result = GetJournalInfoBlock(GPtr, &jib, &bsize);
340 if (result == 0) {
341 /* jib is not byte swapped */
342 /* If the journal needs to be initialized, it's empty. */
343 if ((SW32(jib.flags) & kJIJournalNeedInitMask) == 0) {
344 off_t hdrOffset = SW64(jib.offset);
345 struct journal_header *jhdr;
346 uint8_t block[bsize];
347 ssize_t nread;
348 int jfd = -1;
349
350 /* If it's an external journal, kJIJournalInFSMask will not be set */
351 if (SW32(jib.flags) & kJIJournalInFSMask) {
352 jfd = dup(GPtr->DrvNum);
353 jp->name = strdup(GPtr->deviceNode);
354 } else {
355 char **namePtr = jp ? &jp->name : NULL;
356 if (debug)
357 plog("External Journal device\n");
358 jfd = OpenDeviceByUUID(&jib.ext_jnl_uuid, namePtr);
359 }
360 if (jfd == -1) {
361 if (debug) {
362 plog("Unable to get journal file descriptor, journal flags = %#x\n", SW32(jib.flags));
363 }
364 goto out;
365 }
366 if (jp) {
367 jp->jnlfd = jfd;
368 jp->jnlOffset = SW64(jib.offset);
369 jp->jnlSize = SW64(jib.size);
370 }
371
372 nread = pread(jfd, block, bsize, hdrOffset);
373 if (nread == -1) {
374 if (debug) {
375 plog("Could not read journal from descriptor %d: %s", jfd, strerror(errno));
376 }
377 err = errno;
378 } else if (nread != bsize) {
379 if (debug) {
380 plog("Only read %zd bytes from journal (expected %u)", nread, (unsigned int)bsize);
381 }
382 err = EINVAL;
383 }
384 if (jp == NULL)
385 close(jfd);
386 /* We got the journal header, now we need to check it */
387 if (err == noErr) {
388 int swap = 0;
389 UInt32 cksum = 0;
390
391 jhdr = (struct journal_header*)block;
392
393 if (jhdr->magic == JOURNAL_HEADER_MAGIC ||
394 SW32(jhdr->magic) == JOURNAL_HEADER_MAGIC) {
395 if (jhdr->endian == ENDIAN_MAGIC)
396 swap = 0;
397 else if (SW32(jhdr->endian) == ENDIAN_MAGIC)
398 swap = 1;
399 else
400 swap = 2;
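// swap == 2 means the endian marker was not recognized, so the checksum
// and start/end fields below cannot be interpreted safely.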
401
402 if (swap != 2) {
403 cksum = swap ? SW32(jhdr->checksum) : jhdr->checksum;
404 UInt32 calc_sum;
405 jhdr->checksum = 0;
406 /* Checksum calculation needs the checksum field to be zero. */
407 calc_sum = calc_checksum((unsigned char*)jhdr, JOURNAL_HEADER_CKSUM_SIZE);
408 /* But, for now, this is for debugging purposes only */
409 if (calc_sum != cksum) {
410 if (debug)
411 plog("Journal checksum doesn't match: orig %x != calc %x\n", cksum, calc_sum);
412 }
413 /* We have a journal, we got the header, now we check the start and end */
414 if (jhdr->start != jhdr->end) {
415 retval = 0;
416 if (debug)
417 plog("Non-empty journal: start = %lld, end = %lld\n",
418 swap ? SW64(jhdr->start) : jhdr->start,
419 swap ? SW64(jhdr->end) : jhdr->end);
420 }
421 }
422 }
423 }
424 }
425 }
426 out:
427 return retval;
428 }
429
430 /*
431 * This function checks whether the volume is clean or dirty. It
432 * also marks the volume as clean/dirty depending on the type
433 * of operation specified. It modifies the volume header only
434 * if the old values are not the same as the new values. If the volume
435 * header is updated, it also sets the last mounted version for HFS+.
436 *
437 * Input:
438 * GPtr - Pointer to scavenger global area
439 * operation - Type of operation to perform
440 * kCheckVolume, // check if volume is clean/dirty
441 * kMarkVolumeDirty, // mark the volume dirty
442 * kMarkVolumeClean // mark the volume clean
443 *
444 * Output:
445 * modified - true if the VH/MDB was modified, otherwise false.
446 * Return Value -
447 * -1 - if the volume is not an HFS/HFS+ volume
448 * 0 - if the volume was dirty or marked dirty
449 * 1 - if the volume was clean or marked clean
450 * If the operation requested was to mark the volume clean/dirty,
451 * the return value is dependent on type of operation (described above).
452 */
453 int CheckForClean(SGlobPtr GPtr, UInt8 operation, Boolean *modified)
454 {
455 enum { unknownVolume = -1, cleanUnmount = 1, dirtyUnmount = 0};
456 int result = unknownVolume;
457 Boolean update = false;
458 HFSMasterDirectoryBlock *mdbp;
459 HFSPlusVolumeHeader *vhp;
460 BlockDescriptor block;
461 ReleaseBlockOptions rbOptions;
462 UInt64 blockNum;
463 SVCB *vcb;
464
465 *modified = false;
466 vcb = GPtr->calculatedVCB;
467 block.buffer = NULL;
468 rbOptions = kReleaseBlock;
469
470 /* Get the block number for VH/MDB */
471 GetVolumeObjectBlockNum(&blockNum);
472 if (blockNum == 0) {
473 if (fsckGetVerbosity(GPtr->context) >= kDebugLog)
474 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
475 goto ExitThisRoutine;
476 }
477
478 /* Get VH or MDB depending on the type of volume */
479 result = GetVolumeObjectPrimaryBlock(&block);
480 if (result) {
481 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
482 plog( "\t%s - could not get VHB/MDB at block %qd \n", __FUNCTION__, blockNum );
483 result = unknownVolume;
484 goto ExitThisRoutine;
485 }
486
487 result = cleanUnmount;
488
489 if (VolumeObjectIsHFSPlus()) {
490 vhp = (HFSPlusVolumeHeader *) block.buffer;
491
492 /* Check unmount bit and volume inconsistent bit */
493 if (((vhp->attributes & kHFSVolumeUnmountedMask) == 0) ||
494 (vhp->attributes & kHFSVolumeInconsistentMask))
495 result = dirtyUnmount;
496
497 /* Check last mounted version. If kFSKMountVersion, bad
498 * journal was encountered during mount. Force dirty volume.
499 */
500
501 if (vhp->lastMountedVersion == kFSKMountVersion) {
502 GPtr->JStat |= S_BadJournal;
503 RcdError (GPtr, E_BadJournal);
504 result = dirtyUnmount;
505 }
506
507 if (operation == kMarkVolumeDirty) {
508 /* Mark volume was not unmounted cleanly */
509 if (vhp->attributes & kHFSVolumeUnmountedMask) {
510 vhp->attributes &= ~kHFSVolumeUnmountedMask;
511 update = true;
512 }
513 /* Mark volume inconsistent */
514 if ((vhp->attributes & kHFSVolumeInconsistentMask) == 0) {
515 vhp->attributes |= kHFSVolumeInconsistentMask;
516 update = true;
517 }
518 } else if (operation == kMarkVolumeClean) {
519 /* Mark volume was unmounted cleanly */
520 if ((vhp->attributes & kHFSVolumeUnmountedMask) == 0) {
521 vhp->attributes |= kHFSVolumeUnmountedMask;
522 update = true;
523 }
524 /* Mark volume consistent */
525 if (vhp->attributes & kHFSVolumeInconsistentMask) {
526 vhp->attributes &= ~kHFSVolumeInconsistentMask;
527 update = true;
528 }
529 }
530
531 /* If any changes to VH, update the last mounted version */
532 if (update == true) {
533 vhp->lastMountedVersion = kFSCKMountVersion;
534 }
535 } else if (VolumeObjectIsHFS()) {
536 mdbp = (HFSMasterDirectoryBlock *) block.buffer;
537
538 /* Check unmount bit and volume inconsistent bit */
539 if (((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) ||
540 (mdbp->drAtrb & kHFSVolumeInconsistentMask))
541 result = dirtyUnmount;
542
543 if (operation == kMarkVolumeDirty) {
544 /* Mark volume was not unmounted cleanly */
545 if (mdbp->drAtrb & kHFSVolumeUnmountedMask) {
546 mdbp->drAtrb &= ~kHFSVolumeUnmountedMask;
547 update = true;
548 }
549 /* Mark volume inconsistent */
550 if ((mdbp->drAtrb & kHFSVolumeInconsistentMask) == 0) {
551 mdbp->drAtrb |= kHFSVolumeInconsistentMask;
552 update = true;
553 }
554 } else if (operation == kMarkVolumeClean) {
555 /* Mark volume was unmounted cleanly */
556 if ((mdbp->drAtrb & kHFSVolumeUnmountedMask) == 0) {
557 mdbp->drAtrb |= kHFSVolumeUnmountedMask;
558 update = true;
559 }
560 /* Mark volume consistent */
561 if (mdbp->drAtrb & kHFSVolumeInconsistentMask) {
562 mdbp->drAtrb &= ~kHFSVolumeInconsistentMask;
563 update = true;
564 }
565 }
566 }
567
568 ExitThisRoutine:
569 if (update == true) {
570 *modified = true;
571 rbOptions = kForceWriteBlock;
572 /* Set appropriate return value */
573 if (operation == kMarkVolumeDirty) {
574 result = dirtyUnmount;
575 } else if (operation == kMarkVolumeClean) {
576 result = cleanUnmount;
577 }
578 }
579 if (block.buffer != NULL)
580 (void) ReleaseVolumeBlock(vcb, &block, rbOptions);
581
582 return (result);
583 }
584
585 /*------------------------------------------------------------------------------
586
587 Function: IVChk - (Initial Volume Check)
588
589 Function: Performs an initial check of the volume to be scavenged to confirm
590 that the volume can be accessed and that it is an HFS/HFS+ volume.
591
592 Input: GPtr - pointer to scavenger global area
593
594 Output: IVChk - function result:
595 0 = no error
596 n = error code
597 ------------------------------------------------------------------------------*/
598 #define kBitsPerSector 4096
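/* A 512-byte sector holds 512 * 8 = 4096 volume-bitmap bits. */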
599
600 OSErr IVChk( SGlobPtr GPtr )
601 {
602 OSErr err;
603 HFSMasterDirectoryBlock * myMDBPtr;
604 HFSPlusVolumeHeader * myVHBPtr;
605 UInt32 numABlks;
606 UInt32 minABlkSz;
607 UInt32 maxNumberOfAllocationBlocks;
608 UInt32 realAllocationBlockSize;
609 UInt32 realTotalBlocks;
610 UInt32 i;
611 BTreeControlBlock *btcb;
612 SVCB *vcb = GPtr->calculatedVCB;
613 VolumeObjectPtr myVOPtr;
614 UInt64 blockNum;
615 UInt64 totalSectors;
616 BlockDescriptor myBlockDescriptor;
617
618 // Set up
619 GPtr->TarID = AMDB_FNum; // target = alt MDB
620 GPtr->TarBlock = 0;
621 maxNumberOfAllocationBlocks = 0xFFFFFFFF;
622 realAllocationBlockSize = 0;
623 realTotalBlocks = 0;
624
625 myBlockDescriptor.buffer = NULL;
626 myVOPtr = GetVolumeObjectPtr( );
627
628 // check volume size
629 if ( myVOPtr->totalDeviceSectors < 3 ) {
630 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
631 plog("\tinvalid device information for volume - total sectors = %qd sector size = %d \n",
632 myVOPtr->totalDeviceSectors, myVOPtr->sectorSize);
633 return( 123 );
634 }
635
636 GetVolumeObjectBlockNum( &blockNum );
637 if ( blockNum == 0 || myVOPtr->volumeType == kUnknownVolumeType ) {
638 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
639 plog( "\t%s - unknown volume type \n", __FUNCTION__ );
640 err = R_BadSig; /* doesn't bear the HFS signature */
641 goto ReleaseAndBail;
642 }
643
644 // get Volume Header (HFS+) or Master Directory (HFS) block
645 err = GetVolumeObjectVHBorMDB( &myBlockDescriptor );
646 if ( err != noErr ) {
647 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
648 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
649 goto ReleaseAndBail;
650 }
651 myMDBPtr = (HFSMasterDirectoryBlock *) myBlockDescriptor.buffer;
652
653 // if this is an HFS (kHFSVolumeType) volume and the MDB indicates this
654 // might contain an embedded HFS+ volume then we need to scan
655 // for an embedded HFS+ volume. I'm told there were some old problems
656 // where we could lose track of the embedded volume.
657 if ( VolumeObjectIsHFS( ) &&
658 (myMDBPtr->drEmbedSigWord != 0 ||
659 myMDBPtr->drEmbedExtent.blockCount != 0 ||
660 myMDBPtr->drEmbedExtent.startBlock != 0) ) {
661
662 err = ScavengeVolumeType( GPtr, myMDBPtr, &myVOPtr->volumeType );
663 if ( err == E_InvalidMDBdrAlBlSt )
664 err = RcdMDBEmbededVolDescriptionErr( GPtr, E_InvalidMDBdrAlBlSt, myMDBPtr );
665
666 if ( VolumeObjectIsEmbeddedHFSPlus( ) ) {
667 // we changed volume types so let's get the VHB
668 (void) ReleaseVolumeBlock( vcb, &myBlockDescriptor, kReleaseBlock );
669 myBlockDescriptor.buffer = NULL;
670 myMDBPtr = NULL;
671 err = GetVolumeObjectVHB( &myBlockDescriptor );
672 if ( err != noErr ) {
673 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
674 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
675 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
676 err = E_InvalidVolumeHeader;
677 goto ReleaseAndBail;
678 }
679
680 GetVolumeObjectBlockNum( &blockNum ); // get the new Volume header block number
681 }
682 else {
683 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
684 plog( "\t%s - bad volume header - err %d \n", __FUNCTION__, err );
685 WriteError( GPtr, E_InvalidVolumeHeader, 1, 0 );
686 err = E_InvalidVolumeHeader;
687 goto ReleaseAndBail;
688 }
689 }
690
691 totalSectors = ( VolumeObjectIsEmbeddedHFSPlus( ) ) ? myVOPtr->totalEmbeddedSectors : myVOPtr->totalDeviceSectors;
692
693 // indicate what type of volume we are dealing with
694 if ( VolumeObjectIsHFSPlus( ) ) {
695
696 myVHBPtr = (HFSPlusVolumeHeader *) myBlockDescriptor.buffer;
697 if (myVHBPtr->attributes & kHFSVolumeJournaledMask) {
698 fsckPrint(GPtr->context, hfsJournalVolCheck);
699 } else {
700 fsckPrint(GPtr->context, hfsCheckNoJnl);
701 }
702 GPtr->numExtents = kHFSPlusExtentDensity;
703 vcb->vcbSignature = kHFSPlusSigWord;
704
705 // Further populate the VCB with VolumeHeader info
706 vcb->vcbAlBlSt = myVOPtr->embeddedOffset / 512;
707 vcb->vcbEmbeddedOffset = myVOPtr->embeddedOffset;
708 realAllocationBlockSize = myVHBPtr->blockSize;
709 realTotalBlocks = myVHBPtr->totalBlocks;
710 vcb->vcbNextCatalogID = myVHBPtr->nextCatalogID;
711 vcb->vcbCreateDate = myVHBPtr->createDate;
712 vcb->vcbAttributes = myVHBPtr->attributes & kHFSCatalogNodeIDsReused;
713
714 if ( myVHBPtr->attributesFile.totalBlocks == 0 )
715 vcb->vcbAttributesFile = NULL; /* XXX memory leak ? */
716
717 // Make sure the Extents B-Tree is set to use 16-bit key lengths.
718 // We access it before completely setting up the control block.
719 btcb = (BTreeControlBlock *) vcb->vcbExtentsFile->fcbBtree;
720 btcb->attributes |= kBTBigKeysMask;
721
722 // catch the case where the volume allocation block count is greater than
723 // maximum number of device allocation blocks. - bug 2916021
724 numABlks = myVOPtr->totalDeviceSectors / ( myVHBPtr->blockSize / Blk_Size );
725 if ( myVHBPtr->totalBlocks > numABlks ) {
726 RcdError( GPtr, E_NABlks );
727 err = E_NABlks;
728 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog ) {
729 plog( "\t%s - volume header total allocation blocks is greater than device size \n", __FUNCTION__ );
730 plog( "\tvolume allocation block count %d device allocation block count %d \n",
731 myVHBPtr->totalBlocks, numABlks );
732 }
733 goto ReleaseAndBail;
734 }
735 }
736 else if ( VolumeObjectIsHFS( ) ) {
737
738 // fsckPrint(GPtr->context, fsckCheckingVolume);
739 fsckPrint(GPtr->context, hfsCheckHFS);
740
741 GPtr->numExtents = kHFSExtentDensity;
742 vcb->vcbSignature = myMDBPtr->drSigWord;
743 maxNumberOfAllocationBlocks = 0xFFFF;
744 // set up next file ID; CheckBTreeKey makes sure we stay under this value
745 vcb->vcbNextCatalogID = myMDBPtr->drNxtCNID;
746 vcb->vcbCreateDate = myMDBPtr->drCrDate;
747
748 realAllocationBlockSize = myMDBPtr->drAlBlkSiz;
749 realTotalBlocks = myMDBPtr->drNmAlBlks;
750 }
751
752 GPtr->TarBlock = blockNum; // target block
753
754 // verify volume allocation info
755 // Note: i is the number of sectors per allocation block
756 numABlks = totalSectors;
757 minABlkSz = Blk_Size; // init minimum ablock size
758 // loop while #ablocks won't fit
759 for( i = 2; numABlks > maxNumberOfAllocationBlocks; i++ ) {
760 minABlkSz = i * Blk_Size; // jack up minimum
761 numABlks = totalSectors / i; // recompute #ablocks, assuming this size
762 }
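// minABlkSz is now the smallest multiple of 512 bytes for which the
// allocation block count fits in maxNumberOfAllocationBlocks (0xFFFF for
// HFS, 0xFFFFFFFF for HFS+); the real allocation block size is checked
// against this minimum below.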
763
764 vcb->vcbBlockSize = realAllocationBlockSize;
765 numABlks = totalSectors / ( realAllocationBlockSize / Blk_Size );
766 if ( VolumeObjectIsHFSPlus( ) ) {
767 // HFS Plus allocation block size must be power of 2
768 if ( (realAllocationBlockSize < minABlkSz) ||
769 (realAllocationBlockSize & (realAllocationBlockSize - 1)) != 0 )
770 realAllocationBlockSize = 0;
771 }
772 else {
773 if ( (realAllocationBlockSize < minABlkSz) ||
774 (realAllocationBlockSize > Max_ABSiz) ||
775 ((realAllocationBlockSize % Blk_Size) != 0) )
776 realAllocationBlockSize = 0;
777 }
778
779 if ( realAllocationBlockSize == 0 ) {
780 RcdError( GPtr, E_ABlkSz );
781 err = E_ABlkSz; // bad allocation block size
782 goto ReleaseAndBail;
783 }
784
785 vcb->vcbTotalBlocks = realTotalBlocks;
786 vcb->vcbFreeBlocks = 0;
787
788 // Only do these tests on HFS volumes; for HFS+ they are either irrelevant
789 // or getting the VolumeHeader would have already failed.
790 if ( VolumeObjectIsHFS( ) ) {
791 UInt32 bitMapSizeInSectors;
792
793 // Calculate the volume bitmap size
794 bitMapSizeInSectors = ( numABlks + kBitsPerSector - 1 ) / kBitsPerSector; // VBM size in blocks
795
796 //•• Calculate the validity of HFS allocation blocks; realTotalBlocks should equal numABlks
797 numABlks = (totalSectors - 3 - bitMapSizeInSectors) / (realAllocationBlockSize / Blk_Size); // actual # of alloc blks
798
799 if ( realTotalBlocks > numABlks ) {
800 RcdError( GPtr, E_NABlks );
801 err = E_NABlks; // invalid number of allocation blocks
802 goto ReleaseAndBail;
803 }
804
805 if ( myMDBPtr->drVBMSt <= MDB_BlkN ) {
806 RcdError(GPtr,E_VBMSt);
807 err = E_VBMSt; // invalid VBM start block
808 goto ReleaseAndBail;
809 }
810 vcb->vcbVBMSt = myMDBPtr->drVBMSt;
811
812 if (myMDBPtr->drAlBlSt < (myMDBPtr->drVBMSt + bitMapSizeInSectors)) {
813 RcdError(GPtr,E_ABlkSt);
814 err = E_ABlkSt; // invalid starting alloc block
815 goto ReleaseAndBail;
816 }
817 vcb->vcbAlBlSt = myMDBPtr->drAlBlSt;
818 }
819
820 ReleaseAndBail:
821 if (myBlockDescriptor.buffer != NULL)
822 (void) ReleaseVolumeBlock(vcb, &myBlockDescriptor, kReleaseBlock);
823
824 return( err );
825 }
826
827
828 static OSErr ScavengeVolumeType( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb, UInt32 *volumeType )
829 {
830 UInt64 vHSector;
831 UInt64 startSector;
832 UInt64 altVHSector;
833 UInt64 hfsPlusSectors = 0;
834 UInt32 sectorsPerBlock;
835 UInt32 numSectorsToSearch;
836 OSErr err;
837 HFSPlusVolumeHeader *volumeHeader;
838 HFSExtentDescriptor embededExtent;
839 SVCB *calculatedVCB = GPtr->calculatedVCB;
840 VolumeObjectPtr myVOPtr;
841 UInt16 embedSigWord = mdb->drEmbedSigWord;
842 BlockDescriptor block;
843
844 /*
845 * If all of the embedded volume information is zero, then assume
846 * this really is a plain HFS disk like it says. Otherwise, if
847 * you reinitialize a large HFS Plus volume as HFS, the original
848 * embedded volume's volume header and alternate volume header will
849 * still be there, and we'll try to repair the embedded volume.
850 */
851 if (embedSigWord == 0 &&
852 mdb->drEmbedExtent.blockCount == 0 &&
853 mdb->drEmbedExtent.startBlock == 0)
854 {
855 *volumeType = kHFSVolumeType;
856 return noErr;
857 }
858
859 myVOPtr = GetVolumeObjectPtr( );
860 *volumeType = kEmbededHFSPlusVolumeType; // Assume HFS+
861
862 //
863 // First see if it is an HFS+ volume and the relevant structures look OK
864 //
865 if ( embedSigWord == kHFSPlusSigWord )
866 {
867 /* look for primary volume header */
868 vHSector = (UInt64)mdb->drAlBlSt +
869 ((UInt64)(mdb->drAlBlkSiz / Blk_Size) * (UInt64)mdb->drEmbedExtent.startBlock) + 2;
870
871 err = GetVolumeBlock(calculatedVCB, vHSector, kGetBlock, &block);
872 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
873 if ( err != noErr ) goto AssumeHFS;
874
875 myVOPtr->primaryVHB = vHSector;
876 err = ValidVolumeHeader( volumeHeader );
877 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
878 if ( err == noErr ) {
879 myVOPtr->flags |= kVO_PriVHBOK;
880 return( noErr );
881 }
882 }
883
884 sectorsPerBlock = mdb->drAlBlkSiz / Blk_Size;
885
886 // Search the end of the disk to see if a Volume Header is present at all
887 if ( embedSigWord != kHFSPlusSigWord )
888 {
889 numSectorsToSearch = mdb->drAlBlkSiz / Blk_Size;
890 startSector = myVOPtr->totalDeviceSectors - 4 - numSectorsToSearch;
891
892 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &altVHSector );
893 if ( err != noErr ) goto AssumeHFS;
894
895 // We found the Alt VH, so this must be a damaged embedded HFS+ volume
896 // Now Scavenge for the Primary VolumeHeader
897 myVOPtr->alternateVHB = altVHSector;
898 myVOPtr->flags |= kVO_AltVHBOK;
899 startSector = mdb->drAlBlSt + (4 * sectorsPerBlock); // Start looking at 4th HFS allocation block
900 numSectorsToSearch = 10 * sectorsPerBlock; // search for VH in next 10 allocation blocks
901
902 err = SeekVolumeHeader( GPtr, startSector, numSectorsToSearch, &vHSector );
903 if ( err != noErr ) goto AssumeHFS;
904
905 myVOPtr->primaryVHB = vHSector;
906 myVOPtr->flags |= kVO_PriVHBOK;
907 hfsPlusSectors = altVHSector - vHSector + 1 + 2 + 1; // numSectors + BB + end
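// Presumably: the sectors from the primary through the alternate VH,
// inclusive, plus the two boot-block sectors before the primary VH and
// the reserved sector after the alternate VH (the "BB + end" above).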
908
909 // Fix the embedded extent
910 embededExtent.blockCount = hfsPlusSectors / sectorsPerBlock;
911 embededExtent.startBlock = (vHSector - 2 - mdb->drAlBlSt ) / sectorsPerBlock;
912 embedSigWord = kHFSPlusSigWord;
913
914 myVOPtr->embeddedOffset =
915 (embededExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
916 }
917 else
918 {
919 embedSigWord = mdb->drEmbedSigWord;
920 embededExtent.blockCount = mdb->drEmbedExtent.blockCount;
921 embededExtent.startBlock = mdb->drEmbedExtent.startBlock;
922 }
923
924 if ( embedSigWord == kHFSPlusSigWord )
925 {
926 startSector = 2 + mdb->drAlBlSt +
927 ((UInt64)embededExtent.startBlock * (mdb->drAlBlkSiz / Blk_Size));
928
929 err = SeekVolumeHeader( GPtr, startSector, mdb->drAlBlkSiz / Blk_Size, &vHSector );
930 if ( err != noErr ) goto AssumeHFS;
931
932 // Now replace the bad fields and mark the error
933 mdb->drEmbedExtent.blockCount = embededExtent.blockCount;
934 mdb->drEmbedExtent.startBlock = embededExtent.startBlock;
935 mdb->drEmbedSigWord = kHFSPlusSigWord;
936 mdb->drAlBlSt += vHSector - startSector; // Fix the bad field
937 myVOPtr->totalEmbeddedSectors = (mdb->drAlBlkSiz / Blk_Size) * mdb->drEmbedExtent.blockCount;
938 myVOPtr->embeddedOffset =
939 (mdb->drEmbedExtent.startBlock * mdb->drAlBlkSiz) + (mdb->drAlBlSt * Blk_Size);
940 myVOPtr->primaryVHB = vHSector;
941 myVOPtr->flags |= kVO_PriVHBOK;
942
943 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
944 return( E_InvalidMDBdrAlBlSt );
945 }
946
947 AssumeHFS:
948 *volumeType = kHFSVolumeType;
949 return( noErr );
950
951 } /* ScavengeVolumeType */
952
953
954 static OSErr SeekVolumeHeader( SGlobPtr GPtr, UInt64 startSector, UInt32 numSectors, UInt64 *vHSector )
955 {
956 OSErr err;
957 HFSPlusVolumeHeader *volumeHeader;
958 SVCB *calculatedVCB = GPtr->calculatedVCB;
959 BlockDescriptor block;
960
961 for ( *vHSector = startSector ; *vHSector < startSector + numSectors ; (*vHSector)++ )
962 {
963 err = GetVolumeBlock(calculatedVCB, *vHSector, kGetBlock, &block);
964 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
965 if ( err != noErr ) return( err );
966
967 err = ValidVolumeHeader(volumeHeader);
968
969 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock);
970 if ( err == noErr )
971 return( noErr );
972 }
973
974 return( fnfErr );
975 }
976
977
978 #if 0 // not used at this time
979 static OSErr CheckWrapperExtents( SGlobPtr GPtr, HFSMasterDirectoryBlock *mdb )
980 {
981 OSErr err = noErr;
982
983 // See if Norton Disk Doctor 2.0 corrupted the catalog's first extent
984 if ( mdb->drCTExtRec[0].startBlock >= mdb->drEmbedExtent.startBlock)
985 {
986 // Fix the field in the in-memory copy, and record the error
987 mdb->drCTExtRec[0].startBlock = mdb->drXTExtRec[0].startBlock + mdb->drXTExtRec[0].blockCount;
988 GPtr->VIStat = GPtr->VIStat | S_MDB; // write out our MDB
989 err = RcdInvalidWrapperExtents( GPtr, E_InvalidWrapperExtents );
990 }
991
992 return err;
993 }
994 #endif
995
996 /*------------------------------------------------------------------------------
997
998 Function: CreateExtentsBTreeControlBlock
999
1000 Function: Create the calculated ExtentsBTree Control Block
1001
1002 Input: GPtr - pointer to scavenger global area
1003
1004 Output: - 0 = no error
1005 n = error code
1006 ------------------------------------------------------------------------------*/
1007
1008 OSErr CreateExtentsBTreeControlBlock( SGlobPtr GPtr )
1009 {
1010 OSErr err;
1011 SInt32 size;
1012 UInt32 numABlks;
1013 BTHeaderRec header;
1014 BTreeControlBlock * btcb;
1015 SVCB * vcb;
1016 BlockDescriptor block;
1017 Boolean isHFSPlus;
1018
1019 // Set up
1020 isHFSPlus = VolumeObjectIsHFSPlus( );
1021 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1022 GPtr->TarBlock = kHeaderNodeNum; // target block = header node
1023 vcb = GPtr->calculatedVCB;
1024 btcb = GPtr->calculatedExtentsBTCB;
1025 block.buffer = NULL;
1026
1027 // get Volume Header (HFS+) or Master Directory (HFS) block
1028 err = GetVolumeObjectVHBorMDB( &block );
1029 if (err) goto exit;
1030 //
1031 // check out allocation info for the Extents File
1032 //
1033 if (isHFSPlus)
1034 {
1035 HFSPlusVolumeHeader *volumeHeader;
1036
1037 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1038
1039 CopyMemory(volumeHeader->extentsFile.extents, GPtr->calculatedExtentsFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1040
1041 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents32, &numABlks); // check out extent info
1042
1043 if (err) goto exit;
1044
1045 if ( volumeHeader->extentsFile.totalBlocks != numABlks ) // check out the PEOF
1046 {
1047 RcdError( GPtr, E_ExtPEOF );
1048 err = E_ExtPEOF;
1049 if (debug)
1050 plog("Extents File totalBlocks = %u, numABlks = %u\n", volumeHeader->extentsFile.totalBlocks, numABlks);
1051 goto exit;
1052 }
1053 else
1054 {
1055 GPtr->calculatedExtentsFCB->fcbLogicalSize = volumeHeader->extentsFile.logicalSize; // Set Extents tree's LEOF
1056 GPtr->calculatedExtentsFCB->fcbPhysicalSize = (UInt64)volumeHeader->extentsFile.totalBlocks *
1057 (UInt64)volumeHeader->blockSize; // Set Extents tree's PEOF
1058 }
1059
1060 //
1061 // Set up the minimal BTreeControlBlock structure
1062 //
1063
1064 // Read the BTreeHeader from disk & also validate its node size.
1065 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1066 if (err) goto exit;
1067
1068 btcb->maxKeyLength = kHFSPlusExtentKeyMaximumLength; // max key length
1069 btcb->keyCompareProc = (void *)CompareExtentKeysPlus;
1070 btcb->attributes |=kBTBigKeysMask; // HFS+ Extent files have 16-bit key length
1071 btcb->leafRecords = header.leafRecords;
1072 btcb->treeDepth = header.treeDepth;
1073 btcb->rootNode = header.rootNode;
1074 btcb->firstLeafNode = header.firstLeafNode;
1075 btcb->lastLeafNode = header.lastLeafNode;
1076
1077 btcb->nodeSize = header.nodeSize;
1078 btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1079 btcb->freeNodes = btcb->totalNodes; // start with everything free
1080
1081 // Make sure the header node's size field is correct by looking at the 1st record offset
1082 err = CheckNodesFirstOffset( GPtr, btcb );
1083 if ( (err != noErr) && (btcb->nodeSize != 1024) ) // default HFS+ Extents node size is 1024
1084 {
1085 btcb->nodeSize = 1024;
1086 btcb->totalNodes = ( GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1087 btcb->freeNodes = btcb->totalNodes; // start with everything free
1088
1089 err = CheckNodesFirstOffset( GPtr, btcb );
1090 if (err) goto exit;
1091
1092 GPtr->EBTStat |= S_BTH; // update the Btree header
1093 }
1094 }
1095 else // Classic HFS
1096 {
1097 HFSMasterDirectoryBlock *alternateMDB;
1098
1099 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1100
1101 CopyMemory(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1102 // ExtDataRecToExtents(alternateMDB->drXTExtRec, GPtr->calculatedExtentsFCB->fcbExtents);
1103
1104
1105 err = CheckFileExtents( GPtr, kHFSExtentsFileID, kDataFork, NULL, (void *)GPtr->calculatedExtentsFCB->fcbExtents16, &numABlks); /* check out extent info */
1106 if (err) goto exit;
1107
1108 if (alternateMDB->drXTFlSize != ((UInt64)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize))// check out the PEOF
1109 {
1110 RcdError(GPtr,E_ExtPEOF);
1111 err = E_ExtPEOF;
1112 if (debug)
1113 plog("Alternate MDB drXTFlSize = %llu, should be %llu\n", (long long)alternateMDB->drXTFlSize, (long long)numABlks * (UInt64)GPtr->calculatedVCB->vcbBlockSize);
1114 goto exit;
1115 }
1116 else
1117 {
1118 GPtr->calculatedExtentsFCB->fcbPhysicalSize = alternateMDB->drXTFlSize; // set up PEOF and EOF in FCB
1119 GPtr->calculatedExtentsFCB->fcbLogicalSize = GPtr->calculatedExtentsFCB->fcbPhysicalSize;
1120 }
1121
1122 //
1123 // Set up the minimal BTreeControlBlock structure
1124 //
1125
1126 // Read the BTreeHeader from disk & also validate its node size.
1127 err = GetBTreeHeader(GPtr, GPtr->calculatedExtentsFCB, &header);
1128 if (err) goto exit;
1129
1130 btcb->maxKeyLength = kHFSExtentKeyMaximumLength; // max key length
1131 btcb->keyCompareProc = (void *)CompareExtentKeys;
1132 btcb->leafRecords = header.leafRecords;
1133 btcb->treeDepth = header.treeDepth;
1134 btcb->rootNode = header.rootNode;
1135 btcb->firstLeafNode = header.firstLeafNode;
1136 btcb->lastLeafNode = header.lastLeafNode;
1137
1138 btcb->nodeSize = header.nodeSize;
1139 btcb->totalNodes = (GPtr->calculatedExtentsFCB->fcbPhysicalSize / btcb->nodeSize );
1140 btcb->freeNodes = btcb->totalNodes; // start with everything free
1141
1142 // Make sure the header node's size field is correct by looking at the 1st record offset
1143 err = CheckNodesFirstOffset( GPtr, btcb );
1144 if (err) goto exit;
1145 }
1146
1147 if ( header.btreeType != kHFSBTreeType )
1148 {
1149 GPtr->EBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1150 }
1151
1152 //
1153 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes.
1154 //
1155 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1156 if ( btcb->refCon == nil ) {
1157 err = R_NoMem;
1158 goto exit;
1159 }
1160 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1161 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1162 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1163 {
1164 err = R_NoMem;
1165 goto exit;
1166 }
1167
1168 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1169 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes;// keep track of real free nodes for progress
1170 exit:
1171 if ( block.buffer != NULL )
1172 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1173
1174 return (err);
1175 }
1176
1177
1178
1179 /*------------------------------------------------------------------------------
1180
1181 Function: CheckNodesFirstOffset
1182
1183 Function: Minimal check verifies that the 1st offset is within bounds. If it's not
1184 the nodeSize may be wrong. In the future this routine could be modified
1185 to try different size values until one fits.
1186
1187 ------------------------------------------------------------------------------*/
1188 #define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))
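// Record offsets are 16-bit values packed at the end of each node, growing
// backward from the last two bytes; the offset at index 0 locates the first
// record, which in a well-formed node starts right after the BTNodeDescriptor.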
1189 static OSErr CheckNodesFirstOffset( SGlobPtr GPtr, BTreeControlBlock *btcb )
1190 {
1191 NodeRec nodeRec;
1192 UInt16 offset;
1193 OSErr err;
1194
1195 (void) SetFileBlockSize(btcb->fcbPtr, btcb->nodeSize);
1196
1197 err = GetNode( btcb, kHeaderNodeNum, &nodeRec );
1198
1199 if ( err == noErr )
1200 {
1201 offset = GetRecordOffset( btcb, (NodeDescPtr)nodeRec.buffer, 0 );
1202 if ( (offset < sizeof (BTNodeDescriptor)) || // offset < minimum
1203 (offset & 1) || // offset is odd
1204 (offset >= btcb->nodeSize) ) // offset beyond end of node
1205 {
1206 if (debug) fprintf(stderr, "%s(%d): offset is wrong\n", __FUNCTION__, __LINE__);
1207 err = fsBTInvalidNodeErr;
1208 }
1209 }
1210
1211 if ( err != noErr )
1212 RcdError( GPtr, E_InvalidNodeSize );
1213
1214 (void) ReleaseNode(btcb, &nodeRec);
1215
1216 return( err );
1217 }
1218
1219
1220
1221 /*------------------------------------------------------------------------------
1222
1223 Function: ExtBTChk - (Extent BTree Check)
1224
1225 Function: Verifies the extent BTree structure.
1226
1227 Input: GPtr - pointer to scavenger global area
1228
1229 Output: ExtBTChk - function result:
1230 0 = no error
1231 n = error code
1232 ------------------------------------------------------------------------------*/
1233
1234 OSErr ExtBTChk( SGlobPtr GPtr )
1235 {
1236 OSErr err;
1237
1238 // Set up
1239 GPtr->TarID = kHFSExtentsFileID; // target = extent file
1240 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1241
1242 //
1243 // check out the BTree structure
1244 //
1245
1246 err = BTCheck(GPtr, kCalculatedExtentRefNum, NULL);
1247 ReturnIfError( err ); // invalid extent file BTree
1248
1249 //
1250 // check out the allocation map structure
1251 //
1252
1253 err = BTMapChk( GPtr, kCalculatedExtentRefNum );
1254 ReturnIfError( err ); // Invalid extent BTree map
1255
1256 //
1257 // Make sure unused nodes in the B-tree are zero filled.
1258 //
1259 err = BTCheckUnusedNodes(GPtr, kCalculatedExtentRefNum, &GPtr->EBTStat);
1260 ReturnIfError( err );
1261
1262 //
1263 // compare BTree header record on disk with scavenger's BTree header record
1264 //
1265
1266 err = CmpBTH( GPtr, kCalculatedExtentRefNum );
1267 ReturnIfError( err );
1268
1269 //
1270 // compare BTree map on disk with scavenger's BTree map
1271 //
1272
1273 err = CmpBTM( GPtr, kCalculatedExtentRefNum );
1274
1275 return( err );
1276 }
1277
1278
1279
1280 /*------------------------------------------------------------------------------
1281
1282 Function: BadBlockFileExtentCheck - (Check extents of bad block file)
1283
1284 Function:
1285 Verifies the extents of bad block file (kHFSBadBlockFileID) that
1286 exist in extents Btree.
1287
1288 Note that the extents for other file IDs < kHFSFirstUserCatalogNodeID
1289 are taken care of in the following functions:
1290
1291 kHFSExtentsFileID - CreateExtentsBTreeControlBlock
1292 kHFSCatalogFileID - CreateCatalogBTreeControlBlock
1293 kHFSAllocationFileID - CreateExtendedAllocationsFCB
1294 kHFSStartupFileID - CreateExtendedAllocationsFCB
1295 kHFSAttributesFileID - CreateAttributesBTreeControlBlock
1296
1297 Input: GPtr - pointer to scavenger global area
1298
1299 Output: BadBlockFileExtentCheck - function result:
1300 0 = no error
1301 +n = error code
1302 ------------------------------------------------------------------------------*/
1303
1304 OSErr BadBlockFileExtentCheck( SGlobPtr GPtr )
1305 {
1306 UInt32 attributes;
1307 void *p;
1308 OSErr result;
1309 SVCB *vcb;
1310 Boolean isHFSPlus;
1311 BlockDescriptor block;
1312
1313 isHFSPlus = VolumeObjectIsHFSPlus( );
1314 block.buffer = NULL;
1315
1316 //
1317 // process the bad block extents (created by the disk init pkg to hide badspots)
1318 //
1319 vcb = GPtr->calculatedVCB;
1320
1321 result = GetVolumeObjectVHBorMDB( &block );
1322 if ( result != noErr ) goto ExitThisRoutine; // error, couldn't get it
1323
1324 p = (void *) block.buffer;
1325 attributes = isHFSPlus == true ? ((HFSPlusVolumeHeader*)p)->attributes : ((HFSMasterDirectoryBlock*)p)->drAtrb;
1326
1327 //•• Does HFS+ honor the same mask?
1328 if ( attributes & kHFSVolumeSparedBlocksMask ) // if any badspots
1329 {
1330 HFSPlusExtentRecord zeroXdr; // dummy passed to 'CheckFileExtents'
1331 UInt32 numBadBlocks;
1332
1333 ClearMemory ( zeroXdr, sizeof( HFSPlusExtentRecord ) );
1334 result = CheckFileExtents( GPtr, kHFSBadBlockFileID, kDataFork, NULL, (void *)zeroXdr, &numBadBlocks); // check and mark bitmap
1335 }
1336
1337 ExitThisRoutine:
1338 if ( block.buffer != NULL )
1339 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1340
1341 return (result);
1342 }
1343
1344
1345 /*------------------------------------------------------------------------------
1346
1347 Function: CreateCatalogBTreeControlBlock
1348
1349 Function: Create the calculated CatalogBTree Control Block
1350
1351 Input: GPtr - pointer to scavenger global area
1352
1353 Output: - 0 = no error
1354 n = error code
1355 ------------------------------------------------------------------------------*/
1356 OSErr CreateCatalogBTreeControlBlock( SGlobPtr GPtr )
1357 {
1358 OSErr err;
1359 SInt32 size;
1360 UInt32 numABlks;
1361 BTHeaderRec header;
1362 BTreeControlBlock * btcb;
1363 SVCB * vcb;
1364 BlockDescriptor block;
1365 Boolean isHFSPlus;
1366
1367 // Set up
1368 isHFSPlus = VolumeObjectIsHFSPlus( );
1369 GPtr->TarID = kHFSCatalogFileID;
1370 GPtr->TarBlock = kHeaderNodeNum;
1371 vcb = GPtr->calculatedVCB;
1372 btcb = GPtr->calculatedCatalogBTCB;
1373 block.buffer = NULL;
1374
1375 err = GetVolumeObjectVHBorMDB( &block );
1376 if ( err != noErr ) goto ExitThisRoutine; // error, couldn't get it
1377 //
1378 // check out allocation info for the Catalog File
1379 //
1380 if (isHFSPlus)
1381 {
1382 HFSPlusVolumeHeader * volumeHeader;
1383
1384 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1385
1386 CopyMemory(volumeHeader->catalogFile.extents, GPtr->calculatedCatalogFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1387
1388 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents32, &numABlks);
1389 if (err) goto exit;
1390
1391 if ( volumeHeader->catalogFile.totalBlocks != numABlks )
1392 {
1393 RcdError( GPtr, E_CatPEOF );
1394 err = E_CatPEOF;
1395 goto exit;
1396 }
1397 else
1398 {
1399 GPtr->calculatedCatalogFCB->fcbLogicalSize = volumeHeader->catalogFile.logicalSize;
1400 GPtr->calculatedCatalogFCB->fcbPhysicalSize = (UInt64)volumeHeader->catalogFile.totalBlocks *
1401 (UInt64)volumeHeader->blockSize;
1402 }
1403
1404 //
1405 // Set up the minimal BTreeControlBlock structure
1406 //
1407
1408 // read the BTreeHeader from disk & also validate its node size.
1409 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1410 if (err) goto exit;
1411
1412 btcb->maxKeyLength = kHFSPlusCatalogKeyMaximumLength; // max key length
1413
1414 /*
1415 * Figure out the type of key string compare
1416 * (case-insensitive or case-sensitive)
1417 *
1418 * To do: should enforce that an "HX" volume is required for kHFSBinaryCompare.
1419 */
1420 if (header.keyCompareType == kHFSBinaryCompare)
1421 {
1422 btcb->keyCompareProc = (void *)CaseSensitiveCatalogKeyCompare;
1423 fsckPrint(GPtr->context, hfsCaseSensitive);
1424 }
1425 else
1426 {
1427 btcb->keyCompareProc = (void *)CompareExtendedCatalogKeys;
1428 }
1429 btcb->keyCompareType = header.keyCompareType;
1430 btcb->leafRecords = header.leafRecords;
1431 btcb->nodeSize = header.nodeSize;
1432 btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1433 btcb->freeNodes = btcb->totalNodes; // start with everything free
1434 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Catalog files have large, variable-sized keys
1435
1436 btcb->treeDepth = header.treeDepth;
1437 btcb->rootNode = header.rootNode;
1438 btcb->firstLeafNode = header.firstLeafNode;
1439 btcb->lastLeafNode = header.lastLeafNode;
1440
1441
1442 // Make sure the header node's size field is correct by looking at the 1st record offset
1443 err = CheckNodesFirstOffset( GPtr, btcb );
1444 if ( (err != noErr) && (btcb->nodeSize != 4096) ) // default HFS+ Catalog node size is 4096
1445 {
1446 btcb->nodeSize = 4096;
1447 btcb->totalNodes = ( GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1448 btcb->freeNodes = btcb->totalNodes; // start with everything free
1449
1450 err = CheckNodesFirstOffset( GPtr, btcb );
1451 if (err) goto exit;
1452
1453 GPtr->CBTStat |= S_BTH; // update the Btree header
1454 }
1455 }
1456 else // HFS
1457 {
1458 HFSMasterDirectoryBlock *alternateMDB;
1459
1460 alternateMDB = (HFSMasterDirectoryBlock *) block.buffer;
1461
1462 CopyMemory( alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents16, sizeof(HFSExtentRecord) );
1463 // ExtDataRecToExtents(alternateMDB->drCTExtRec, GPtr->calculatedCatalogFCB->fcbExtents);
1464
1465 err = CheckFileExtents( GPtr, kHFSCatalogFileID, kDataFork, NULL, (void *)GPtr->calculatedCatalogFCB->fcbExtents16, &numABlks); /* check out extent info */
1466 if (err) goto exit;
1467
1468 if (alternateMDB->drCTFlSize != ((UInt64)numABlks * (UInt64)vcb->vcbBlockSize)) // check out the PEOF
1469 {
1470 RcdError( GPtr, E_CatPEOF );
1471 err = E_CatPEOF;
1472 goto exit;
1473 }
1474 else
1475 {
1476 GPtr->calculatedCatalogFCB->fcbPhysicalSize = alternateMDB->drCTFlSize; // set up PEOF and EOF in FCB
1477 GPtr->calculatedCatalogFCB->fcbLogicalSize = GPtr->calculatedCatalogFCB->fcbPhysicalSize;
1478 }
1479
1480 //
1481 // Set up the minimal BTreeControlBlock structure
1482 //
1483
1484 // read the BTreeHeader from disk & also validate its node size.
1485 err = GetBTreeHeader(GPtr, GPtr->calculatedCatalogFCB, &header);
1486 if (err) goto exit;
1487
1488 btcb->maxKeyLength = kHFSCatalogKeyMaximumLength; // max key length
1489 btcb->keyCompareProc = (void *) CompareCatalogKeys;
1490 btcb->leafRecords = header.leafRecords;
1491 btcb->nodeSize = header.nodeSize;
1492 btcb->totalNodes = (GPtr->calculatedCatalogFCB->fcbPhysicalSize / btcb->nodeSize );
1493 btcb->freeNodes = btcb->totalNodes; // start with everything free
1494
1495 btcb->treeDepth = header.treeDepth;
1496 btcb->rootNode = header.rootNode;
1497 btcb->firstLeafNode = header.firstLeafNode;
1498 btcb->lastLeafNode = header.lastLeafNode;
1499
1500 // Make sure the header node's size field is correct by looking at the 1st record offset
1501 err = CheckNodesFirstOffset( GPtr, btcb );
1502 if (err) goto exit;
1503 }
1504 #if 0
1505 plog(" Catalog B-tree is %qd bytes\n", (UInt64)btcb->totalNodes * (UInt64) btcb->nodeSize);
1506 #endif
1507
1508 if ( header.btreeType != kHFSBTreeType )
1509 {
1510 GPtr->CBTStat |= S_ReservedBTH; // Repair reserved fields in Btree header
1511 }
1512
1513 //
1514 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes.
1515 //
1516
1517 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
1518 if ( btcb->refCon == nil ) {
1519 err = R_NoMem;
1520 goto exit;
1521 }
1522 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
1523 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
1524 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
1525 {
1526 err = R_NoMem;
1527 goto exit;
1528 }
1529
1530 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
1531 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
1532
1533 /* it should be OK at this point to get volume name and stuff it into our global */
1534 {
1535 OSErr result;
1536 UInt16 recSize;
1537 CatalogKey key;
1538 CatalogRecord record;
1539
1540 BuildCatalogKey( kHFSRootFolderID, NULL, isHFSPlus, &key );
1541 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, NULL, &record, &recSize, NULL );
1542 if ( result == noErr ) {
1543 if ( isHFSPlus ) {
1544 size_t len;
1545 HFSPlusCatalogThread * recPtr = &record.hfsPlusThread;
1546 (void) utf_encodestr( recPtr->nodeName.unicode,
1547 recPtr->nodeName.length * 2,
1548 GPtr->volumeName, &len, sizeof(GPtr->volumeName) );
1549 GPtr->volumeName[len] = '\0';
1550 }
1551 else {
1552 HFSCatalogThread * recPtr = &record.hfsThread;
1553 bcopy( &recPtr->nodeName[1], GPtr->volumeName, recPtr->nodeName[0] );
1554 GPtr->volumeName[ recPtr->nodeName[0] ] = '\0';
1555 }
1556 fsckPrint(GPtr->context, fsckVolumeName, GPtr->volumeName);
1557 }
1558 }
1559
1560 exit:
1561 ExitThisRoutine:
1562 if ( block.buffer != NULL )
1563 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1564
1565 return (err);
1566 }
1567
1568
1569 /*------------------------------------------------------------------------------
1570
1571 Function: CreateExtendedAllocationsFCB
1572
1573 Function: Create the calculated ExtentsBTree Control Block for
1574 kHFSAllocationFileID and kHFSStartupFileID.
1575
1576 Input: GPtr - pointer to scavenger global area
1577
1578 Output: - 0 = no error
1579 n = error code
1580 ------------------------------------------------------------------------------*/
1581 OSErr CreateExtendedAllocationsFCB( SGlobPtr GPtr )
1582 {
1583 OSErr err = 0;
1584 UInt32 numABlks;
1585 SVCB * vcb;
1586 Boolean isHFSPlus;
1587 BlockDescriptor block;
1588
1589 // Set up
1590 isHFSPlus = VolumeObjectIsHFSPlus( );
1591 GPtr->TarID = kHFSAllocationFileID;
1592 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
1593 vcb = GPtr->calculatedVCB;
1594 block.buffer = NULL;
1595
1596 //
1597 // check out allocation info for the allocation File
1598 //
1599
1600 if ( isHFSPlus )
1601 {
1602 SFCB * fcb;
1603 HFSPlusVolumeHeader *volumeHeader;
1604
1605 err = GetVolumeObjectVHB( &block );
1606 if ( err != noErr )
1607 goto exit;
1608 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
1609
1610 fcb = GPtr->calculatedAllocationsFCB;
1611 CopyMemory( volumeHeader->allocationFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1612
1613 err = CheckFileExtents( GPtr, kHFSAllocationFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1614 if (err) goto exit;
1615
1616 //
1617 // The allocation file will get processed in whole allocation blocks, or
1618 // maximal-sized cache blocks, whichever is smaller. This means the cache
1619 // doesn't need to cope with buffers that are larger than a cache block.
1620 if (vcb->vcbBlockSize < fscache.BlockSize)
1621 (void) SetFileBlockSize (fcb, vcb->vcbBlockSize);
1622 else
1623 (void) SetFileBlockSize (fcb, fscache.BlockSize);
1624
1625 if ( volumeHeader->allocationFile.totalBlocks != numABlks )
1626 {
1627 RcdError( GPtr, E_CatPEOF );
1628 err = E_CatPEOF;
1629 goto exit;
1630 }
1631 else
1632 {
1633 fcb->fcbLogicalSize = volumeHeader->allocationFile.logicalSize;
1634 fcb->fcbPhysicalSize = (UInt64) volumeHeader->allocationFile.totalBlocks *
1635 (UInt64) volumeHeader->blockSize;
1636 }
1637
1638 /* while we're here, also get startup file extents... */
1639 fcb = GPtr->calculatedStartupFCB;
1640 CopyMemory( volumeHeader->startupFile.extents, fcb->fcbExtents32, sizeof(HFSPlusExtentRecord) );
1641
1642 err = CheckFileExtents( GPtr, kHFSStartupFileID, kDataFork, NULL, (void *)fcb->fcbExtents32, &numABlks);
1643 if (err) goto exit;
1644
1645 fcb->fcbLogicalSize = volumeHeader->startupFile.logicalSize;
1646 fcb->fcbPhysicalSize = (UInt64) volumeHeader->startupFile.totalBlocks *
1647 (UInt64) volumeHeader->blockSize;
1648 }
1649
1650 exit:
1651 if (block.buffer)
1652 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
1653
1654 return (err);
1655
1656 }
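#if 0 /* Illustrative sketch only -- not part of fsck_hfs */
/*
 * A minimal sketch of the block accounting that CheckFileExtents performs for
 * the allocation and startup files above, under the assumption that all of the
 * file's extents fit in the single HFSPlusExtentRecord copied from the volume
 * header: the record holds eight extent descriptors, and the number of mapped
 * allocation blocks is the sum of their blockCount fields.  The real routine
 * also validates each extent and records overlapping extents; the helper name
 * below is hypothetical.
 */
static UInt32 SketchCountExtentRecordBlocks( const HFSPlusExtentRecord extents )
{
	int i;
	UInt32 blocks = 0;

	for ( i = 0; i < 8; i++ )		/* 8 == kHFSPlusExtentDensity */
		blocks += extents[i].blockCount;

	return( blocks );
}
#endif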
1657
1658
1659 /*------------------------------------------------------------------------------
1660
1661 Function: CatHChk - (Catalog Hierarchy Check)
1662
1663 Function: Verifies the catalog hierarchy.
1664
1665 Input: GPtr - pointer to scavenger global area
1666
1667 Output: CatHChk - function result:
1668 0 = no error
1669 n = error code
1670 ------------------------------------------------------------------------------*/
1671
1672 OSErr CatHChk( SGlobPtr GPtr )
1673 {
1674 SInt16 i;
1675 OSErr result;
1676 UInt16 recSize;
1677 SInt16 selCode;
1678 UInt32 hint;
1679 UInt32 dirCnt;
1680 UInt32 filCnt;
1681 SInt16 rtdirCnt;
1682 SInt16 rtfilCnt;
1683 SVCB *calculatedVCB;
1684 SDPR *dprP;
1685 SDPR *dprP1;
1686 CatalogKey foundKey;
1687 Boolean validKeyFound;
1688 CatalogKey key;
1689 CatalogRecord record;
1690 CatalogRecord record2;
1691 HFSPlusCatalogFolder *largeCatalogFolderP;
1692 HFSPlusCatalogFile *largeCatalogFileP;
1693 HFSCatalogFile *smallCatalogFileP;
1694 HFSCatalogFolder *smallCatalogFolderP;
1695 CatalogName catalogName;
1696 UInt32 valence;
1697 CatalogRecord threadRecord;
1698 HFSCatalogNodeID parID;
1699 Boolean isHFSPlus;
1700
1701 // set up
1702 isHFSPlus = VolumeObjectIsHFSPlus( );
1703 calculatedVCB = GPtr->calculatedVCB;
1704 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
1705 GPtr->TarBlock = 0; /* no target block yet */
1706
1707 //
1708 // position to the beginning of catalog
1709 //
1710
1711 //•• Can we ignore this part by just taking advantage of setting the selCode = 0x8001;
1712 {
1713 BuildCatalogKey( 1, (const CatalogName *)nil, isHFSPlus, &key );
1714 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1715
1716 GPtr->TarBlock = hint; /* set target block */
1717 if ( result != btNotFound )
1718 {
1719 RcdError( GPtr, E_CatRec );
1720 return( E_CatRec );
1721 }
1722 }
1723
1724 GPtr->DirLevel = 1;
1725 dprP = &(GPtr->DirPTPtr)[0];
1726 dprP->directoryID = 1;
1727
1728 dirCnt = filCnt = rtdirCnt = rtfilCnt = 0;
1729
1730 result = noErr;
1731 selCode = 0x8001; /* start with root directory */
1732
1733 //
1734 // enumerate the entire catalog
1735 //
1736 while ( (GPtr->DirLevel > 0) && (result == noErr) )
1737 {
1738 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1739
1740 validKeyFound = true;
1741 record.recordType = 0;
1742
1743 // get the next record
1744 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &foundKey, &record, &recSize, &hint );
1745
1746 GPtr->TarBlock = hint; /* set target block */
1747 if ( result != noErr )
1748 {
1749 if ( result == btNotFound )
1750 {
1751 result = noErr;
1752 validKeyFound = false;
1753 }
1754 else
1755 {
1756 result = IntError( GPtr, result ); /* error from BTGetRecord */
1757 return( result );
1758 }
1759 }
1760 selCode = 1; /* get next rec from now on */
1761
1762 GPtr->itemsProcessed++;
1763
1764 //
1765 // if same ParID ...
1766 //
1767 parID = isHFSPlus == true ? foundKey.hfsPlus.parentID : foundKey.hfs.parentID;
1768 if ( (validKeyFound == true) && (parID == dprP->directoryID) )
1769 {
1770 dprP->offspringIndex++; /* increment offspring index */
1771
1772 // if new directory ...
1773
1774 if ( record.recordType == kHFSPlusFolderRecord )
1775 {
1776 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1777
1778 largeCatalogFolderP = (HFSPlusCatalogFolder *) &record;
1779 GPtr->TarID = largeCatalogFolderP->folderID; // target ID = directory ID
1780 GPtr->CNType = record.recordType; // target CNode type = directory ID
1781 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1782
1783 if ( dprP->directoryID > 1 )
1784 {
1785 GPtr->DirLevel++; // we have a new directory level
1786 dirCnt++;
1787 }
1788 if ( dprP->directoryID == kHFSRootFolderID ) // bump root dir count
1789 rtdirCnt++;
1790
1791 if ( GPtr->DirLevel > GPtr->dirPathCount )
1792 {
1793 void *ptr;
1794
1795 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1796 if (ptr == nil)
1797 {
1798 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1799 return noErr; /* abort this check, but let other checks proceed */
1800 }
1801 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1802 GPtr->dirPathCount += CMMaxDepth;
1803 GPtr->DirPTPtr = ptr;
1804 }
1805
1806 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1807 dprP->directoryID = largeCatalogFolderP->folderID;
1808 dprP->offspringIndex = 1;
1809 dprP->directoryHint = hint;
1810 dprP->parentDirID = foundKey.hfsPlus.parentID;
1811 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &dprP->directoryName, isHFSPlus );
1812
1813 for ( i = 1; i < GPtr->DirLevel; i++ )
1814 {
1815 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1816 if (dprP->directoryID == dprP1->directoryID)
1817 {
1818 RcdError( GPtr,E_DirLoop ); // loop in directory hierarchy
1819 return( E_DirLoop );
1820 }
1821 }
1822
1823 /*
1824 * Find thread record
1825 */
1826 BuildCatalogKey( dprP->directoryID, (const CatalogName *) nil, isHFSPlus, &key );
1827 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1828 if ( result != noErr ) {
1829 struct MissingThread *mtp;
1830
1831 /* Report the error */
1832 fsckPrint(GPtr->context, E_NoThd, dprP->directoryID);
1833
1834 /* HFS will exit here */
1835 if ( !isHFSPlus )
1836 return (E_NoThd);
1837 /*
1838 * A directory thread is missing. If we can find this
1839 * ID on the missing-thread list then we know where the
1840 * child entries reside and can resume our enumeration.
1841 */
1842 for (mtp = GPtr->missingThreadList; mtp != NULL; mtp = mtp->link) {
1843 if (mtp->threadID == dprP->directoryID) {
1844 mtp->thread.recordType = kHFSPlusFolderThreadRecord;
1845 mtp->thread.parentID = dprP->parentDirID;
1846 CopyCatalogName(&dprP->directoryName, (CatalogName *)&mtp->thread.nodeName, isHFSPlus);
1847
1848 /* Reposition to the first child of target directory */
1849 result = SearchBTreeRecord(GPtr->calculatedCatalogFCB, &mtp->nextKey,
1850 kNoHint, &foundKey, &threadRecord, &recSize, &hint);
1851 if (result) {
1852 return (E_NoThd);
1853 }
1854 selCode = 0; /* use current record instead of next */
1855 break;
1856 }
1857 }
1858 if (selCode != 0) {
1859 /*
1860 * A directory thread is missing but we know this
1861 * directory has no children (since we didn't find
1862 * its ID on the missing-thread list above).
1863 *
1864 * At this point we can resume the enumeration at
1865 * our previous position in our parent directory.
1866 */
1867 goto resumeAtParent;
1868 }
1869 }
1870 dprP->threadHint = hint;
1871 GPtr->TarBlock = hint;
1872 }
1873
1874 // LargeCatalogFile
1875 else if ( record.recordType == kHFSPlusFileRecord )
1876 {
1877 largeCatalogFileP = (HFSPlusCatalogFile *) &record;
1878 GPtr->TarID = largeCatalogFileP->fileID; // target ID = file number
1879 GPtr->CNType = record.recordType; // target CNode type = file record
1880 CopyCatalogName( (const CatalogName *) &foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
1881 filCnt++;
1882 if (dprP->directoryID == kHFSRootFolderID)
1883 rtfilCnt++;
1884 }
1885
1886 else if ( record.recordType == kHFSFolderRecord )
1887 {
1888 result = CheckForStop( GPtr ); ReturnIfError( result ); // Permit the user to interrupt
1889
1890 smallCatalogFolderP = (HFSCatalogFolder *) &record;
1891 GPtr->TarID = smallCatalogFolderP->folderID; /* target ID = directory ID */
1892 GPtr->CNType = record.recordType; /* target CNode type = directory ID */
1893 CopyCatalogName( (const CatalogName *) &key.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = directory name */
1894
1895 if (dprP->directoryID > 1)
1896 {
1897 GPtr->DirLevel++; /* we have a new directory level */
1898 dirCnt++;
1899 }
1900 if (dprP->directoryID == kHFSRootFolderID) /* bump root dir count */
1901 rtdirCnt++;
1902
1903 if ( GPtr->DirLevel > GPtr->dirPathCount )
1904 {
1905 void *ptr;
1906
1907 ptr = realloc(GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR));
1908 if (ptr == nil)
1909 {
1910 fsckPrint(GPtr->context, E_CatDepth, GPtr->dirPathCount);
1911 return noErr; /* abort this check, but let other checks proceed */
1912 }
1913 ClearMemory((char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), (CMMaxDepth * sizeof(SDPR)));
1914 GPtr->dirPathCount += CMMaxDepth;
1915 GPtr->DirPTPtr = ptr;
1916 }
1917
1918 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
1919 dprP->directoryID = smallCatalogFolderP->folderID;
1920 dprP->offspringIndex = 1;
1921 dprP->directoryHint = hint;
1922 dprP->parentDirID = foundKey.hfs.parentID;
1923
1924 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &dprP->directoryName, isHFSPlus );
1925
1926 for (i = 1; i < GPtr->DirLevel; i++)
1927 {
1928 dprP1 = &(GPtr->DirPTPtr)[i - 1];
1929 if (dprP->directoryID == dprP1->directoryID)
1930 {
1931 RcdError( GPtr,E_DirLoop ); /* loop in directory hierarchy */
1932 return( E_DirLoop );
1933 }
1934 }
1935
1936 BuildCatalogKey( dprP->directoryID, (const CatalogName *)0, isHFSPlus, &key );
1937 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint, &foundKey, &threadRecord, &recSize, &hint );
1938 if (result != noErr )
1939 {
1940 result = IntError(GPtr,result); /* error from BTSearch */
1941 return(result);
1942 }
1943 dprP->threadHint = hint; /* save hint for thread */
1944 GPtr->TarBlock = hint; /* set target block */
1945 }
1946
1947 // HFSCatalogFile...
1948 else if ( record.recordType == kHFSFileRecord )
1949 {
1950 smallCatalogFileP = (HFSCatalogFile *) &record;
1951 GPtr->TarID = smallCatalogFileP->fileID; /* target ID = file number */
1952 GPtr->CNType = record.recordType; /* target CNode type = file record */
1953 CopyCatalogName( (const CatalogName *) &foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus ); /* target CName = file name */
1954 filCnt++;
1955 if (dprP->directoryID == kHFSRootFolderID)
1956 rtfilCnt++;
1957 }
1958
1959 // Unknown/Bad record type
1960 else
1961 {
1962 M_DebugStr("\p Unknown-Bad record type");
1963 return( 123 );
1964 }
1965 }
1966
1967 //
1968 // if not same ParID or no record
1969 //
1970 else if ( (record.recordType == kHFSFileThreadRecord) || (record.recordType == kHFSPlusFileThreadRecord) ) /* it's a file thread, skip past it */
1971 {
1972 GPtr->TarID = parID; // target ID = file number
1973 GPtr->CNType = record.recordType; // target CNode type = thread
1974 GPtr->CName.ustr.length = 0; // no target CName
1975 }
1976
1977 else
1978 {
1979 resumeAtParent:
1980 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
1981 GPtr->CNType = record.recordType; /* target CNode type = directory */
1982 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus ); // copy the string name
1983
1984 // re-locate current directory
1985 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &catalogName, isHFSPlus );
1986 BuildCatalogKey( dprP->parentDirID, (const CatalogName *)&catalogName, isHFSPlus, &key );
1987 result = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, dprP->directoryHint, &foundKey, &record2, &recSize, &hint );
1988
1989 if ( result != noErr )
1990 {
1991 result = IntError(GPtr,result); /* error from BTSearch */
1992 return(result);
1993 }
1994 GPtr->TarBlock = hint; /* set target block */
1995
1996
1997 valence = isHFSPlus == true ? record2.hfsPlusFolder.valence : (UInt32)record2.hfsFolder.valence;
1998
1999 if ( valence != dprP->offspringIndex -1 ) /* check its valence */
2000 if ( ( result = RcdValErr( GPtr, E_DirVal, dprP->offspringIndex -1, valence, dprP->parentDirID ) ) )
2001 return( result );
2002
2003 GPtr->DirLevel--; /* move up a level */
2004
2005 if(GPtr->DirLevel > 0)
2006 {
2007 dprP = &(GPtr->DirPTPtr)[GPtr->DirLevel - 1];
2008 GPtr->TarID = dprP->directoryID; /* target ID = current directory ID */
2009 GPtr->CNType = record.recordType; /* target CNode type = directory */
2010 CopyCatalogName( (const CatalogName *) &dprP->directoryName, &GPtr->CName, isHFSPlus );
2011 }
2012 }
2013 } // end while
2014
2015 //
2016 // verify directory and file counts (all nonfatal, repairable errors)
2017 //
2018 if (!isHFSPlus && (rtdirCnt != calculatedVCB->vcbNmRtDirs)) /* check count of dirs in root */
2019 if ( ( result = RcdValErr(GPtr,E_RtDirCnt,rtdirCnt,calculatedVCB->vcbNmRtDirs,0) ) )
2020 return( result );
2021
2022 if (!isHFSPlus && (rtfilCnt != calculatedVCB->vcbNmFls)) /* check count of files in root */
2023 if ( ( result = RcdValErr(GPtr,E_RtFilCnt,rtfilCnt,calculatedVCB->vcbNmFls,0) ) )
2024 return( result );
2025
2026 if (dirCnt != calculatedVCB->vcbFolderCount) /* check count of dirs in volume */
2027 if ( ( result = RcdValErr(GPtr,E_DirCnt,dirCnt,calculatedVCB->vcbFolderCount,0) ) )
2028 return( result );
2029
2030 if (filCnt != calculatedVCB->vcbFileCount) /* check count of files in volume */
2031 if ( ( result = RcdValErr(GPtr,E_FilCnt,filCnt,calculatedVCB->vcbFileCount,0) ) )
2032 return( result );
2033
2034 return( noErr );
2035
2036 } /* end of CatHChk */
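#if 0 /* Illustrative sketch only -- not part of fsck_hfs */
/*
 * A minimal sketch of the directory-path-stack growth pattern that CatHChk
 * uses inline (twice) above: when the hierarchy becomes deeper than the
 * current SDPR array, the array is grown by CMMaxDepth entries with realloc
 * and only the newly added tail is zeroed, so the existing levels are kept.
 * The helper name is hypothetical; the real code performs this inline and,
 * when realloc fails, prints E_CatDepth and lets the remaining checks proceed.
 */
static int SketchGrowDirPathStack( SGlobPtr GPtr )
{
	void *ptr;

	ptr = realloc( GPtr->DirPTPtr, (GPtr->dirPathCount + CMMaxDepth) * sizeof(SDPR) );
	if ( ptr == nil )
		return( R_NoMem );

	/* zero only the CMMaxDepth entries that were just added */
	ClearMemory( (char *)ptr + (GPtr->dirPathCount * sizeof(SDPR)), CMMaxDepth * sizeof(SDPR) );
	GPtr->dirPathCount += CMMaxDepth;
	GPtr->DirPTPtr = ptr;

	return( noErr );
}
#endif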
2037
2038
2039
2040 /*------------------------------------------------------------------------------
2041
2042 Function: CreateAttributesBTreeControlBlock
2043
2044 Function: Create the calculated AttributesBTree Control Block
2045
2046 Input: GPtr - pointer to scavenger global area
2047
2048 Output: - 0 = no error
2049 n = error code
2050 ------------------------------------------------------------------------------*/
2051 OSErr CreateAttributesBTreeControlBlock( SGlobPtr GPtr )
2052 {
2053 OSErr err = 0;
2054 SInt32 size;
2055 UInt32 numABlks;
2056 BTreeControlBlock * btcb;
2057 SVCB * vcb;
2058 Boolean isHFSPlus;
2059 BTHeaderRec header;
2060 BlockDescriptor block;
2061
2062 // Set up
2063 isHFSPlus = VolumeObjectIsHFSPlus( );
2064 GPtr->TarID = kHFSAttributesFileID;
2065 GPtr->TarBlock = kHeaderNodeNum;
2066 block.buffer = NULL;
2067 btcb = GPtr->calculatedAttributesBTCB;
2068 vcb = GPtr->calculatedVCB;
2069
2070 //
2071 // check out allocation info for the Attributes File
2072 //
2073
2074 if (isHFSPlus)
2075 {
2076 HFSPlusVolumeHeader *volumeHeader;
2077
2078 err = GetVolumeObjectVHB( &block );
2079 if ( err != noErr )
2080 goto exit;
2081 volumeHeader = (HFSPlusVolumeHeader *) block.buffer;
2082
2083 CopyMemory( volumeHeader->attributesFile.extents, GPtr->calculatedAttributesFCB->fcbExtents32, sizeof(HFSPlusExtentRecord) );
2084
2085 err = CheckFileExtents( GPtr, kHFSAttributesFileID, kDataFork, NULL, (void *)GPtr->calculatedAttributesFCB->fcbExtents32, &numABlks);
2086 if (err) goto exit;
2087
2088 if ( volumeHeader->attributesFile.totalBlocks != numABlks ) // check out the PEOF
2089 {
2090 RcdError( GPtr, E_CatPEOF );
2091 err = E_CatPEOF;
2092 goto exit;
2093 }
2094 else
2095 {
2096 GPtr->calculatedAttributesFCB->fcbLogicalSize = (UInt64) volumeHeader->attributesFile.logicalSize; // Set Attributes tree's LEOF
2097 GPtr->calculatedAttributesFCB->fcbPhysicalSize = (UInt64) volumeHeader->attributesFile.totalBlocks *
2098 (UInt64) volumeHeader->blockSize; // Set Attributes tree's PEOF
2099 }
2100
2101 //
2102 // See if we actually have an attributes BTree
2103 //
2104 if (numABlks == 0)
2105 {
2106 btcb->maxKeyLength = 0;
2107 btcb->keyCompareProc = 0;
2108 btcb->leafRecords = 0;
2109 btcb->nodeSize = 0;
2110 btcb->totalNodes = 0;
2111 btcb->freeNodes = 0;
2112 btcb->attributes = 0;
2113
2114 btcb->treeDepth = 0;
2115 btcb->rootNode = 0;
2116 btcb->firstLeafNode = 0;
2117 btcb->lastLeafNode = 0;
2118
2119 // GPtr->calculatedVCB->attributesRefNum = 0;
2120 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2121 }
2122 else
2123 {
2124 // read the BTreeHeader from disk & also validate its node size.
2125 err = GetBTreeHeader(GPtr, GPtr->calculatedAttributesFCB, &header);
2126 if (err) goto exit;
2127
2128 btcb->maxKeyLength = kAttributeKeyMaximumLength; // max key length
2129 btcb->keyCompareProc = (void *)CompareAttributeKeys;
2130 btcb->leafRecords = header.leafRecords;
2131 btcb->nodeSize = header.nodeSize;
2132 btcb->totalNodes = ( GPtr->calculatedAttributesFCB->fcbPhysicalSize / btcb->nodeSize );
2133 btcb->freeNodes = btcb->totalNodes; // start with everything free
2134 btcb->attributes |=(kBTBigKeysMask + kBTVariableIndexKeysMask); // HFS+ Attributes files have large, variable-sized keys
2135
2136 btcb->treeDepth = header.treeDepth;
2137 btcb->rootNode = header.rootNode;
2138 btcb->firstLeafNode = header.firstLeafNode;
2139 btcb->lastLeafNode = header.lastLeafNode;
2140
2141 //
2142 // Make sure the header node's size field is correct by looking at the 1st record offset
2143 //
2144 err = CheckNodesFirstOffset( GPtr, btcb );
2145 if (err) goto exit;
2146 }
2147 }
2148 else
2149 {
2150 btcb->maxKeyLength = 0;
2151 btcb->keyCompareProc = 0;
2152 btcb->leafRecords = 0;
2153 btcb->nodeSize = 0;
2154 btcb->totalNodes = 0;
2155 btcb->freeNodes = 0;
2156 btcb->attributes = 0;
2157
2158 btcb->treeDepth = 0;
2159 btcb->rootNode = 0;
2160 btcb->firstLeafNode = 0;
2161 btcb->lastLeafNode = 0;
2162
2163 GPtr->calculatedVCB->vcbAttributesFile = NULL;
2164 }
2165
2166 //
2167 // set up our DFA extended BTCB area. Will we have enough memory on all HFS+ volumes?
2168 //
2169 btcb->refCon = AllocateClearMemory( sizeof(BTreeExtensionsRec) ); // allocate space for our BTCB extensions
2170 if ( btcb->refCon == nil ) {
2171 err = R_NoMem;
2172 goto exit;
2173 }
2174
2175 if (btcb->totalNodes == 0)
2176 {
2177 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = nil;
2178 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = 0;
2179 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = 0;
2180 }
2181 else
2182 {
2183 if ( btcb->refCon == nil ) {
2184 err = R_NoMem;
2185 goto exit;
2186 }
2187 size = (btcb->totalNodes + 7) / 8; // size of BTree bit map
2188 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr = AllocateClearMemory(size); // get precleared bitmap
2189 if ( ((BTreeExtensionsRec*)btcb->refCon)->BTCBMPtr == nil )
2190 {
2191 err = R_NoMem;
2192 goto exit;
2193 }
2194
2195 ((BTreeExtensionsRec*)btcb->refCon)->BTCBMSize = size; // remember how long it is
2196 ((BTreeExtensionsRec*)btcb->refCon)->realFreeNodeCount = header.freeNodes; // keep track of real free nodes for progress
2197 }
2198
2199 exit:
2200 if (block.buffer)
2201 (void) ReleaseVolumeBlock(vcb, &block, kReleaseBlock);
2202
2203 return (err);
2204 }
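/*
 * Worked example of the BTree bitmap sizing above: the scavenger tracks one
 * bit per node, rounded up to whole bytes, so an attributes B-tree with
 * 4100 total nodes needs (4100 + 7) / 8 = 513 bytes of bitmap.  freeNodes
 * starts out equal to totalNodes, and realFreeNodeCount remembers the count
 * claimed by the on-disk header so progress reporting has a reference value.
 */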
2205
2206 /*
2207 * Function: RecordLastAttrBits
2208 *
2209 * Description:
2210 * Updates the Chinese Remainder Theorem buckets with extended attribute
2211 * information for the previous fileID stored in the global structure.
2212 *
2213 * Input:
2214 * GPtr - pointer to scavenger global area
2215 * * GPtr->lastAttrInfo.fileID - fileID of last attribute seen
2216 *
2217 * Output: Nothing
2218 */
2219 static void RecordLastAttrBits(SGlobPtr GPtr)
2220 {
2221 /* The lastAttrInfo structure is initialized to zero, hence we ignore
2222 * recording information for fileID = 0. fileIDs < 16 (except for
2223 * fileID = 2) can have extended attributes but do not have a
2224 * corresponding entry in the catalog Btree. Do not record these
2225 * fileIDs in the Chinese Remainder Theorem buckets. Currently we only
2226 * set extended attributes for fileID = 1 among these fileIDs,
2227 * and this can change in the future (see 3984119).
2228 */
2229 if ((GPtr->lastAttrInfo.fileID == 0) ||
2230 ((GPtr->lastAttrInfo.fileID < kHFSFirstUserCatalogNodeID) &&
2231 (GPtr->lastAttrInfo.fileID != kHFSRootFolderID))) {
2232 return;
2233 }
2234
2235 if (GPtr->lastAttrInfo.hasSecurity == true) {
2236 /* fileID has both extended attribute and ACL */
2237 RecordXAttrBits(GPtr, kHFSHasAttributesMask | kHFSHasSecurityMask,
2238 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2239 GPtr->lastAttrInfo.hasSecurity = false;
2240 } else {
2241 /* fileID only has extended attribute */
2242 RecordXAttrBits(GPtr, kHFSHasAttributesMask,
2243 GPtr->lastAttrInfo.fileID, kCalculatedAttributesRefNum);
2244 }
2245 }
2246
2247 /*
2248 * Function: setLastAttrAllocInfo
2249 *
2250 * Description:
2251 * Set the global structure of last extended attribute with
2252 * the allocation block information. Also set the isValid to true
2253 * to indicate that the data is valid and should be used to verify
2254 * allocation blocks.
2255 *
2256 * Input:
2257 * GPtr - pointer to scavenger global area
2258 * totalBlocks - total blocks allocated by the attribute
2259 * logicalSize - logical size of the attribute
2260 * calculatedTotalBlocks - blocks accounted for by the attribute in the current extent
2261 *
2262 * Output: Nothing
2263 */
2264 static void setLastAttrAllocInfo(SGlobPtr GPtr, u_int32_t totalBlocks,
2265 u_int64_t logicalSize, u_int32_t calculatedTotalBlocks)
2266 {
2267 GPtr->lastAttrInfo.totalBlocks = totalBlocks;
2268 GPtr->lastAttrInfo.logicalSize = logicalSize;
2269 GPtr->lastAttrInfo.calculatedTotalBlocks = calculatedTotalBlocks;
2270 GPtr->lastAttrInfo.isValid = true;
2271 }
2272
2273 /*
2274 * Function: CheckLastAttrAllocation
2275 *
2276 * Description:
2277 * Checks the allocation block information stored for the last
2278 * extended attribute seen during extended attribute BTree traversal.
2279 * Always resets the information stored for last EA allocation.
2280 *
2281 * Input: GPtr - pointer to scavenger global area
2282 *
2283 * Output: int - function result:
2284 * zero - no error
2285 * non-zero - error
2286 */
2287 static int CheckLastAttrAllocation(SGlobPtr GPtr)
2288 {
2289 int result = 0;
2290 u_int64_t bytes;
2291
2292 if (GPtr->lastAttrInfo.isValid == true) {
2293 if (GPtr->lastAttrInfo.totalBlocks !=
2294 GPtr->lastAttrInfo.calculatedTotalBlocks) {
2295 result = RecordBadAllocation(GPtr->lastAttrInfo.fileID,
2296 GPtr->lastAttrInfo.attrname, kEAData,
2297 GPtr->lastAttrInfo.totalBlocks,
2298 GPtr->lastAttrInfo.calculatedTotalBlocks);
2299 } else {
2300 bytes = (u_int64_t)GPtr->lastAttrInfo.calculatedTotalBlocks *
2301 (u_int64_t)GPtr->calculatedVCB->vcbBlockSize;
2302 if (GPtr->lastAttrInfo.logicalSize > bytes) {
2303 result = RecordTruncation(GPtr->lastAttrInfo.fileID,
2304 GPtr->lastAttrInfo.attrname, kEAData,
2305 GPtr->lastAttrInfo.logicalSize, bytes);
2306 }
2307 }
2308
2309 /* Invalidate information in the global structure */
2310 GPtr->lastAttrInfo.isValid = false;
2311 }
2312
2313 return (result);
2314 }
2315
2316 /*------------------------------------------------------------------------------
2317 Function: CheckAttributeRecord
2318
2319 Description:
2320 This is a callback function called for all leaf records in the
2321 Attribute BTree during the verify and repair stages. The basic
2322 functionality of the function is the same in both stages,
2323 except that whenever it finds corruption, the verify
2324 stage prints a message and the repair stage repairs it. In the verify
2325 stage, this function accounts for the allocation blocks used
2326 by extent-based extended attributes and also updates the Chinese
2327 Remainder Theorem buckets corresponding to the extended attribute
2328 and security bits.
2329
2330 1. Only in the verify stage, if the fileID or attribute name of the current
2331 extended attribute is not the same as the previous attribute's, check the
2332 allocation block counts for the previous attribute.
2333
2334 2. Only in the verify stage, if the fileID of the current attribute is not
2335 the same as the previous attribute's, record the previous fileID information
2336 for the Chinese Remainder Theorem.
2337
2338 3. For attribute type,
2339 kHFSPlusAttrForkData:
2340 ---------------------
2341 Do all of the following during verify stage and nothing in repair
2342 stage -
2343
2344 Check the start block for extended attribute from the key. If not
2345 zero, print error.
2346
2347 Account for blocks occupied by this extent and store the allocation
2348 information for this extent to check in future. Also update the
2349 last attribute information in the global structure.
2350
2351 kHFSPlusAttrExtents:
2352 --------------------
2353 If the current attribute's fileID is not the same as the previous fileID, or
2354 if the previous recordType is not a valid forkData or overflow extent
2355 record, report an error in the verify stage or mark it for deletion in
2356 the repair stage.
2357
2358 Do all of the following during verify stage and nothing in repair
2359 stage -
2360
2361 Check the start block for extended attribute from the key. If not
2362 equal to the total blocks seen up to the last attribute, print an error.
2363
2364 Account for blocks occupied by this extent. Update previous
2365 attribute allocation information with blocks seen in current
2366 extent. Also update last attribute block information in the global
2367 structure.
2368
2369 kHFSPlusAttrInlineData:
2370 -----------------------
2371 Only in the verify stage, check if the start block in the key is
2372 equal to zero. If not, print error.
2373
2374 Unknown type:
2375 -------------
2376 In verify stage, report error. In repair stage, mark the record
2377 to delete.
2378
2379 4. If a record is marked for deletion, delete the record.
2380
2381 5. Before exiting from the function, always do the following -
2382 a. Indicate if the extended attribute was an ACL
2383 b. Update previous fileID and recordType with current information.
2384 c. Update previous attribute name with current attribute name.
2385
2386 Input: GPtr - pointer to scavenger global area
2387 key - key for current attribute
2388 rec - attribute record
2389 reclen - length of the record
2390
2391 Output: int - function result:
2392 0 = no error
2393 n = error code
2394 ------------------------------------------------------------------------------*/
2395 int
2396 CheckAttributeRecord(SGlobPtr GPtr, const HFSPlusAttrKey *key, const HFSPlusAttrRecord *rec, UInt16 reclen)
2397 {
2398 int result = 0;
2399 unsigned char attrname[XATTR_MAXNAMELEN+1];
2400 size_t attrlen;
2401 u_int32_t blocks;
2402 u_int32_t fileID;
2403 struct attributeInfo *prevAttr;
2404 Boolean isSameAttr = true;
2405 Boolean doDelete = false;
2406 u_int16_t dfaStage = GetDFAStage();
2407
2408 /* Assert if volume is not HFS Plus */
2409 assert(VolumeObjectIsHFSPlus() == true);
2410
2411 prevAttr = &(GPtr->lastAttrInfo);
2412 fileID = key->fileID;
2413 /* Convert unicode attribute name to UTF-8 string */
2414 (void) utf_encodestr(key->attrName, key->attrNameLen * 2, attrname, &attrlen, sizeof(attrname));
2415 attrname[attrlen] = '\0';
2416
2417 /* Compare the current attribute to last attribute seen */
2418 if ((fileID != prevAttr->fileID) ||
2419 (strcmp((char *)attrname, (char *)prevAttr->attrname) != 0)) {
2420 isSameAttr = false;
2421 }
2422
2423 /* We check allocation block information and record EA information for
2424 * CRT bucket in verify stage and hence no need to do it again in
2425 * repair stage.
2426 */
2427 if (dfaStage == kVerifyStage) {
2428 /* Different attribute - check allocation block information */
2429 if (isSameAttr == false) {
2430 result = CheckLastAttrAllocation(GPtr);
2431 if (result) {
2432 goto update_out;
2433 }
2434 }
2435
2436 /* Different fileID - record information in CRT bucket */
2437 if (fileID != prevAttr->fileID) {
2438 RecordLastAttrBits(GPtr);
2439 }
2440 }
2441
2442 switch (rec->recordType) {
2443 case kHFSPlusAttrForkData: {
2444 /* Check start block only in verify stage to avoid printing message
2445 * in repair stage. Note that this corruption is not repairable
2446 * currently. Also check extents only in verify stage to avoid
2447 * false overlap extents error.
2448 */
2449 if (dfaStage == kVerifyStage) {
2450 /* Start block in the key should be zero */
2451 if (key->startBlock != 0) {
2452 RcdError(GPtr, E_ABlkSt);
2453 result = E_ABlkSt;
2454 goto err_out;
2455 }
2456
2457 /* Check the extent information and record overlapping extents, if any */
2458 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2459 rec->forkData.theFork.extents, &blocks);
2460 if (result) {
2461 goto update_out;
2462 }
2463
2464 /* Store allocation information to check in future */
2465 (void) setLastAttrAllocInfo(GPtr, rec->forkData.theFork.totalBlocks,
2466 rec->forkData.theFork.logicalSize, blocks);
2467 }
2468 break;
2469 }
2470
2471 case kHFSPlusAttrExtents: {
2472 /* Different attribute/fileID or incorrect previous record type */
2473 if ((isSameAttr == false) ||
2474 ((prevAttr->recordType != kHFSPlusAttrExtents) &&
2475 (prevAttr->recordType != kHFSPlusAttrForkData))) {
2476 if (dfaStage == kRepairStage) {
2477 /* Delete record in repair stage */
2478 doDelete = true;
2479 } else {
2480 /* Report error in verify stage */
2481 RcdError(GPtr, E_AttrRec);
2482 GPtr->ABTStat |= S_AttrRec;
2483 goto err_out;
2484 }
2485 }
2486
2487 /* Check start block only in verify stage to avoid printing message
2488 * in repair stage. Note that this corruption is not repairable
2489 * currently. Also check extents only in verify stage to avoid
2490 * false overlap extents error.
2491 */
2492 if (dfaStage == kVerifyStage) {
2493 /* startBlock in the key should be equal to total blocks
2494 * seen up to the last attribute.
2495 */
2496 if (key->startBlock != prevAttr->calculatedTotalBlocks) {
2497 RcdError(GPtr, E_ABlkSt);
2498 result = E_ABlkSt;
2499 goto err_out;
2500 }
2501
2502 /* Check the extent information and record overlapping extents, if any */
2503 result = CheckFileExtents (GPtr, fileID, kEAData, attrname,
2504 rec->overflowExtents.extents, &blocks);
2505 if (result) {
2506 goto update_out;
2507 }
2508
2509 /* Increment the blocks seen so far for this attribute */
2510 prevAttr->calculatedTotalBlocks += blocks;
2511 }
2512 break;
2513 }
2514
2515 case kHFSPlusAttrInlineData: {
2516 /* Check start block only in verify stage to avoid printing message
2517 * in repair stage.
2518 */
2519 if (dfaStage == kVerifyStage) {
2520 /* Start block in the key should be zero */
2521 if (key->startBlock != 0) {
2522 RcdError(GPtr, E_ABlkSt);
2523 result = E_ABlkSt;
2524 goto err_out;
2525 }
2526 }
2527 break;
2528 }
2529
2530 default: {
2531 /* Unknown attribute record */
2532 if (dfaStage == kRepairStage) {
2533 /* Delete record in repair stage */
2534 doDelete = true;
2535 } else {
2536 /* Report error in verify stage */
2537 RcdError(GPtr, E_AttrRec);
2538 GPtr->ABTStat |= S_AttrRec;
2539 goto err_out;
2540 }
2541 break;
2542 }
2543 };
2544
2545 if (doDelete == true) {
2546 result = DeleteBTreeRecord(GPtr->calculatedAttributesFCB, key);
2547 DPRINTF (d_info|d_xattr, "%s: Deleting attribute %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2548 if (result) {
2549 DPRINTF (d_error|d_xattr, "%s: Error in deleting record for %s for fileID %d, type = %d\n", __FUNCTION__, attrname, key->fileID, rec->recordType);
2550 }
2551
2552 /* Set flags to mark header and map dirty */
2553 GPtr->ABTStat |= S_BTH + S_BTM;
2554 goto err_out;
2555 }
2556
2557 update_out:
2558 /* Note that an ACL exists for this fileID */
2559 if (strcmp((char *)attrname, KAUTH_FILESEC_XATTR) == 0) {
2560 prevAttr->hasSecurity = true;
2561 }
2562
2563 /* Always update the last recordType, fileID and attribute name before exiting */
2564 prevAttr->recordType = rec->recordType;
2565 prevAttr->fileID = fileID;
2566 (void) strlcpy((char *)prevAttr->attrname, (char *)attrname, sizeof(prevAttr->attrname));
2567
2568 goto out;
2569
2570 err_out:
2571 /* If the current record is invalid/bogus, decide whether to update
2572 * fileID stored in global structure for future comparison based on the
2573 * previous fileID.
2574 * If the current bogus record's fileID is different from fileID of the
2575 * previous good record, we do not want to account for bogus fileID in
2576 * the Chinese Remainder Theorem when we see next good record.
2577 * Hence reset the fileID in global structure to dummy value. Example,
2578 * if the fileIDs are 10 15 20 and record with ID=15 is bogus, we do not
2579 * want to account for record with ID=15.
2580 * If the current bogus record's fileID is same as the fileID of the
2581 * previous good record, we want to account for this fileID in the
2582 * next good record we see after this bogus record. Hence do not
2583 * reset the fileID to dummy value. Example, if the records have fileID
2584 * 10 10 30 and the second record with ID=10 is bogus, we want to
2585 * account for ID=10 when we see record with ID=30.
2586 */
2587 if (prevAttr->fileID != fileID) {
2588 prevAttr->fileID = 0;
2589 }
2590
2591 out:
2592 return(result);
2593 }
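/*
 * Worked example (added for illustration) of the extent-based attribute
 * accounting above, verify stage only.  Suppose a single extended attribute
 * of fileID 25 occupies 10 allocation blocks: 8 mapped by its
 * kHFSPlusAttrForkData record and 2 more by a kHFSPlusAttrExtents
 * (overflow) record.
 *
 *   fork data record : key->startBlock must be 0;
 *                      setLastAttrAllocInfo() stores totalBlocks = 10 and
 *                      calculatedTotalBlocks = 8.
 *   overflow record  : key->startBlock must equal 8 (the blocks seen so
 *                      far); calculatedTotalBlocks += 2, giving 10.
 *
 * When the next attribute (different fileID or name) is seen,
 * CheckLastAttrAllocation() compares totalBlocks (10) against
 * calculatedTotalBlocks (10); a mismatch is recorded with
 * RecordBadAllocation(), and a logicalSize larger than
 * calculatedTotalBlocks * vcbBlockSize is recorded with RecordTruncation().
 */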
2594
2595 /* Function: RecordXAttrBits
2596 *
2597 * Description:
2598 * This function increments the prime number buckets for the associated
2599 * prime bucket set based on the flags and btreetype, in order to detect
2600 * discrepancies between the attribute btree and catalog btree for
2601 * extended attribute data consistency. This function is based on the
2602 * Chinese Remainder Theorem.
2603 *
2604 * Algorithm:
2605 * 1. If neither kHFSHasAttributesMask nor kHFSHasSecurityMask is set,
2606 * return.
2607 * 2. Based on btreetype and the flags, determine which prime number
2608 * bucket should be updated. Initialize pointers accordingly.
2609 * 3. Divide the fileID by each pre-defined prime number and store the
2610 * remainders.
2611 * 4. Increment each prime number bucket at the offset of the
2612 * corresponding remainder by one.
2613 *
2614 * Input: 1. GPtr - pointer to global scavenger area
2615 * 2. flags - can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2616 * 3. fileid - fileID for which a particular extended attribute is seen
2617 * 4. btreetype - kCalculatedCatalogRefNum or kCalculatedAttributesRefNum;
2618 * indicates which btree's prime number buckets should be incremented
2619 *
2620 * Output: nil
2621 */
2622 void RecordXAttrBits(SGlobPtr GPtr, UInt16 flags, HFSCatalogNodeID fileid, UInt16 btreetype)
2623 {
2624 PrimeBuckets *cur_attr = NULL;
2625 PrimeBuckets *cur_sec = NULL;
2626
2627 if ( ((flags & kHFSHasAttributesMask) == 0) &&
2628 ((flags & kHFSHasSecurityMask) == 0) ) {
2629 /* No attributes exist for this fileID */
2630 goto out;
2631 }
2632
2633 /* Determine which buckets we are updating */
2634 if (btreetype == kCalculatedCatalogRefNum) {
2635 /* Catalog BTree buckets */
2636 if (flags & kHFSHasAttributesMask) {
2637 cur_attr = &(GPtr->CBTAttrBucket);
2638 GPtr->cat_ea_count++;
2639 }
2640 if (flags & kHFSHasSecurityMask) {
2641 cur_sec = &(GPtr->CBTSecurityBucket);
2642 GPtr->cat_acl_count++;
2643 }
2644 } else if (btreetype == kCalculatedAttributesRefNum) {
2645 /* Attribute BTree buckets */
2646 if (flags & kHFSHasAttributesMask) {
2647 cur_attr = &(GPtr->ABTAttrBucket);
2648 GPtr->attr_ea_count++;
2649 }
2650 if (flags & kHFSHasSecurityMask) {
2651 cur_sec = &(GPtr->ABTSecurityBucket);
2652 GPtr->attr_acl_count++;
2653 }
2654 } else {
2655 /* Incorrect btreetype found */
2656 goto out;
2657 }
2658
2659 if (cur_attr) {
2660 add_prime_bucket_uint32(cur_attr, fileid);
2661 }
2662
2663 if (cur_sec) {
2664 add_prime_bucket_uint32(cur_sec, fileid);
2665 }
2666
2667 out:
2668 return;
2669 }
2670
2671 /* Function: CompareXattrPrimeBuckets
2672 *
2673 * Description:
2674 * This function compares the prime number buckets for catalog btree
2675 * and attribute btree for the given attribute type (normal attribute
2676 * bit or security bit).
2677 *
2678 * Input: 1. GPtr - pointer to global scavenger area
2679 * 2. BitMask - indicates which attribute type should be compared;
2680 * can include kHFSHasAttributesMask and/or kHFSHasSecurityMask
2681 * Output: zero - buckets were compared successfully
2682 * non-zero - buckets were not compared
2683 */
2684 static int CompareXattrPrimeBuckets(SGlobPtr GPtr, UInt16 BitMask)
2685 {
2686 int result = 1;
2687 PrimeBuckets *cat; /* Catalog BTree */
2688 PrimeBuckets *attr; /* Attribute BTree */
2689
2690 /* Find the correct PrimeBuckets to compare */
2691 if (BitMask & kHFSHasAttributesMask) {
2692 /* Compare buckets for attribute bit */
2693 cat = &(GPtr->CBTAttrBucket);
2694 attr = &(GPtr->ABTAttrBucket);
2695 } else if (BitMask & kHFSHasSecurityMask) {
2696 /* Compare buckets for security bit */
2697 cat = &(GPtr->CBTSecurityBucket);
2698 attr = &(GPtr->ABTSecurityBucket);
2699 } else {
2700 plog ("%s: Incorrect BitMask found.\n", __FUNCTION__);
2701 goto out;
2702 }
2703
2704 result = compare_prime_buckets(cat, attr);
2705 if (result) {
2706 char catbtree[32], attrbtree[32];
2707 /* Unequal values found, set the error bit in ABTStat */
2708 if (BitMask & kHFSHasAttributesMask) {
2709 fsckPrint(GPtr->context, E_IncorrectAttrCount);
2710 sprintf (catbtree, "%u", GPtr->cat_ea_count);
2711 sprintf (attrbtree, "%u", GPtr->attr_ea_count);
2712 fsckPrint(GPtr->context, E_BadValue, attrbtree, catbtree);
2713 GPtr->ABTStat |= S_AttributeCount;
2714 } else {
2715 fsckPrint(GPtr->context, E_IncorrectSecurityCount);
2716 sprintf (catbtree, "%u", GPtr->cat_acl_count);
2717 sprintf (attrbtree, "%u", GPtr->attr_acl_count);
2718 fsckPrint (GPtr->context, E_BadValue, attrbtree, catbtree);
2719 GPtr->ABTStat |= S_SecurityCount;
2720 }
2721 }
2722
2723 result = 0;
2724
2725 out:
2726 return result;
2727 }
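#if 0 /* Illustrative sketch only -- not part of fsck_hfs */
/*
 * A minimal sketch of the prime-bucket bookkeeping described above.  For each
 * prime p in a fixed set, each B-tree keeps a histogram of (fileID mod p).
 * If the catalog B-tree flags and the attribute B-tree records cover the same
 * multiset of fileIDs, every histogram matches; a mismatch in any bucket
 * exposes a discrepancy without having to store the fileIDs themselves.
 * The structure layout and the prime set below are assumptions made purely
 * for illustration -- the real PrimeBuckets type and the
 * add_prime_bucket_uint32()/compare_prime_buckets() routines used above are
 * defined elsewhere in fsck_hfs.
 */
#define kSketchNumPrimes	3
static const u_int32_t sketch_primes[kSketchNumPrimes] = { 3, 5, 7 };

typedef struct SketchBuckets {
	u_int32_t	counts[kSketchNumPrimes][7];	/* 7 = largest prime used here */
} SketchBuckets;

static void sketch_add_fileid( SketchBuckets *b, u_int32_t fileid )
{
	int i;

	for ( i = 0; i < kSketchNumPrimes; i++ )
		b->counts[i][ fileid % sketch_primes[i] ]++;
}

static int sketch_compare( const SketchBuckets *cat, const SketchBuckets *attr )
{
	int i, r;

	for ( i = 0; i < kSketchNumPrimes; i++ )
		for ( r = 0; r < (int)sketch_primes[i]; r++ )
			if ( cat->counts[i][r] != attr->counts[i][r] )
				return( 1 );	/* buckets differ => the two btrees disagree */

	return( 0 );
}
#endif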
2728
2729 /*------------------------------------------------------------------------------
2730
2731 Function: AttrBTChk - (Attributes BTree Check)
2732
2733 Function: Verifies the attributes BTree structure.
2734
2735 Input: GPtr - pointer to scavenger global area
2736
2737 Output: AttrBTChk - function result:
2738 0 = no error
2739 n = error code
2740 ------------------------------------------------------------------------------*/
2741
2742 OSErr AttrBTChk( SGlobPtr GPtr )
2743 {
2744 OSErr err;
2745
2746 //
2747 // If this volume has no attributes BTree, then skip this check
2748 //
2749 if (GPtr->calculatedVCB->vcbAttributesFile == NULL)
2750 return noErr;
2751
2752 // Write the status message here to avoid potential confusion to user.
2753 fsckPrint(GPtr->context, hfsExtAttrBTCheck);
2754
2755 // Set up
2756 GPtr->TarID = kHFSAttributesFileID; // target = attributes file
2757 GetVolumeObjectBlockNum( &GPtr->TarBlock ); // target block = VHB/MDB
2758
2759 //
2760 // check out the BTree structure
2761 //
2762
2763 err = BTCheck( GPtr, kCalculatedAttributesRefNum, (CheckLeafRecordProcPtr)CheckAttributeRecord);
2764 ReturnIfError( err ); // invalid attributes file BTree
2765
2766 // check the allocation block information about the last attribute
2767 err = CheckLastAttrAllocation(GPtr);
2768 ReturnIfError(err);
2769
2770 // record the last fileID for Chinese Remainder Theorem comparison
2771 RecordLastAttrBits(GPtr);
2772
2773 // compare the attributes prime buckets calculated from catalog btree and attribute btree
2774 err = CompareXattrPrimeBuckets(GPtr, kHFSHasAttributesMask);
2775 ReturnIfError( err );
2776
2777 // compare the security prime buckets calculated from catalog btree and attribute btree
2778 err = CompareXattrPrimeBuckets(GPtr, kHFSHasSecurityMask);
2779 ReturnIfError( err );
2780
2781 //
2782 // check out the allocation map structure
2783 //
2784
2785 err = BTMapChk( GPtr, kCalculatedAttributesRefNum );
2786 ReturnIfError( err ); // Invalid attributes BTree map
2787
2788 //
2789 // Make sure unused nodes in the B-tree are zero filled.
2790 //
2791 err = BTCheckUnusedNodes(GPtr, kCalculatedAttributesRefNum, &GPtr->ABTStat);
2792 ReturnIfError( err );
2793
2794 //
2795 // compare BTree header record on disk with scavenger's BTree header record
2796 //
2797
2798 err = CmpBTH( GPtr, kCalculatedAttributesRefNum );
2799 ReturnIfError( err );
2800
2801 //
2802 // compare BTree map on disk with scavenger's BTree map
2803 //
2804
2805 err = CmpBTM( GPtr, kCalculatedAttributesRefNum );
2806
2807 return( err );
2808 }
2809
2810
2811 /*------------------------------------------------------------------------------
2812
2813 Name: RcdValErr - (Record Valence Error)
2814
2815 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2816 list, to describe an incorrect valence count for possible repair.
2817
2818 Input: GPtr - ptr to scavenger global data
2819 type - error code (E_xxx), which should be >0
2820 correct - the correct valence, as computed here
2821 incorrect - the incorrect valence as found in volume
2822 parid - the parent id, if S_Valence error
2823
2824 Output: 0 - no error
2825 R_NoMem - not enough mem to allocate record
2826 ------------------------------------------------------------------------------*/
2827
2828 static int RcdValErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID parid ) /* the ParID, if needed */
2829 {
2830 RepairOrderPtr p; /* the new node we compile */
2831 SInt16 n; /* size of node we allocate */
2832 Boolean isHFSPlus;
2833 char goodStr[32], badStr[32];
2834
2835 isHFSPlus = VolumeObjectIsHFSPlus( );
2836 fsckPrint(GPtr->context, type);
2837 sprintf(goodStr, "%u", correct);
2838 sprintf(badStr, "%u", incorrect);
2839 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2840
2841 if (type == E_DirVal) /* if normal directory valence error */
2842 n = CatalogNameSize( &GPtr->CName, isHFSPlus);
2843 else
2844 n = 0; /* other errors don't need the name */
2845
2846 p = AllocMinorRepairOrder( GPtr,n ); /* get the node */
2847 if (p==NULL) /* quit if out of room */
2848 return (R_NoMem);
2849
2850 p->type = type; /* save error info */
2851 p->correct = correct;
2852 p->incorrect = incorrect;
2853 p->parid = parid;
2854
2855 if ( n != 0 ) /* if name needed */
2856 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
2857
2858 GPtr->CatStat |= S_Valence; /* set flag to trigger repair */
2859
2860 return( noErr ); /* successful return */
2861 }
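/*
 * Worked example (added for illustration) of how RcdValErr is used: if a
 * folder's on-disk catalog record claims a valence of 5 but CatHChk only
 * enumerated 4 children (dprP->offspringIndex - 1 == 4), the caller issues
 * RcdValErr( GPtr, E_DirVal, 4, 5, dprP->parentDirID ), which prints the
 * good/bad values, queues a minor repair order carrying the directory name,
 * and sets S_Valence so the repair pass can fix the stored valence.
 */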
2862
2863 /*------------------------------------------------------------------------------
2864
2865 Name: RcdHsFldCntErr - (Record HasFolderCount)
2866
2867 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2868 list, to describe a folder flag missing the HasFolderCount bit
2869
2870 Input: GPtr - ptr to scavenger global data
2871 type - error code (E_xxx), which should be >0
2872 correct - the folder mask, as computed here
2873 incorrect - the folder mask, as found in volume
2874 fid - the folder id
2875
2876 Output: 0 - no error
2877 R_NoMem - not enough mem to allocate record
2878 ------------------------------------------------------------------------------*/
2879
2880 int RcdHsFldCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2881 {
2882 RepairOrderPtr p; /* the new node we compile */
2883 char goodStr[32], badStr[32];
2884 fsckPrint(GPtr->context, type, fid);
2885 sprintf(goodStr, "%#x", correct);
2886 sprintf(badStr, "%#x", incorrect);
2887 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2888
2889 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2890 if (p==NULL) /* quit if out of room */
2891 return (R_NoMem);
2892
2893 p->type = type; /* save error info */
2894 p->correct = correct;
2895 p->incorrect = incorrect;
2896 p->parid = fid;
2897
2898 return( noErr ); /* successful return */
2899 }
2900 /*------------------------------------------------------------------------------
2901
2902 Name: RcdFCntErr - (Record Folder Count)
2903
2904 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2905 list, to describe an incorrect folder count for possible repair.
2906
2907 Input: GPtr - ptr to scavenger global data
2908 type - error code (E_xxx), which should be >0
2909 correct - the correct folder count, as computed here
2910 incorrect - the incorrect folder count as found in volume
2911 fid - the folder id
2912
2913 Output: 0 - no error
2914 R_NoMem - not enough mem to allocate record
2915 ------------------------------------------------------------------------------*/
2916
2917 int RcdFCntErr( SGlobPtr GPtr, OSErr type, UInt32 correct, UInt32 incorrect, HFSCatalogNodeID fid )
2918 {
2919 RepairOrderPtr p; /* the new node we compile */
2920 char goodStr[32], badStr[32];
2921
2922 fsckPrint(GPtr->context, type, fid);
2923 sprintf(goodStr, "%u", correct);
2924 sprintf(badStr, "%u", incorrect);
2925 fsckPrint(GPtr->context, E_BadValue, goodStr, badStr);
2926
2927 p = AllocMinorRepairOrder( GPtr,0 ); /* get the node */
2928 if (p==NULL) /* quit if out of room */
2929 return (R_NoMem);
2930
2931 p->type = type; /* save error info */
2932 p->correct = correct;
2933 p->incorrect = incorrect;
2934 p->parid = fid;
2935
2936 return( noErr ); /* successful return */
2937 }
2938
2939 /*------------------------------------------------------------------------------
2940
2941 Name: RcdMDBEmbededVolDescriptionErr - (Record MDB Embedded Volume Description Error)
2942
2943 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2944 list, to describe the error for possible repair.
2945
2946 Input: GPtr - ptr to scavenger global data
2947 type - error code (E_xxx), which should be >0
2948 mdb - pointer to the HFS master directory block that holds
2949 the embedded volume description to be recorded
2950
2951 Output: 0 - no error
2952 R_NoMem - not enough mem to allocate record
2953 ------------------------------------------------------------------------------*/
2954
2955 static OSErr RcdMDBEmbededVolDescriptionErr( SGlobPtr GPtr, OSErr type, HFSMasterDirectoryBlock *mdb )
2956 {
2957 RepairOrderPtr p; // the new node we compile
2958 EmbededVolDescription *desc;
2959
2960 RcdError( GPtr, type ); // first, record the error
2961
2962 p = AllocMinorRepairOrder( GPtr, sizeof(EmbededVolDescription) ); // get the node
2963 if ( p == nil ) return( R_NoMem );
2964
2965 p->type = type; // save error info
2966 desc = (EmbededVolDescription *) &(p->name);
2967 desc->drAlBlSt = mdb->drAlBlSt;
2968 desc->drEmbedSigWord = mdb->drEmbedSigWord;
2969 desc->drEmbedExtent.startBlock = mdb->drEmbedExtent.startBlock;
2970 desc->drEmbedExtent.blockCount = mdb->drEmbedExtent.blockCount;
2971
2972 GPtr->VIStat |= S_InvalidWrapperExtents; // set flag to trigger repair
2973
2974 return( noErr ); // successful return
2975 }
2976
2977
2978 #if 0 // not used at this time
2979 /*------------------------------------------------------------------------------
2980
2981 Name: RcdInvalidWrapperExtents - (Record Invalid Wrapper Extents)
2982
2983 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
2984 list, to describe the error for possible repair.
2985
2986 Input: GPtr - ptr to scavenger global data
2987 type - error code (E_xxx), which should be >0
2988 correct - the correct valence, as computed here
2989 incorrect - the incorrect valence as found in volume
2990
2991 Output: 0 - no error
2992 R_NoMem - not enough mem to allocate record
2993 ------------------------------------------------------------------------------*/
2994
2995 static OSErr RcdInvalidWrapperExtents( SGlobPtr GPtr, OSErr type )
2996 {
2997 RepairOrderPtr p; // the new node we compile
2998
2999 RcdError( GPtr, type ); // first, record the error
3000
3001 p = AllocMinorRepairOrder( GPtr, 0 ); // get the node
3002 if ( p == nil ) return( R_NoMem );
3003
3004 p->type = type; // save error info
3005
3006 GPtr->VIStat |= S_BadMDBdrAlBlSt; // set flag to trigger repair
3007
3008 return( noErr ); // successful return
3009 }
3010 #endif
3011
3012
3013 #if 0 // We just check and fix them in SRepair.c
3014 /*------------------------------------------------------------------------------
3015
3016 Name: RcdOrphanedExtentErr
3017
3018 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3019 list, to describe an orphaned extent for possible repair.
3020
3021 Input: GPtr - ptr to scavenger global data
3022 type - error code (E_xxx), which should be >0
3023 theKey - the extent key of the orphaned extent
3024
3025 Output: 0 - no error
3026 R_NoMem - not enough mem to allocate record
3027 ------------------------------------------------------------------------------*/
3028
3029 static OSErr RcdOrphanedExtentErr ( SGlobPtr GPtr, SInt16 type, void *theKey )
3030 {
3031 RepairOrderPtr p; /* the new node we compile */
3032 SInt16 n; /* size of node we allocate */
3033 Boolean isHFSPlus;
3034
3035 isHFSPlus = VolumeObjectIsHFSPlus( );
3036 RcdError( GPtr,type ); /* first, record the error */
3037
3038 if ( isHFSPlus )
3039 n = sizeof( HFSPlusExtentKey );
3040 else
3041 n = sizeof( HFSExtentKey );
3042
3043 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3044 if ( p == NULL ) /* quit if out of room */
3045 return( R_NoMem );
3046
3047 CopyMemory( theKey, p->name, n ); /* copy in the key */
3048
3049 p->type = type; /* save error info */
3050
3051 GPtr->EBTStat |= S_OrphanedExtent; /* set flag to trigger repair */
3052
3053 return( noErr ); /* successful return */
3054 }
3055 #endif
3056
3057
3058 /*------------------------------------------------------------------------------
3059
3060 Function: VInfoChk - (Volume Info Check)
3061
3062 Function: Verifies volume level information.
3063
3064 Input: GPtr - pointer to scavenger global area
3065
3066 Output: VInfoChk - function result:
3067 0 = no error
3068 n = error code
3069 ------------------------------------------------------------------------------*/
3070
3071 OSErr VInfoChk( SGlobPtr GPtr )
3072 {
3073 OSErr result;
3074 UInt16 recSize;
3075 Boolean isHFSPlus;
3076 UInt32 hint;
3077 UInt64 maxClump;
3078 SVCB *vcb;
3079 VolumeObjectPtr myVOPtr;
3080 CatalogRecord record;
3081 CatalogKey foundKey;
3082 BlockDescriptor altBlock;
3083 BlockDescriptor priBlock;
3084
3085 vcb = GPtr->calculatedVCB;
3086 altBlock.buffer = priBlock.buffer = NULL;
3087 isHFSPlus = VolumeObjectIsHFSPlus( );
3088 myVOPtr = GetVolumeObjectPtr( );
3089
3090 // locate the catalog record for the root directory…
3091 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3092 GPtr->TarID = kHFSCatalogFileID; /* target = catalog */
3093 GPtr->TarBlock = hint; /* target block = returned hint */
3094 if ( result != noErr )
3095 {
3096 result = IntError( GPtr, result );
3097 return( result );
3098 }
3099
3100 GPtr->TarID = AMDB_FNum; // target = alternate MDB or VHB
3101 GetVolumeObjectAlternateBlockNum( &GPtr->TarBlock );
3102 result = GetVolumeObjectAlternateBlock( &altBlock );
3103
3104 // invalidate if we have not marked the alternate as OK
3105 if ( isHFSPlus ) {
3106 if ( (myVOPtr->flags & kVO_AltVHBOK) == 0 )
3107 result = badMDBErr;
3108 }
3109 else if ( (myVOPtr->flags & kVO_AltMDBOK) == 0 ) {
3110 result = badMDBErr;
3111 }
3112 if ( result != noErr ) {
3113 GPtr->VIStat = GPtr->VIStat | S_MDB;
3114 if ( VolumeObjectIsHFS( ) ) {
3115 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3116 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3117 plog("\tinvalid alternate MDB at %qd result %d \n", GPtr->TarBlock, result);
3118 }
3119 else {
3120 WriteError( GPtr, E_VolumeHeaderDamaged, 0, 0 );
3121 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3122 plog("\tinvalid alternate VHB at %qd result %d \n", GPtr->TarBlock, result);
3123 }
3124 result = noErr;
3125 goto exit;
3126 }
3127
3128 GPtr->TarID = MDB_FNum; // target = primary MDB or VHB
3129 GetVolumeObjectPrimaryBlockNum( &GPtr->TarBlock );
3130 result = GetVolumeObjectPrimaryBlock( &priBlock );
3131
3132 // invalidate if we have not marked the primary as OK
3133 if ( isHFSPlus ) {
3134 if ( (myVOPtr->flags & kVO_PriVHBOK) == 0 )
3135 result = badMDBErr;
3136 }
3137 else if ( (myVOPtr->flags & kVO_PriMDBOK) == 0 ) {
3138 result = badMDBErr;
3139 }
3140 if ( result != noErr ) {
3141 GPtr->VIStat = GPtr->VIStat | S_MDB;
3142 if ( VolumeObjectIsHFS( ) ) {
3143 WriteError( GPtr, E_MDBDamaged, 1, 0 );
3144 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3145 plog("\tinvalid primary MDB at %qd result %d \n", GPtr->TarBlock, result);
3146 }
3147 else {
3148 WriteError( GPtr, E_VolumeHeaderDamaged, 1, 0 );
3149 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3150 plog("\tinvalid primary VHB at %qd result %d \n", GPtr->TarBlock, result);
3151 }
3152 result = noErr;
3153 goto exit;
3154 }
3155
3156 // check to see that embedded HFS plus volumes still have both (alternate and primary) MDBs
3157 if ( VolumeObjectIsEmbeddedHFSPlus( ) &&
3158 ( (myVOPtr->flags & kVO_PriMDBOK) == 0 || (myVOPtr->flags & kVO_AltMDBOK) == 0 ) )
3159 {
3160 GPtr->VIStat |= S_WMDB;
3161 WriteError( GPtr, E_MDBDamaged, 0, 0 );
3162 if ( fsckGetVerbosity(GPtr->context) >= kDebugLog )
3163 plog("\tinvalid wrapper MDB \n");
3164 }
3165
3166 if ( isHFSPlus )
3167 {
3168 HFSPlusVolumeHeader * volumeHeader;
3169 HFSPlusVolumeHeader * alternateVolumeHeader;
3170
3171 alternateVolumeHeader = (HFSPlusVolumeHeader *) altBlock.buffer;
3172 volumeHeader = (HFSPlusVolumeHeader *) priBlock.buffer;
3173
3174 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3175
3176 // check out creation and last mod dates
3177 vcb->vcbCreateDate = alternateVolumeHeader->createDate; // use creation date in alt MDB
3178 vcb->vcbModifyDate = volumeHeader->modifyDate; // don't change last mod date
3179 vcb->vcbCheckedDate = volumeHeader->checkedDate; // don't change checked date
3180
3181 // 3882639: Removed check for volume attributes in HFS Plus
3182 vcb->vcbAttributes = volumeHeader->attributes;
3183
3184 // verify allocation map ptr
3185 if ( volumeHeader->nextAllocation < vcb->vcbTotalBlocks )
3186 vcb->vcbNextAllocation = volumeHeader->nextAllocation;
3187 else
3188 vcb->vcbNextAllocation = 0;
3189
3190 // verify default clump sizes
3191 if ( (volumeHeader->rsrcClumpSize > 0) &&
3192 (volumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3193 ((volumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3194 vcb->vcbRsrcClumpSize = volumeHeader->rsrcClumpSize;
3195 else if ( (alternateVolumeHeader->rsrcClumpSize > 0) &&
3196 (alternateVolumeHeader->rsrcClumpSize <= kMaxClumpSize) &&
3197 ((alternateVolumeHeader->rsrcClumpSize % vcb->vcbBlockSize) == 0) )
3198 vcb->vcbRsrcClumpSize = alternateVolumeHeader->rsrcClumpSize;
3199 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3200 vcb->vcbRsrcClumpSize = 4 * vcb->vcbBlockSize;
3201 else
3202 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3203
3204 if ( vcb->vcbRsrcClumpSize > kMaxClumpSize )
3205 vcb->vcbRsrcClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3206
3207 if ( (volumeHeader->dataClumpSize > 0) && (volumeHeader->dataClumpSize <= kMaxClumpSize) &&
3208 ((volumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3209 vcb->vcbDataClumpSize = volumeHeader->dataClumpSize;
3210 else if ( (alternateVolumeHeader->dataClumpSize > 0) &&
3211 (alternateVolumeHeader->dataClumpSize <= kMaxClumpSize) &&
3212 ((alternateVolumeHeader->dataClumpSize % vcb->vcbBlockSize) == 0) )
3213 vcb->vcbDataClumpSize = alternateVolumeHeader->dataClumpSize;
3214 else if (4ULL * vcb->vcbBlockSize <= kMaxClumpSize)
3215 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3216 else
3217 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3218
3219 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3220 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3221
3222 /* Verify next CNode ID.
3223 * If volumeHeader->nextCatalogID < vcb->vcbNextCatalogID, probably
3224 * nextCatalogID has wrapped around.
3225 * If volumeHeader->nextCatalogID > vcb->vcbNextCatalogID, probably
3226 * many files were created and deleted, followed by no new file
3227 * creation.
3228 */
3229 if ( (volumeHeader->nextCatalogID > vcb->vcbNextCatalogID) )
3230 vcb->vcbNextCatalogID = volumeHeader->nextCatalogID;
3231
3232 //••TBD location and unicode? volumename
3233 // verify the volume name
3234 result = ChkCName( GPtr, (const CatalogName*) &foundKey.hfsPlus.nodeName, isHFSPlus );
3235
3236 // verify last backup date and backup sequence number
3237 vcb->vcbBackupDate = volumeHeader->backupDate; /* don't change last backup date */
3238
3239 // verify write count
3240 vcb->vcbWriteCount = volumeHeader->writeCount; /* don't change write count */
3241
3242 // check out extent file clump size
3243 if ( ((volumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3244 (volumeHeader->extentsFile.clumpSize <= maxClump) )
3245 vcb->vcbExtentsFile->fcbClumpSize = volumeHeader->extentsFile.clumpSize;
3246 else if ( ((alternateVolumeHeader->extentsFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3247 (alternateVolumeHeader->extentsFile.clumpSize <= maxClump) )
3248 vcb->vcbExtentsFile->fcbClumpSize = alternateVolumeHeader->extentsFile.clumpSize;
3249 else
3250 vcb->vcbExtentsFile->fcbClumpSize =
3251 (alternateVolumeHeader->extentsFile.extents[0].blockCount * vcb->vcbBlockSize);
3252
3253 // check out catalog file clump size
3254 if ( ((volumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3255 (volumeHeader->catalogFile.clumpSize <= maxClump) )
3256 vcb->vcbCatalogFile->fcbClumpSize = volumeHeader->catalogFile.clumpSize;
3257 else if ( ((alternateVolumeHeader->catalogFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3258 (alternateVolumeHeader->catalogFile.clumpSize <= maxClump) )
3259 vcb->vcbCatalogFile->fcbClumpSize = alternateVolumeHeader->catalogFile.clumpSize;
3260 else
3261 vcb->vcbCatalogFile->fcbClumpSize =
3262 (alternateVolumeHeader->catalogFile.extents[0].blockCount * vcb->vcbBlockSize);
3263
3264 // check out allocations file clump size
3265 if ( ((volumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3266 (volumeHeader->allocationFile.clumpSize <= maxClump) )
3267 vcb->vcbAllocationFile->fcbClumpSize = volumeHeader->allocationFile.clumpSize;
3268 else if ( ((alternateVolumeHeader->allocationFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3269 (alternateVolumeHeader->allocationFile.clumpSize <= maxClump) )
3270 vcb->vcbAllocationFile->fcbClumpSize = alternateVolumeHeader->allocationFile.clumpSize;
3271 else
3272 vcb->vcbAllocationFile->fcbClumpSize =
3273 (alternateVolumeHeader->allocationFile.extents[0].blockCount * vcb->vcbBlockSize);
3274
3275 // check out attribute file clump size
3276 if (vcb->vcbAttributesFile) {
3277 if ( ((volumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3278 (volumeHeader->attributesFile.clumpSize <= maxClump) &&
3279 (volumeHeader->attributesFile.clumpSize != 0))
3280 vcb->vcbAttributesFile->fcbClumpSize = volumeHeader->attributesFile.clumpSize;
3281 else if ( ((alternateVolumeHeader->attributesFile.clumpSize % vcb->vcbBlockSize) == 0) &&
3282 (alternateVolumeHeader->attributesFile.clumpSize <= maxClump) &&
3283 (alternateVolumeHeader->attributesFile.clumpSize != 0))
3284 vcb->vcbAttributesFile->fcbClumpSize = alternateVolumeHeader->attributesFile.clumpSize;
3285 else if (vcb->vcbCatalogFile->fcbClumpSize != 0)
3286 // The original attribute clump may be too small, use catalog's
3287 vcb->vcbAttributesFile->fcbClumpSize = vcb->vcbCatalogFile->fcbClumpSize;
3288 else
3289 vcb->vcbAttributesFile->fcbClumpSize =
3290 alternateVolumeHeader->attributesFile.extents[0].blockCount * vcb->vcbBlockSize;
3291 }
3292
3293 CopyMemory( volumeHeader->finderInfo, vcb->vcbFinderInfo, sizeof(vcb->vcbFinderInfo) );
3294
3295 // Now compare verified Volume Header info (in the form of a vcb) with Volume Header info on disk
3296 result = CompareVolumeHeader( GPtr, volumeHeader );
3297
3298 // check to see that embedded volume info is correct in both wrapper MDBs
3299 CheckEmbeddedVolInfoInMDBs( GPtr );
3300
3301 }
3302 else // HFS
3303 {
3304 HFSMasterDirectoryBlock *mdbP;
3305 HFSMasterDirectoryBlock *alternateMDB;
3306
3307 //
3308 // get volume name from BTree Key
3309 //
3310
3311 alternateMDB = (HFSMasterDirectoryBlock *) altBlock.buffer;
3312 mdbP = (HFSMasterDirectoryBlock *) priBlock.buffer;
3313
3314 maxClump = (UInt64) (vcb->vcbTotalBlocks / 4) * vcb->vcbBlockSize; /* max clump = 1/4 volume size */
3315
3316 // check out creation and last mod dates
3317 vcb->vcbCreateDate = alternateMDB->drCrDate; /* use creation date in alt MDB */
3318 vcb->vcbModifyDate = mdbP->drLsMod; /* don't change last mod date */
3319
3320 // verify volume attribute flags
3321 if ( (mdbP->drAtrb & VAtrb_Msk) == 0 )
3322 vcb->vcbAttributes = mdbP->drAtrb;
3323 else
3324 vcb->vcbAttributes = VAtrb_DFlt;
3325
3326 // verify allocation map ptr
3327 if ( mdbP->drAllocPtr < vcb->vcbTotalBlocks )
3328 vcb->vcbNextAllocation = mdbP->drAllocPtr;
3329 else
3330 vcb->vcbNextAllocation = 0;
3331
3332 // verify default clump size
3333 if ( (mdbP->drClpSiz > 0) &&
3334 (mdbP->drClpSiz <= maxClump) &&
3335 ((mdbP->drClpSiz % vcb->vcbBlockSize) == 0) )
3336 vcb->vcbDataClumpSize = mdbP->drClpSiz;
3337 else if ( (alternateMDB->drClpSiz > 0) &&
3338 (alternateMDB->drClpSiz <= maxClump) &&
3339 ((alternateMDB->drClpSiz % vcb->vcbBlockSize) == 0) )
3340 vcb->vcbDataClumpSize = alternateMDB->drClpSiz;
3341 else
3342 vcb->vcbDataClumpSize = 4 * vcb->vcbBlockSize;
3343
3344 if ( vcb->vcbDataClumpSize > kMaxClumpSize )
3345 vcb->vcbDataClumpSize = vcb->vcbBlockSize; /* for very large volumes, just use 1 allocation block */
3346
3347 // verify next CNode ID
3348 if ( (mdbP->drNxtCNID > vcb->vcbNextCatalogID) && (mdbP->drNxtCNID <= (vcb->vcbNextCatalogID + 4096)) )
3349 vcb->vcbNextCatalogID = mdbP->drNxtCNID;
3350
3351 // verify the volume name
3352 result = ChkCName( GPtr, (const CatalogName*) &vcb->vcbVN, isHFSPlus );
3353 if ( result == noErr )
3354 if ( CmpBlock( mdbP->drVN, vcb->vcbVN, vcb->vcbVN[0] + 1 ) == 0 )
3355 CopyMemory( mdbP->drVN, vcb->vcbVN, kHFSMaxVolumeNameChars + 1 ); /* ...we have a good one */
3356
3357 // verify last backup date and backup sequence number
3358 vcb->vcbBackupDate = mdbP->drVolBkUp; /* don't change last backup date */
3359 vcb->vcbVSeqNum = mdbP->drVSeqNum; /* don't change last backup sequence # */
3360
3361 // verify write count
3362 vcb->vcbWriteCount = mdbP->drWrCnt; /* don't change write count */
3363
3364 // check out extent file and catalog clump sizes
3365 if ( ((mdbP->drXTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drXTClpSiz <= maxClump) )
3366 vcb->vcbExtentsFile->fcbClumpSize = mdbP->drXTClpSiz;
3367 else if ( ((alternateMDB->drXTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drXTClpSiz <= maxClump) )
3368 vcb->vcbExtentsFile->fcbClumpSize = alternateMDB->drXTClpSiz;
3369 else
3370 vcb->vcbExtentsFile->fcbClumpSize = (alternateMDB->drXTExtRec[0].blockCount * vcb->vcbBlockSize);
3371
3372 if ( ((mdbP->drCTClpSiz % vcb->vcbBlockSize) == 0) && (mdbP->drCTClpSiz <= maxClump) )
3373 vcb->vcbCatalogFile->fcbClumpSize = mdbP->drCTClpSiz;
3374 else if ( ((alternateMDB->drCTClpSiz % vcb->vcbBlockSize) == 0) && (alternateMDB->drCTClpSiz <= maxClump) )
3375 vcb->vcbCatalogFile->fcbClumpSize = alternateMDB->drCTClpSiz;
3376 else
3377 vcb->vcbCatalogFile->fcbClumpSize = (alternateMDB->drCTExtRec[0].blockCount * vcb->vcbBlockSize);
3378
3379 // just copy Finder info for now
3380 CopyMemory(mdbP->drFndrInfo, vcb->vcbFinderInfo, sizeof(mdbP->drFndrInfo));
3381
3382 // now compare verified MDB info with MDB info on disk
3383 result = CmpMDB( GPtr, mdbP);
3384 }
3385
3386 exit:
3387 if (priBlock.buffer)
3388 (void) ReleaseVolumeBlock(vcb, &priBlock, kReleaseBlock);
3389 if (altBlock.buffer)
3390 (void) ReleaseVolumeBlock(vcb, &altBlock, kReleaseBlock);
3391
3392 return (result);
3393
3394 } /* end of VInfoChk */
3395
3396
3397 /*------------------------------------------------------------------------------
3398
3399 Function: VLockedChk - (Volume Name Locked Check)
3400
3401 Function: Makes sure the volume name isn't locked. If it is locked, generate a repair order.
3402
3403 This function is not called if file sharing is operating.
3404
3405 Input: GPtr - pointer to scavenger global area
3406
3407 Output: VLockedChk - function result:
3408 0 = no error
3409 n = error code
3410 ------------------------------------------------------------------------------*/
3411
3412 OSErr VLockedChk( SGlobPtr GPtr )
3413 {
3414 UInt32 hint;
3415 CatalogKey foundKey;
3416 CatalogRecord record;
3417 UInt16 recSize;
3418 OSErr result;
3419 UInt16 frFlags;
3420 Boolean isHFSPlus;
3421 SVCB *calculatedVCB = GPtr->calculatedVCB;
3422 VolumeObjectPtr myVOPtr;
3423
3424 myVOPtr = GetVolumeObjectPtr( );
3425 isHFSPlus = VolumeObjectIsHFSPlus( );
3426 GPtr->TarID = kHFSCatalogFileID; /* target = catalog file */
3427 GPtr->TarBlock = 0; /* no target block yet */
3428
3429 //
3430 // locate the catalog record for the root directory
3431 //
3432 result = GetBTreeRecord( GPtr->calculatedCatalogFCB, 0x8001, &foundKey, &record, &recSize, &hint );
3433
3434 if ( result)
3435 {
3436 RcdError( GPtr, E_EntryNotFound );
3437 return( E_EntryNotFound );
3438 }
3439
3440 // put the volume name in the VCB
3441 if ( isHFSPlus == false )
3442 {
3443 CopyMemory( foundKey.hfs.nodeName, calculatedVCB->vcbVN, sizeof(calculatedVCB->vcbVN) );
3444 }
3445 else if ( myVOPtr->volumeType != kPureHFSPlusVolumeType )
3446 {
3447 HFSMasterDirectoryBlock *mdbP;
3448 BlockDescriptor block;
3449
3450 block.buffer = NULL;
3451 if ( (myVOPtr->flags & kVO_PriMDBOK) != 0 )
3452 result = GetVolumeObjectPrimaryMDB( &block );
3453 else
3454 result = GetVolumeObjectAlternateMDB( &block );
3455 if ( result == noErr ) {
3456 mdbP = (HFSMasterDirectoryBlock *) block.buffer;
3457 CopyMemory( mdbP->drVN, calculatedVCB->vcbVN, sizeof(mdbP->drVN) );
3458 }
3459 if ( block.buffer != NULL )
3460 (void) ReleaseVolumeBlock(calculatedVCB, &block, kReleaseBlock );
3461 ReturnIfError(result);
3462 }
3463 else // Because we don't have the unicode converters, just fill it with a dummy name.
3464 {
3465 CopyMemory( "\x0dPure HFS Plus", calculatedVCB->vcbVN, sizeof(Str27) );
3466 }
3467
3468 GPtr->TarBlock = hint;
3469 if ( isHFSPlus )
3470 CopyCatalogName( (const CatalogName *)&foundKey.hfsPlus.nodeName, &GPtr->CName, isHFSPlus );
3471 else
3472 CopyCatalogName( (const CatalogName *)&foundKey.hfs.nodeName, &GPtr->CName, isHFSPlus );
3473
3474 if ( (record.recordType == kHFSPlusFolderRecord) || (record.recordType == kHFSFolderRecord) )
3475 {
3476 frFlags = record.recordType == kHFSPlusFolderRecord ?
3477 record.hfsPlusFolder.userInfo.frFlags :
3478 record.hfsFolder.userInfo.frFlags;
3479
3480 if ( frFlags & fNameLocked ) // name locked bit set?
3481 RcdNameLockedErr( GPtr, E_LockedDirName, frFlags );
3482 }
3483
3484 return( noErr );
3485 }
3486
3487
3488 /*------------------------------------------------------------------------------
3489
3490 Name: RcdNameLockedErr
3491
3492 Function: Allocates a RepairOrder node and links it into the 'GPtr->RepairP'
3493 list, to describe a locked volume name for possible repair.
3494
3495 Input: GPtr - ptr to scavenger global data
3496 type - error code (E_xxx), which should be >0
3497 incorrect - the incorrect file flags as found in file record
3498
3499 Output: 0 - no error
3500 R_NoMem - not enough mem to allocate record
3501 ------------------------------------------------------------------------------*/
3502
3503 static int RcdNameLockedErr( SGlobPtr GPtr, SInt16 type, UInt32 incorrect ) /* for a consistency check */
3504 {
3505 RepairOrderPtr p; /* the new node we compile */
3506 int n; /* size of node we allocate */
3507 Boolean isHFSPlus;
3508
3509 isHFSPlus = VolumeObjectIsHFSPlus( );
3510 RcdError( GPtr, type ); /* first, record the error */
3511
3512 n = CatalogNameSize( &GPtr->CName, isHFSPlus );
3513
3514 p = AllocMinorRepairOrder( GPtr, n ); /* get the node */
3515 if ( p==NULL ) /* quit if out of room */
3516 return ( R_NoMem );
3517
3518 CopyCatalogName( (const CatalogName *) &GPtr->CName, (CatalogName*)&p->name, isHFSPlus );
3519
3520 p->type = type; /* save error info */
3521 p->correct = incorrect & ~fNameLocked; /* mask off the name locked bit */
3522 p->incorrect = incorrect;
3523 p->maskBit = (UInt16)fNameLocked;
3524 p->parid = 1;
3525
3526 GPtr->CatStat |= S_LockedDirName; /* set flag to trigger repair */
3527
3528 return( noErr ); /* successful return */
3529 }
3530
3531 /*------------------------------------------------------------------------------
3532
3533 Name: RecordBadExtent
3534
3535 Function: Allocates a RepairOrder for repairing bad extent.
3536
3537 Input: GPtr - ptr to scavenger global data
3538 fileID - fileID of the file with bad extent
3539 forkType - bad extent's fork type
3540 startBlock - start block of the bad extent record
3541 badExtentIndex - index of bad extent entry in the extent record
3542
3543 Output: 0 - no error
3544 R_NoMem - not enough mem to allocate record
3545 ------------------------------------------------------------------------------*/
3546
3547 static int RecordBadExtent(SGlobPtr GPtr, UInt32 fileID, UInt8 forkType,
3548 UInt32 startBlock, UInt32 badExtentIndex)
3549 {
3550 RepairOrderPtr p;
3551 Boolean isHFSPlus;
3552
3553 isHFSPlus = VolumeObjectIsHFSPlus();
3554
3555 p = AllocMinorRepairOrder(GPtr, 0);
3556 if (p == NULL) {
3557 return(R_NoMem);
3558 }
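/* The generic RepairOrder fields are repurposed here: 'correct' holds the
 * index of the bad extent entry, 'hint' holds the start block of the extent
 * record, and 'parid' holds the file ID.
 */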
3559
3560 p->type = E_ExtEnt;
3561 p->forkType = forkType;
3562 p->correct = badExtentIndex;
3563 p->hint = startBlock;
3564 p->parid = fileID;
3565
3566 GPtr->CatStat |= S_BadExtent;
3567 return (0);
3568 }
3569
3570 /*
3571 * Build a catalog node thread key.
3572 */
3573 __unused static void
3574 buildthreadkey(UInt32 parentID, int std_hfs, CatalogKey *key)
3575 {
3576 if (std_hfs) {
3577 key->hfs.keyLength = kHFSCatalogKeyMinimumLength;
3578 key->hfs.reserved = 0;
3579 key->hfs.parentID = parentID;
3580 key->hfs.nodeName[0] = 0;
3581 } else {
3582 key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength;
3583 key->hfsPlus.parentID = parentID;
3584 key->hfsPlus.nodeName.length = 0;
3585 }
3586 }
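/*
 * Illustrative sketch of how this helper could be used (it is currently
 * unused; dirID and the locals below are hypothetical): build a thread key
 * for a directory and look up its thread record in the catalog btree, e.g.
 *
 *     CatalogKey key, foundKey;
 *     CatalogRecord record;
 *     UInt16 recSize;
 *     UInt32 hint;
 *
 *     buildthreadkey( dirID, !isHFSPlus, &key );
 *     err = SearchBTreeRecord( GPtr->calculatedCatalogFCB, &key, kNoHint,
 *                              &foundKey, &record, &recSize, &hint );
 */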
3587
3588
3589 static void
3590 printpath(SGlobPtr GPtr, UInt32 fileID)
3591 {
3592 int result;
3593 char path[PATH_MAX * 4];
3594 unsigned int pathlen = PATH_MAX * 4;
3595
3596 if (fileID < kHFSFirstUserCatalogNodeID) {
3597 switch(fileID) {
3598 case kHFSExtentsFileID:
3599 printf("$Extents_Overflow_File\n");
3600 return;
3601 case kHFSCatalogFileID:
3602 printf("$Catalog_File\n");
3603 return;
3604 case kHFSAllocationFileID:
3605 printf("$Allocation_Bitmap_File\n");
3606 return;
3607 case kHFSAttributesFileID:
3608 printf("$Attributes_File\n");
3609 return;
3610 default:
3611 printf("$File_ID_%d\n", fileID);
3612 return;
3613 }
3614 }
3615
3616 result = GetFileNamePathByID(GPtr, fileID, path, &pathlen, NULL, NULL, NULL);
3617 if (result) {
3618 printf ("error %d getting path for id=%u\n", result, fileID);
3619 }
3620
3621 printf("\"ROOT_OF_VOLUME%s\" (file id=%u)\n", path, fileID);
3622 }
3623
3624 void
3625 CheckPhysicalMatch(SVCB *vcb, UInt32 startblk, UInt32 blkcount, UInt32 fileNumber, UInt8 forkType)
3626 {
3627 int i;
3628 u_int64_t blk, blk1, blk2;
3629 u_int64_t offset;
3630
3631 offset = (u_int64_t) startblk * (u_int64_t) vcb->vcbBlockSize;
3632
3633 if (vcb->vcbSignature == kHFSPlusSigWord)
3634 offset += vcb->vcbEmbeddedOffset; // offset into the wrapper
3635 else
3636 offset += vcb->vcbAlBlSt * 512ULL; // offset to start of volume
3637
3638 blk1 = offset / gBlockSize;
3639 blk2 = blk1 + ((blkcount * vcb->vcbBlockSize) / gBlockSize);
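/*
 * Worked example with assumed values: for a 4096-byte allocation block size,
 * 512-byte device blocks (gBlockSize) and no embedded/alignment offset,
 * startblk 10 with blkcount 1 gives offset 40960, so blk1 = 80 and blk2 = 88,
 * i.e. the half-open device-block range [80, 88).
 */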
3640
3641 for (i = 0; i < gBlkListEntries; ++i) {
3642 blk = gBlockList[i];
3643
3644 if (blk >= blk1 && blk < blk2) {
3645 // printf("block %d is in file %d\n", blk, fileNumber);
3646 /* Do we need to grow the found blocks list? */
3647 if (gFoundBlockEntries % FOUND_BLOCKS_QUANTUM == 0) {
3648 struct found_blocks *new_blocks;
3649 new_blocks = realloc(gFoundBlocksList, (gFoundBlockEntries + FOUND_BLOCKS_QUANTUM) * sizeof(struct found_blocks));
3650 if (new_blocks == NULL) {
3651 fprintf(stderr, "CheckPhysicalMatch: Out of memory!\n");
3652 return;
3653 }
3654 gFoundBlocksList = new_blocks;
3655 }
3656 gFoundBlocksList[gFoundBlockEntries].block = blk;
3657 gFoundBlocksList[gFoundBlockEntries].fileID = fileNumber;
3658 ++gFoundBlockEntries;
3659 }
3660 }
3661 }
3662
3663 static int compare_found_blocks(const void *x1_arg, const void *x2_arg)
3664 {
3665 const struct found_blocks *x1 = x1_arg;
3666 const struct found_blocks *x2 = x2_arg;
3667
3668 if (x1->block < x2->block)
3669 return -1;
3670 else if (x1->block > x2->block)
3671 return 1;
3672 else {
3673 if (x1->fileID < x2->fileID)
3674 return -1;
3675 else if (x1->fileID > x2->fileID)
3676 return 1;
3677 }
3678
3679 return 0;
3680 }
3681
3682 void
3683 dumpblocklist(SGlobPtr GPtr)
3684 {
3685 int i, j;
3686 u_int64_t block;
3687
3688 /* Sort the found blocks */
3689 qsort(gFoundBlocksList, gFoundBlockEntries, sizeof(struct found_blocks), compare_found_blocks);
3690
3691 /*
3692 * Print out the blocks with matching files. In the case of overlapped
3693 * extents, the same block number will be printed multiple times, with
3694 * each file containing an overlapping extent. If overlapping extents
3695 * come from the same file, then that path will be printed multiple times.
3696 */
3697 for (i = 0; i < gFoundBlockEntries; ++i) {
3698 block = gFoundBlocksList[i].block;
3699
3700 printf("block %llu:\t", (unsigned long long) block);
3701 printpath(GPtr, gFoundBlocksList[i].fileID);
3702
3703 /* Remove block from the gBlockList */
3704 for (j = 0; j < gBlkListEntries; ++j) {
3705 if (gBlockList[j] == block) {
3706 gBlockList[j] = gBlockList[--gBlkListEntries];
3707 break;
3708 }
3709 }
3710 }
3711
3712 /* Print out the blocks without matching files */
3713 for (j = 0; j < gBlkListEntries; ++j) {
3714 printf("block %llu:\t*** NO MATCH ***\n", (unsigned long long) gBlockList[j]);
3715 }
3716 }
3717
3718 /*------------------------------------------------------------------------------
3719
3720 Function: CheckFileExtents - (Check File Extents)
3721
3722 Description:
3723 Verifies the extent info for file data or extended attribute data. It
3724 checks the correctness of the extent data. If the extent information is
3725 correct/valid, it updates the in-memory volume bitmap and the total number
3726 of valid blocks for the given file, and if overlapping extents exist, adds
3727 them to the overlap extents list. If the extent information is not
3728 correct, it considers the file truncated beyond the bad extent entry,
3729 reports only the total number of good blocks seen, and records the bad
3730 extent in a repair order so that it can be repaired later. Neither the
3731 invalid extent nor any extents after it are checked against the volume
3732 bitmap, and hence they are not checked for overlapping extents. Note that
3733 currently the function returns an error if an invalid extent is found for
3734 system files or for extended attributes.
3735
3736 For the data fork and resource fork of a file - This function checks the
3737 extent record present in the catalog record as well as extent overflow
3738 records, if any, for the given fileID.
3739
3740 For extended attribute data - This function only checks the extent record
3741 passed as parameter. If any extended attribute has overflow extents in
3742 the attribute btree, this function does not look them up. It is left
3743 to the caller to check the remaining extents for the given file's extended attribute.
3744
3745 Input:
3746 GPtr - pointer to scavenger global area
3747 fileNumber - file number for fork/extended attribute
3748 forkType - fork type
3749 00 - kDataFork - data fork
3750 01 - kEAData - extended attribute data extent
3751 ff - kRsrcFork - resource fork
3752 attrname - if fork type is kEAData, attrname contains pointer to the
3753 name of extended attribute whose extent is being checked; else
3754 it should be NULL. Note that the function assumes that this is a
3755 NULL-terminated string.
3756 extents - ptr to 1st extent record for the file
3757
3758 Output:
3759 CheckFileExtents - function result:
3760 noErr = no error
3761 n = error code
3762 blocksUsed - number of allocation blocks allocated to the file
3763 ------------------------------------------------------------------------------*/
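/*
 * Illustrative call sketch (the record and local names are hypothetical):
 * for the data fork of an HFS Plus catalog file record one would typically
 * call
 *
 *     UInt32 blocksUsed;
 *     err = CheckFileExtents( GPtr, file.fileID, kDataFork, NULL,
 *                             file.dataFork.extents, &blocksUsed );
 *
 * For an extended attribute extent, forkType would be kEAData and attrname
 * would point to the attribute's NULL-terminated name.
 */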
3764
3765 OSErr CheckFileExtents( SGlobPtr GPtr, UInt32 fileNumber, UInt8 forkType,
3766 const unsigned char *attrname, const void *extents,
3767 UInt32 *blocksUsed)
3768 {
3769 UInt32 blockCount = 0;
3770 UInt32 extentBlockCount;
3771 UInt32 extentStartBlock;
3772 UInt32 hint;
3773 HFSPlusExtentKey key;
3774 HFSPlusExtentKey extentKey;
3775 HFSPlusExtentRecord extentRecord;
3776 UInt16 recSize;
3777 OSErr err = noErr;
3778 SInt16 i;
3779 Boolean firstRecord;
3780 Boolean isHFSPlus;
3781 unsigned int lastExtentIndex;
3782 Boolean foundBadExtent;
3783
3784 /* For all extended attribute extents, the attrname should not be NULL */
3785 if (forkType == kEAData) {
3786 assert(attrname != NULL);
3787 }
3788
3789 isHFSPlus = VolumeObjectIsHFSPlus( );
3790 firstRecord = true;
3791 foundBadExtent = false;
3792 lastExtentIndex = GPtr->numExtents;
3793
3794 while ( (extents != nil) && (err == noErr) )
3795 {
3796 // checkout the extent record first
3797 err = ChkExtRec( GPtr, fileNumber, extents, &lastExtentIndex );
3798 if (err != noErr) {
3799 DPRINTF (d_info, "%s: Bad extent for fileID %u in extent %u for startblock %u\n", __FUNCTION__, fileNumber, lastExtentIndex, blockCount);
3800 if (cur_debug_level & d_dump_record)
3801 {
3802 plog("Extents:\n");
3803 HexDump(extents, sizeof(HFSPlusExtentRecord), FALSE);
3804 plog("\n");
3805 }
3806
3807 /* Stop verification if bad extent is found for system file or EA */
3808 if ((fileNumber < kHFSFirstUserCatalogNodeID) ||
3809 (forkType == kEAData)) {
3810 break;
3811 }
3812
3813 /* store information about bad extent in repair order */
3814 (void) RecordBadExtent(GPtr, fileNumber, forkType, blockCount, lastExtentIndex);
3815 foundBadExtent = true;
3816 err = noErr;
3817 }
3818
3819 /* Check only till the last valid extent entry reported by ChkExtRec */
3820 for ( i=0 ; i<lastExtentIndex ; i++ ) // now checkout the extents
3821 {
3822 // HFS+/HFS: move the extent fields into local variables for evaluation
3823 if ( isHFSPlus == true )
3824 {
3825 extentBlockCount = ((HFSPlusExtentDescriptor *)extents)[i].blockCount;
3826 extentStartBlock = ((HFSPlusExtentDescriptor *)extents)[i].startBlock;
3827 }
3828 else
3829 {
3830 extentBlockCount = ((HFSExtentDescriptor *)extents)[i].blockCount;
3831 extentStartBlock = ((HFSExtentDescriptor *)extents)[i].startBlock;
3832 }
3833
3834 if ( extentBlockCount == 0 )
3835 break;
3836
3837 if (gBlkListEntries != 0)
3838 CheckPhysicalMatch(GPtr->calculatedVCB, extentStartBlock, extentBlockCount, fileNumber, forkType);
3839
3840 err = CaptureBitmapBits(extentStartBlock, extentBlockCount);
3841 if (err == E_OvlExt) {
3842 err = AddExtentToOverlapList(GPtr, fileNumber, (char *)attrname, extentStartBlock, extentBlockCount, forkType);
3843 }
3844
3845 blockCount += extentBlockCount;
3846 }
3847
3848 if ( fileNumber == kHFSExtentsFileID ) // Extents file has no overflow extents
3849 break;
3850
3851 /* A bad extent was found for this file, so do not look up any extents
3852 * after the current extent. We assume that the file is truncated at the
3853 * bad extent entry.
3854 */
3855 if (foundBadExtent == true) {
3856 break;
3857 }
3858
3859 /* For extended attributes, only check the extent passed as parameter. The
3860 * caller will take care of checking other extents, if any, for the given
3861 * extended attribute.
3862 */
3863 if (forkType == kEAData) {
3864 break;
3865 }
3866
3867 if ( firstRecord == true )
3868 {
3869 firstRecord = false;
3870
3871 // Set up the extent key
3872 BuildExtentKey( isHFSPlus, forkType, fileNumber, blockCount, (void *)&key );
3873
3874 err = SearchBTreeRecord( GPtr->calculatedExtentsFCB, &key, kNoHint, (void *) &extentKey, (void *) &extentRecord, &recSize, &hint );
3875
3876 if ( err == btNotFound )
3877 {
3878 err = noErr; // no more extent records
3879 extents = nil;
3880 break;
3881 }
3882 else if ( err != noErr )
3883 {
3884 err = IntError( GPtr, err ); // error from SearchBTreeRecord
3885 return( err );
3886 }
3887 }
3888 else
3889 {
3890 err = GetBTreeRecord( GPtr->calculatedExtentsFCB, 1, &extentKey, extentRecord, &recSize, &hint );
3891
3892 if ( err == btNotFound )
3893 {
3894 err = noErr; // no more extent records
3895 extents = nil;
3896 break;
3897 }
3898 else if ( err != noErr )
3899 {
3900 err = IntError( GPtr, err ); /* error from BTGetRecord */
3901 return( err );
3902 }
3903
3904 // Check same file and fork
3905 if ( isHFSPlus )
3906 {
3907 if ( (extentKey.fileID != fileNumber) || (extentKey.forkType != forkType) )
3908 break;
3909 }
3910 else
3911 {
3912 if ( (((HFSExtentKey *) &extentKey)->fileID != fileNumber) || (((HFSExtentKey *) &extentKey)->forkType != forkType) )
3913 break;
3914 }
3915 }
3916
3917 extents = (void *) &extentRecord;
3918 }
3919
3920 *blocksUsed = blockCount;
3921
3922 return( err );
3923 }
3924
3925
3926 void BuildExtentKey( Boolean isHFSPlus, UInt8 forkType, HFSCatalogNodeID fileNumber, UInt32 blockNumber, void * key )
3927 {
3928 if ( isHFSPlus )
3929 {
3930 HFSPlusExtentKey *hfsPlusKey = (HFSPlusExtentKey*) key;
3931
3932 hfsPlusKey->keyLength = kHFSPlusExtentKeyMaximumLength;
3933 hfsPlusKey->forkType = forkType;
3934 hfsPlusKey->pad = 0;
3935 hfsPlusKey->fileID = fileNumber;
3936 hfsPlusKey->startBlock = blockNumber;
3937 }
3938 else
3939 {
3940 HFSExtentKey *hfsKey = (HFSExtentKey*) key;
3941
3942 hfsKey->keyLength = kHFSExtentKeyMaximumLength;
3943 hfsKey->forkType = forkType;
3944 hfsKey->fileID = fileNumber;
3945 hfsKey->startBlock = (UInt16) blockNumber;
3946 }
3947 }
3948
3949
3950
3951 //
3952 // Adds this extent to our OverlappedExtentList for later repair.
3953 //
3954 static OSErr AddExtentToOverlapList( SGlobPtr GPtr, HFSCatalogNodeID fileNumber, const char *attrname, UInt32 extentStartBlock, UInt32 extentBlockCount, UInt8 forkType )
3955 {
3956 size_t newHandleSize;
3957 ExtentInfo extentInfo;
3958 ExtentsTable **extentsTableH;
3959 size_t attrlen;
3960
3961 ClearMemory(&extentInfo, sizeof(extentInfo));
3962 extentInfo.fileID = fileNumber;
3963 extentInfo.startBlock = extentStartBlock;
3964 extentInfo.blockCount = extentBlockCount;
3965 extentInfo.forkType = forkType;
3966 /* store the name of extended attribute */
3967 if (forkType == kEAData) {
3968 assert(attrname != NULL);
3969
3970 attrlen = strlen(attrname) + 1;
3971 extentInfo.attrname = malloc(attrlen);
3972 if (extentInfo.attrname == NULL) {
3973 return(memFullErr);
3974 }
3975 strlcpy(extentInfo.attrname, attrname, attrlen);
3976 }
3977
3978 // If it's uninitialized
3979 if ( GPtr->overlappedExtents == nil )
3980 {
3981 GPtr->overlappedExtents = (ExtentsTable **) NewHandleClear( sizeof(ExtentsTable) );
3982 extentsTableH = GPtr->overlappedExtents;
3983 }
3984 else
3985 {
3986 extentsTableH = GPtr->overlappedExtents;
3987
3988 if ( ExtentInfoExists( extentsTableH, &extentInfo) == true )
3989 return( noErr );
3990
3991 // Grow the Extents table for a new entry.
3992 newHandleSize = ( sizeof(ExtentInfo) ) + ( GetHandleSize( (Handle)extentsTableH ) );
3993 SetHandleSize( (Handle)extentsTableH, newHandleSize );
3994 }
3995
3996 // Copy the new extents into the end of the table
3997 CopyMemory( &extentInfo, &((**extentsTableH).extentInfo[(**extentsTableH).count]), sizeof(ExtentInfo) );
3998
3999 // Update the overlap extent bit
4000 GPtr->VIStat |= S_OverlappingExtents;
4001
4002 // Update the extent table count
4003 (**extentsTableH).count++;
4004
4005 return( noErr );
4006 }
4007
4008
4009 /* Check whether the given extentInfo already exists in the extents table */
4010 static Boolean ExtentInfoExists( ExtentsTable **extentsTableH, ExtentInfo *extentInfo)
4011 {
4012 UInt32 i;
4013 ExtentInfo *aryExtentInfo;
4014
4015
4016 for ( i = 0 ; i < (**extentsTableH).count ; i++ )
4017 {
4018 aryExtentInfo = &((**extentsTableH).extentInfo[i]);
4019
4020 if ( extentInfo->fileID == aryExtentInfo->fileID )
4021 {
4022 if ( (extentInfo->startBlock == aryExtentInfo->startBlock) &&
4023 (extentInfo->blockCount == aryExtentInfo->blockCount) &&
4024 (extentInfo->forkType == aryExtentInfo->forkType) )
4025 {
4026 /* startBlock, blockCount, and forkType are the same.
4027 * Compare the extended attribute names, if they exist.
4028 */
4029
4030 /* If no attribute name exists, the two extents are the same */
4031 if ((extentInfo->attrname == NULL) &&
4032 (aryExtentInfo->attrname == NULL)) {
4033 return(true);
4034 }
4035
4036 /* If only one attribute name exists, the two extents are not the same */
4037 if (((extentInfo->attrname != NULL) && (aryExtentInfo->attrname == NULL)) ||
4038 ((extentInfo->attrname == NULL) && (aryExtentInfo->attrname != NULL))) {
4039 return(false);
4040 }
4041
4042 /* Both attribute names exist. Compare the names */
4043 if (!strcmp(extentInfo->attrname, aryExtentInfo->attrname)) {
4044 return (true);
4045 } else {
4046 return (false);
4047 }
4048
4049 }
4050 }
4051 }
4052
4053 return( false );
4054 }
4055
4056 /* Function : DoesOverlap
4057 *
4058 * Description:
4059 * This function takes a start block and the count of blocks in a
4060 * given extent and compares it against the list of overlapped
4061 * extents in the global structure.
4062 * This is useful in finding the original files that overlap with
4063 * the files found in the catalog btree check. If a file is found
4064 * overlapping, it is added to the overlap list.
4065 *
4066 * Input:
4067 * 1. GPtr - global scavenger pointer.
4068 * 2. fileID - file ID being checked.
4069 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4070 * 4. startBlock - start block in extent.
4071 * 5. blockCount - total number of blocks in extent.
4072 * 6. forkType - type of fork being check (kDataFork, kRsrcFork, kEAData).
4073 *
4074 * Output: isOverlapped - Boolean value of true or false.
4075 */
4076 static Boolean DoesOverlap(SGlobPtr GPtr, UInt32 fileID, const char *attrname, UInt32 startBlock, UInt32 blockCount, UInt8 forkType)
4077 {
4078 int i;
4079 Boolean isOverlapped = false;
4080 ExtentInfo *curExtentInfo;
4081 ExtentsTable **extentsTableH = GPtr->overlappedExtents;
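/* The loop below is a half-open interval intersection test: the extent
 * [curStart, curStart + curCount) overlaps [startBlock, startBlock + blockCount)
 * exactly when each extent starts before the other one ends.
 */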
4082
4083 for (i = 0; i < (**extentsTableH).count; i++) {
4084 curExtentInfo = &((**extentsTableH).extentInfo[i]);
4085 /* Check extents */
4086 if (curExtentInfo->startBlock < startBlock) {
4087 if ((curExtentInfo->startBlock + curExtentInfo->blockCount) > startBlock) {
4088 isOverlapped = true;
4089 break;
4090 }
4091 } else { /* curExtentInfo->startBlock >= startBlock */
4092 if (curExtentInfo->startBlock < (startBlock + blockCount)) {
4093 isOverlapped = true;
4094 break;
4095 }
4096 }
4097 } /* for loop Extents Table */
4098
4099 /* Add this extent to overlap list */
4100 if (isOverlapped) {
4101 AddExtentToOverlapList(GPtr, fileID, attrname, startBlock, blockCount, forkType);
4102 }
4103
4104 return isOverlapped;
4105 } /* DoesOverlap */
4106
4107 /* Function : CheckHFSPlusExtentRecords
4108 *
4109 * Description:
4110 * For all valid extents, this function calls DoesOverlap to find
4111 * if a given extent is overlapping with another extent existing
4112 * in the overlap list.
4113 *
4114 * Input:
4115 * 1. GPtr - global scavenger pointer.
4116 * 2. fileID - file ID being checked.
4117 * 3. attrname - name of extended attribute being checked, should be NULL for regular files
4118 * 4. extent - extent information to check.
4119 * 5. forkType - type of fork being checked (kDataFork, kRsrcFork, kEAData).
4120 *
4121 * Output: None.
4122 */
4123 static void CheckHFSPlusExtentRecords(SGlobPtr GPtr, UInt32 fileID, const char *attrname, HFSPlusExtentRecord extent, UInt8 forkType)
4124 {
4125 int i;
4126
4127 /* Check for overlapping extents for all extents in given extent data */
4128 for (i = 0; i < kHFSPlusExtentDensity; i++) {
4129 if (extent[i].startBlock == 0) {
4130 break;
4131 }
4132 DoesOverlap(GPtr, fileID, attrname, extent[i].startBlock, extent[i].blockCount, forkType);
4133 }
4134 return;
4135 } /* CheckHFSPlusExtentRecords */
4136
4137 /* Function : CheckHFSExtentRecords
4138 *
4139 * Description:
4140 * For all valid extents, this function calls DoesOverlap to find
4141 * if a given extent is overlapping with another extent existing
4142 * in the overlap list.
4143 *
4144 * Input:
4145 * 1. GPtr - global scavenger pointer.
4146 * 2. fileID - file ID being checked.
4147 * 3. extent - extent information to check.
4148 * 4. forkType - type of fork being checked (kDataFork, kRsrcFork).
4149 *
4150 * Output: None.
4151 */
4152 static void CheckHFSExtentRecords(SGlobPtr GPtr, UInt32 fileID, HFSExtentRecord extent, UInt8 forkType)
4153 {
4154 int i;
4155
4156 /* Check for overlapping extents for all extents in given extents */
4157 for (i = 0; i < kHFSExtentDensity; i++) {
4158 if (extent[i].startBlock == 0) {
4159 break;
4160 }
4161 DoesOverlap(GPtr, fileID, NULL, extent[i].startBlock, extent[i].blockCount, forkType);
4162 }
4163 return;
4164 } /* CheckHFSExtentRecords */
4165
4166 /* Function: FindOrigOverlapFiles
4167 *
4168 * Description:
4169 * This function is called only if the btree checks result in
4170 * overlapped extent errors. The btree checks do not identify
4171 * the original files whose extents overlap with the one being
4172 * reported. This function finds all the original files whose
4173 * extents are being overlapped.
4174 *
4175 * This function relies on comparing extents against the overlap list
4176 * created during the verify stage. The list is also updated with the
4177 * overlapped extents found by this function.
4178 *
4179 * 1. Compare extents for all the files located in volume header.
4180 * 2. Traverse catalog btree and compare extents of all files.
4181 * 3. Traverse extents btree and compare extents for all entries.
4182 *
4183 * Input: GPtr - pointer to global scavenger area.
4184 *
4185 * Output: err - function result
4186 * zero means success
4187 * non-zero means failure
4188 */
4189 int FindOrigOverlapFiles(SGlobPtr GPtr)
4190 {
4191 OSErr err = noErr;
4192 Boolean isHFSPlus;
4193
4194 UInt16 selCode; /* select access pattern for BTree */
4195 UInt16 recordSize;
4196 UInt32 hint;
4197
4198 CatalogRecord catRecord;
4199 CatalogKey catKey;
4200
4201 ExtentRecord extentRecord;
4202 ExtentKey extentKey;
4203
4204 HFSPlusAttrRecord attrRecord;
4205 HFSPlusAttrKey attrKey;
4206 char attrName[XATTR_MAXNAMELEN];
4207 size_t len;
4208
4209 SVCB *calculatedVCB = GPtr->calculatedVCB;
4210
4211 isHFSPlus = VolumeObjectIsHFSPlus();
4212
4213 /* Check file extents from volume header */
4214 if (isHFSPlus) {
4215 /* allocation file */
4216 if (calculatedVCB->vcbAllocationFile) {
4217 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAllocationFile->fcbFileID, NULL,
4218 calculatedVCB->vcbAllocationFile->fcbExtents32, kDataFork);
4219 }
4220
4221 /* extents file */
4222 if (calculatedVCB->vcbExtentsFile) {
4223 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID, NULL,
4224 calculatedVCB->vcbExtentsFile->fcbExtents32, kDataFork);
4225 }
4226
4227 /* catalog file */
4228 if (calculatedVCB->vcbCatalogFile) {
4229 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID, NULL,
4230 calculatedVCB->vcbCatalogFile->fcbExtents32, kDataFork);
4231 }
4232
4233 /* attributes file */
4234 if (calculatedVCB->vcbAttributesFile) {
4235 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbAttributesFile->fcbFileID, NULL,
4236 calculatedVCB->vcbAttributesFile->fcbExtents32, kDataFork);
4237 }
4238
4239 /* startup file */
4240 if (calculatedVCB->vcbStartupFile) {
4241 CheckHFSPlusExtentRecords(GPtr, calculatedVCB->vcbStartupFile->fcbFileID, NULL,
4242 calculatedVCB->vcbStartupFile->fcbExtents32, kDataFork);
4243 }
4244 } else {
4245 /* extents file */
4246 if (calculatedVCB->vcbExtentsFile) {
4247 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbExtentsFile->fcbFileID,
4248 calculatedVCB->vcbExtentsFile->fcbExtents16, kDataFork);
4249 }
4250
4251 /* catalog file */
4252 if (calculatedVCB->vcbCatalogFile) {
4253 CheckHFSExtentRecords(GPtr, calculatedVCB->vcbCatalogFile->fcbFileID,
4254 calculatedVCB->vcbCatalogFile->fcbExtents16, kDataFork);
4255 }
4256 }
4257
4258 /* Traverse the catalog btree */
4259 selCode = 0x8001; /* Get first record from BTree */
4260 err = GetBTreeRecord(GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint);
4261 if (err != noErr) {
4262 goto traverseExtents;
4263 }
4264 selCode = 1; /* Get next record */
4265 do {
4266 if ((catRecord.recordType == kHFSPlusFileRecord) ||
4267 (catRecord.recordType == kHFSFileRecord)) {
4268
4269 if (isHFSPlus) {
4270 /* HFSPlus data fork */
4271 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4272 catRecord.hfsPlusFile.dataFork.extents, kDataFork);
4273
4274 /* HFSPlus resource fork */
4275 CheckHFSPlusExtentRecords(GPtr, catRecord.hfsPlusFile.fileID, NULL,
4276 catRecord.hfsPlusFile.resourceFork.extents, kRsrcFork);
4277 } else {
4278 /* HFS data extent */
4279 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4280 catRecord.hfsFile.dataExtents, kDataFork);
4281
4282 /* HFS resource extent */
4283 CheckHFSExtentRecords(GPtr, catRecord.hfsFile.fileID,
4284 catRecord.hfsFile.rsrcExtents, kRsrcFork);
4285 }
4286 }
4287
4288 /* Access the next record */
4289 err = GetBTreeRecord( GPtr->calculatedCatalogFCB, selCode, &catKey, &catRecord, &recordSize, &hint );
4290 } while (err == noErr);
4291
4292 traverseExtents:
4293 /* Traverse the extents btree */
4294 selCode = 0x8001; /* Get first record from BTree */
4295 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4296 if (err != noErr) {
4297 goto traverseAttribute;
4298 }
4299 selCode = 1; /* Get next record */
4300 do {
4301 if (isHFSPlus) {
4302 CheckHFSPlusExtentRecords(GPtr, extentKey.hfsPlus.fileID, NULL,
4303 extentRecord.hfsPlus, extentKey.hfsPlus.forkType);
4304 } else {
4305 CheckHFSExtentRecords(GPtr, extentKey.hfs.fileID, extentRecord.hfs,
4306 extentKey.hfs.forkType);
4307 }
4308
4309 /* Access the next record */
4310 err = GetBTreeRecord(GPtr->calculatedExtentsFCB, selCode, &extentKey, &extentRecord, &recordSize, &hint);
4311 } while (err == noErr);
4312
4313 traverseAttribute:
4314 /* Extended attributes are only supported in HFS Plus */
4315 if (!isHFSPlus) {
4316 goto out;
4317 }
4318
4319 /* Traverse the attribute btree */
4320 selCode = 0x8001; /* Get first record from BTree */
4321 /* Warning: Attribute record of type kHFSPlusAttrInlineData may be
4322 * truncated on read! (4425232). This function only uses recordType
4323 * field from inline attribute record.
4324 */
4325 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4326 if (err != noErr) {
4327 goto out;
4328 }
4329 selCode = 1; /* Get next record */
4330 do {
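/* attrKey.attrNameLen counts UTF-16 units; it is doubled below to pass a
 * byte length to utf_encodestr when converting the attribute name to UTF-8.
 */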
4331 if (attrRecord.recordType == kHFSPlusAttrForkData) {
4332 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4333 attrName[len] = '\0';
4334
4335 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.forkData.theFork.extents, kEAData);
4336 } else if (attrRecord.recordType == kHFSPlusAttrExtents) {
4337 (void) utf_encodestr(attrKey.attrName, attrKey.attrNameLen * 2, (unsigned char *)attrName, &len, sizeof(attrName));
4338 attrName[len] = '\0';
4339
4340 CheckHFSPlusExtentRecords(GPtr, attrKey.fileID, attrName, attrRecord.overflowExtents.extents, kEAData);
4341 }
4342
4343 /* Access the next record
4344 * Warning: Attribute record of type kHFSPlusAttrInlineData may be
4345 * truncated on read! (4425232). This function only uses recordType
4346 * field from inline attribute record.
4347 */
4348 err = GetBTreeRecord(GPtr->calculatedAttributesFCB, selCode, &attrKey, &attrRecord, &recordSize, &hint);
4349 } while (err == noErr);
4350
4351 out:
4352 if (err == btNotFound) {
4353 err = noErr;
4354 }
4355 return err;
4356 } /* FindOrigOverlapFiles */
4357
4358 /* Function: PrintOverlapFiles
4359 *
4360 * Description: Print the information about all unique overlapping files.
4361 * 1. Sort the overlap extents in increasing order of fileID
4362 * 2. For every unique fileID, prefix the string with fileID and find the
4363 * filename/path based on fileID.
4364 * If fileID >= kHFSFirstUserCatalogNodeID, find the path to the file
4365 * Else, find name of the system file.
4366 * 3. Print the new string.
4367 * Note that the path is printed only for HFS Plus volumes and not for
4368 * plain HFS volumes. This is done by not allocating a buffer for finding
4369 * the file path.
4370 *
4371 * Input:
4372 * GPtr - Global scavenger structure pointer.
4373 *
4374 * Output:
4375 * nothing (void)
4376 */
4377 void PrintOverlapFiles (SGlobPtr GPtr)
4378 {
4379 OSErr err;
4380 ExtentsTable **extentsTableH;
4381 ExtentInfo *extentInfo;
4382 unsigned int numOverlapExtents;
4383 unsigned int buflen, filepathlen;
4384 char *filepath = NULL;
4385 UInt32 lastID = 0;
4386 Boolean printMsg;
4387 Boolean isHFSPlus;
4388 int i;
4389
4390 isHFSPlus = VolumeObjectIsHFSPlus();
4391
4392 extentsTableH = GPtr->overlappedExtents;
4393 numOverlapExtents = (**extentsTableH).count;
4394
4395 /* Sort the list according to file ID */
4396 qsort((**extentsTableH).extentInfo, numOverlapExtents, sizeof(ExtentInfo),
4397 CompareExtentFileID);
4398
4399 buflen = PATH_MAX * 4;
4400 /* Allocate buffer to read data */
4401 if (isHFSPlus) {
4402 filepath = malloc (buflen);
4403 }
4404
4405 for (i = 0; i < numOverlapExtents; i++) {
4406 extentInfo = &((**extentsTableH).extentInfo[i]);
4407
4408 /* Skip the same fileID */
4409 if (lastID == extentInfo->fileID) {
4410 continue;
4411 }
4412
4413 lastID = extentInfo->fileID;
4414 printMsg = false;
4415
4416 if (filepath) {
4417 filepathlen = buflen;
4418 if (extentInfo->fileID >= kHFSFirstUserCatalogNodeID) {
4419 /* Lookup the file path */
4420 err = GetFileNamePathByID (GPtr, extentInfo->fileID, filepath, &filepathlen, NULL, NULL, NULL);
4421 } else {
4422 /* Get system filename */
4423 err = GetSystemFileName (extentInfo->fileID, filepath, &filepathlen);
4424 }
4425
4426 if (err == noErr) {
4427 /* print fileID, filepath */
4428 fsckPrint(GPtr->context, E_OvlExt, extentInfo->fileID, filepath);
4429 printMsg = true;
4430 }
4431
4432 if (fsckGetVerbosity(GPtr->context) >= kDebugLog) {
4433 plog ("\textentType=0x%x, startBlock=0x%x, blockCount=0x%x, attrName=%s\n",
4434 extentInfo->forkType, extentInfo->startBlock, extentInfo->blockCount, extentInfo->attrname);
4435 }
4436 }
4437
4438 if (printMsg == false) {
4439 /* print only fileID */
4440 fsckPrint(GPtr->context, E_OvlExtID, extentInfo->fileID);
4441 }
4442 }
4443
4444 if (filepath) {
4445 free (filepath);
4446 }
4447
4448 return;
4449 } /* PrintOverlapFiles */
4450
4451 /* Function: CompareExtentFileID
4452 *
4453 * Description: Compares the fileIDs of two ExtentInfo structures and returns
4454 * the comparison result (the list is sorted in ascending order of fileID).
4455 *
4456 * Input:
4457 * first and second - void pointers to ExtentInfo structure.
4458 *
4459 * Output:
4460 * >0 if first > second
4461 * =0 if first == second
4462 * <0 if first < second
4463 */
4464 static int CompareExtentFileID(const void *first, const void *second)
4465 {
4466 return (((ExtentInfo *)first)->fileID > ((ExtentInfo *)second)->fileID) -
4467 (((ExtentInfo *)first)->fileID < ((ExtentInfo *)second)->fileID);
4468 } /* CompareExtentFileID */
4469
4470 /* Function: journal_replay
4471 *
4472 * Description: Replay journal on a journaled HFS+ volume. This function
4473 * returns success if the volume is not journaled or the journal was not
4474 * dirty. If there was any error in replaying the journal, a non-zero value
4475 * is returned.
4476 *
4477 * Output:
4478 * 0 - success, non-zero - failure.
4479 */
4480 //int journal_replay(SGlobPtr gptr)
4481 int journal_replay(const char *block_device)
4482 {
4483 int retval = 0;
4484 struct vfsconf vfc;
4485 int mib[4];
4486 int jfd;
4487
4488 jfd = open(block_device, O_RDWR);
4489 if (jfd == -1) {
4490 retval = errno;
4491 if (debug)
4492 fplog(stderr, "Unable to open block device %s: %s", block_device, strerror(errno));
4493 goto out;
4494 }
4495
4496 retval = getvfsbyname("hfs", &vfc);
4497 if (retval) {
4498 close(jfd);
4499 goto out;
4500 }
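/*
 * Hand the journal replay off to the kernel: the MIB selects the HFS
 * filesystem's HFS_REPLAY_JOURNAL control and passes the open file
 * descriptor for the block device as the final element.
 */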
4501
4502 mib[0] = CTL_VFS;
4503 mib[1] = vfc.vfc_typenum;
4504 mib[2] = HFS_REPLAY_JOURNAL;
4505 mib[3] = jfd;
4506 retval = sysctl(mib, 4, NULL, NULL, NULL, 0);
4507 if (retval) {
4508 retval = errno;
4509 }
4510 (void)close(jfd);
4511
4512 out:
4513 return retval;
4514 }
4515