/*
 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
	Contains:	Initialization code for HFS and HFS Plus volumes.

	Copyright:	© 1984-1999 by Apple Computer, Inc., all rights reserved.
*/
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

/*
 * CommonCrypto is meant to be a more stable API than OpenSSL.
 * Defining COMMON_DIGEST_FOR_OPENSSL gives API-compatibility
 * with OpenSSL, so we don't have to change the code.
 */
#define COMMON_DIGEST_FOR_OPENSSL
#include <CommonCrypto/CommonDigest.h>

#include <libkern/OSByteOrder.h>

#include <CoreFoundation/CFString.h>
#include <CoreFoundation/CFStringEncodingExt.h>
#include <IOKit/IOKitLib.h>
#include <IOKit/storage/IOMedia.h>

#include <TargetConditionals.h>
extern Boolean _CFStringGetFileSystemRepresentation(CFStringRef string, UInt8 *buffer, CFIndex maxBufLen);
#include <hfs/hfs_format.h>
#include <hfs/hfs_mount.h>
#include "hfs_endian.h"

#include "newfs_hfs.h"

#ifndef NEWFS_HFS_DEBUG
# ifdef DEBUG_BUILD
#  define NEWFS_HFS_DEBUG 1
# else
#  define NEWFS_HFS_DEBUG 0
# endif
#endif
#define HFS_BOOT_DATA		"/usr/share/misc/hfsbootdata"

#define HFS_JOURNAL_FILE	".journal"
#define HFS_JOURNAL_INFO	".journal_info_block"

#define kJournalFileType	0x6a726e6c	/* 'jrnl' */

typedef HFSMasterDirectoryBlock HFS_MDB;
struct ExtentRecord {
	HFSPlusExtentKey	key;
	HFSPlusExtentRecord	record;
} __attribute__((aligned(2), packed));
static size_t numOverflowExtents = 0;
static struct ExtentRecord *overflowExtents = NULL;

struct filefork	gDTDBFork, gSystemFork, gReadMeFork;
static void WriteVH __P((const DriveInfo *driveInfo, HFSPlusVolumeHeader *hp));
static void InitVH __P((hfsparams_t *defaults, UInt64 sectors,
		HFSPlusVolumeHeader *header));

static int AllocateExtent(UInt8 *buffer, UInt32 startBlock, UInt32 blockCount);
static int MarkExtentUsed(const DriveInfo *, HFSPlusVolumeHeader *, UInt32, UInt32);

static void WriteExtentsFile __P((const DriveInfo *dip, UInt64 startingSector,
		const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer,
		UInt32 *bytesUsed, UInt32 *mapNodes));

static void WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector,
		const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer,
		UInt32 *bytesUsed, UInt32 *mapNodes);

static void WriteCatalogFile __P((const DriveInfo *dip, UInt64 startingSector,
		const hfsparams_t *dp, HFSPlusVolumeHeader *header, void *buffer,
		UInt32 *bytesUsed, UInt32 *mapNodes));
static int WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector,
		const hfsparams_t *dp, HFSPlusVolumeHeader *header,
		void *buffer);
static void InitCatalogRoot_HFSPlus __P((const hfsparams_t *dp, const HFSPlusVolumeHeader *header, void * buffer));

static void WriteMapNodes __P((const DriveInfo *driveInfo, UInt64 diskStart,
		UInt32 firstMapNode, UInt32 mapNodes, UInt16 btNodeSize, void *buffer));
static void WriteBuffer __P((const DriveInfo *driveInfo, UInt64 startingSector,
		UInt64 byteCount, const void *buffer));
static UInt32 Largest __P((UInt32 a, UInt32 b, UInt32 c, UInt32 d));

static UInt32 GetDefaultEncoding();

static UInt32 UTCToLocal __P((UInt32 utcTime));

static int ConvertUTF8toUnicode __P((const UInt8* source, size_t bufsize,
		UniChar* unibuf, UInt16 *charcount));

static int getencodinghint(unsigned char *name);
#define VOLUMEUUIDVALUESIZE 2
typedef union VolumeUUID {
	UInt32 value[VOLUMEUUIDVALUESIZE];
	struct {
		UInt32 high;
		UInt32 low;
	} v;
} VolumeUUID;

void GenerateVolumeUUID(VolumeUUID *newVolumeID);

void SETOFFSET (void *buffer, UInt16 btNodeSize, SInt16 recOffset, SInt16 vecOffset);
#define SETOFFSET(buf,ndsiz,offset,rec)		\
	(*(SInt16 *)((UInt8 *)(buf) + (ndsiz) + (-2 * (rec))) = (SWAP_BE16 (offset)))
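
/*
 * Illustrative sketch (not part of the build): SETOFFSET writes record
 * offset <offset>, big-endian, into slot <rec> of the offset table that
 * grows backward from the end of a B-tree node.  For a 4096-byte node,
 * slot 1 occupies the last two bytes, slot 2 the two bytes before that,
 * and so on.  The buffer and sizes below are made up for the example.
 */
#if 0
static void
ExampleSetOffset(void)
{
	UInt8	node[4096];
	UInt16	nodeSize = sizeof(node);
	SInt16	stored;

	bzero(node, nodeSize);
	SETOFFSET(node, nodeSize, sizeof(BTNodeDescriptor), 1);

	/* Slot 1 is the last two bytes of the node; un-swap to read it back. */
	stored = *(SInt16 *)(node + nodeSize - 2);
	printf("slot 1 holds offset %d\n", SWAP_BE16(stored));	/* sizeof(BTNodeDescriptor) */
}
#endif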
#define BYTESTOBLKS(bytes,blks)		DivideAndRoundUp((bytes),(blks))

#define ROUNDUP(x, u)	(((x) % (u) == 0) ? (x) : ((x)/(u) + 1) * (u))

#if TARGET_OS_EMBEDDED
#define ENCODING_TO_BIT(e)	\
	  ((e) < 48 ? (e) : 0)
#else
#define ENCODING_TO_BIT(e)                               \
	  ((e) < 48 ? (e) :                              \
	  ((e) == kCFStringEncodingMacUkrainian ? 48 :   \
	  ((e) == kCFStringEncodingMacFarsi ? 49 : 0)))
#endif
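
/*
 * Illustrative sketch (not part of the build): ROUNDUP rounds x up to the
 * next multiple of u and leaves exact multiples alone, e.g. ROUNDUP(5, 4)
 * is 8 while ROUNDUP(8, 4) stays 8.  ENCODING_TO_BIT maps a text-encoding
 * value to the bit recorded in the volume header's encodingsBitmap
 * (encodings below 48 map to their own bit, assuming the mapping above).
 * The values are just worked examples.
 */
#if 0
static void
ExampleRoundupAndEncodingBit(void)
{
	printf("ROUNDUP(5, 4) = %d\n", ROUNDUP(5, 4));	/* 8 */
	printf("ROUNDUP(8, 4) = %d\n", ROUNDUP(8, 4));	/* 8 */

	/* MacRoman (0) maps to bit 0, so encodingsBitmap gets (1 << 0). */
	printf("bit for MacRoman = %d\n", ENCODING_TO_BIT(kCFStringEncodingMacRoman));
}
#endif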
/* On-disk layout of the content-protection EA written on the root folder. */
struct cp_root_xattr {
	u_int16_t	vers;
	u_int16_t	reserved1;
	u_int64_t	reserved2;
	u_int8_t	reserved3[16];
} __attribute__((aligned(2), packed));
/*
 * Create a series of (sequential!) extents for the
 * requested file.  It tries to create the requested
 * number, but may be stymied by the file size, and
 * the number of minimum blocks.
 */
static void
createExtents(HFSPlusForkData *file,
	      UInt32 fileID,
	      UInt32 startBlock,
	      UInt32 numExtents,
	      int minBlocks)
{
	if (NEWFS_HFS_DEBUG == 0) {
		/*
		 * The common case, for non-debug.
		 */
		file->extents[0].startBlock = startBlock;
		file->extents[0].blockCount = file->totalBlocks;
	} else {
		UInt32 blocksLeft, blocksTotal = 0, blockStep;
		int i;
		int firstAdjust = 0;

		if (numExtents == 1) {
			// The common case, no need to do any math
			file->extents[0].startBlock = startBlock;
			file->extents[0].blockCount = file->totalBlocks;
			return;
		}
		if (file->totalBlocks < numExtents)
			numExtents = file->totalBlocks;

		blocksLeft = file->totalBlocks;

		/*
		 * The intent here is to split the number of blocks into the
		 * requested number of extents.  So first we determine how
		 * many blocks should go in each extent -- that's blockStep.
		 * If we have been given minBlocks, we need to make sure it's
		 * a multiple of that.  (In general, the values are going to be
		 * 1 or 2 for minBlocks.)
		 *
		 * If there are more requested extents than blocks, the division
		 * works out to zero... so we limit blockStep to minBlocks.
		 */
		blockStep = blocksLeft / numExtents;

		/*
		 * To allow invalid extent lengths, set minBlocks to 1, and
		 * comment out the next two if statements.
		 */
		if ((blockStep % minBlocks) != 0)
			blockStep = (blockStep / minBlocks) * minBlocks;
		if (blockStep == 0)
			blockStep = minBlocks;

		/*
		 * Now, after that, we may still not have the right number, since
		 * the math may not work out properly.  So we can work around that
		 * by making the first extent have all the spares.
		 */
		if ((blockStep * numExtents) < blocksLeft) {
			// Need to adjust the first one.
			firstAdjust = blocksLeft - (blockStep * numExtents);
			if ((firstAdjust % minBlocks) != 0)
				firstAdjust = ROUNDUP(firstAdjust, minBlocks);
		}

		/*
		 * Now, at this point, start handing out blocks to each extent.
		 * First to the 8 extents in the fork descriptor.
		 */
		for (i = 0; i < 8 && blocksLeft > 0; i++) {
			int n = MIN(blockStep + firstAdjust, blocksLeft);
			file->extents[i].startBlock = startBlock + blocksTotal;
			file->extents[i].blockCount = n;
			blocksLeft -= n;
			blocksTotal += n;
			firstAdjust = 0;
		}
		/*
		 * Then, if there are any left, to the overflow extents.
		 */
		while (blocksLeft > 0) {
			struct ExtentRecord tmp;
			UInt32 bcount = 0;

			memset(&tmp, 0, sizeof(tmp));
			tmp.key.keyLength = SWAP_BE16(sizeof(HFSPlusExtentKey) - sizeof(uint16_t));
			tmp.key.forkType = 0;
			tmp.key.fileID = SWAP_BE32(fileID);
			tmp.key.startBlock = SWAP_BE32(blocksTotal);
			for (i = 0; i < 8 && blocksLeft > 0; i++) {
				int n = MIN(blockStep, blocksLeft);
				tmp.record[i].startBlock = SWAP_BE32(blocksTotal + bcount + startBlock);
				tmp.record[i].blockCount = SWAP_BE32(n);
				bcount += n;
				blocksLeft -= n;
			}
			blocksTotal += bcount;
			overflowExtents = realloc(overflowExtents, (numOverflowExtents+1) * sizeof(*overflowExtents));
			overflowExtents[numOverflowExtents++] = tmp;
		}
	}
	return;
}
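
/*
 * Illustrative sketch (not part of the build): in debug builds, with
 * totalBlocks = 10, numExtents = 4 and minBlocks = 1, blockStep becomes
 * 10 / 4 = 2 and the two leftover blocks are folded into the first extent,
 * giving extents of 4, 2, 2 and 2 blocks.  (Non-debug builds always create
 * one contiguous extent.)  The fork and numbers below are made up to show
 * that arithmetic.
 */
#if 0
static void
ExampleCreateExtents(void)
{
	HFSPlusForkData fork;

	memset(&fork, 0, sizeof(fork));
	fork.totalBlocks = 10;
	createExtents(&fork, kHFSCatalogFileID, 100, 4, 1);
	/* Expected extents (debug builds): <100,4> <104,2> <106,2> <108,2> */
}
#endif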
/*
 * wipefs() in -lutil knows about multiple filesystem formats.
 * This replaces the code:
 *	WriteBuffer(driveInfo, 0, diskBlocksUsed * kBytesPerSector, NULL);
 *	WriteBuffer(driveInfo, driveInfo->totalSectors - 8, 4 * 1024, NULL);
 * which was used to erase the beginning and end of the filesystem.
 */
static int
dowipefs(int fd)
{
	int err;
	wipefs_ctx handle;

	err = wipefs_alloc(fd, 0/*sectorSize*/, &handle);
	if (err == 0) {
		err = wipefs_wipe(handle);
	}
	wipefs_free(&handle);
	return err;
}
/*
 * make_hfsplus
 *
 * This routine writes an initial HFS Plus volume structure onto a volume.
 * It is assumed that the disk has already been formatted and verified.
 */
int
make_hfsplus(const DriveInfo *driveInfo, hfsparams_t *defaults)
{
	UInt16			btNodeSize;
	UInt32			sectorsPerBlock;
	UInt32			mapNodes;
	UInt32			sectorsPerNode;
	UInt32			temp;
	UInt32			bytesUsed;
	UInt32			endOfAttributes;
	UInt32			startOfAllocation;
	UInt64			bytesToZero;
	UInt64			sector;
	void			*nodeBuffer = NULL;
	HFSPlusVolumeHeader	*header = NULL;

	/* Use wipefs() API to clear old metadata from the device.
	 * This should be done before we start writing anything on the
	 * device as wipefs will internally call ioctl(DKIOCDISCARD) on the
	 * entire device.
	 */
	(void) dowipefs(driveInfo->fd);

	/* --- Create an HFS Plus header:  */

	header = (HFSPlusVolumeHeader*)malloc((size_t)kBytesPerSector);
	if (header == NULL)
		err(1, NULL);

	defaults->encodingHint = getencodinghint(defaults->volumeName);

	/* VH Initialized in native byte order */
	InitVH(defaults, driveInfo->totalSectors, header);

	sectorsPerBlock = header->blockSize / kBytesPerSector;

	/*--- ZERO OUT BEGINNING OF DISK:  */
	/*
	 * Clear out the space to be occupied by the bitmap and B-Trees.
	 * The first chunk is the boot sectors, volume header, allocation bitmap,
	 * journal, Extents B-tree, and Attributes B-tree (if any).
	 * The second chunk is the Catalog B-tree.
	 */

	/* Zero out first 1M (to be safe) for volume header */
	WriteBuffer(driveInfo, 0, 1024*1024, NULL);

	if (NEWFS_HFS_DEBUG) {
		/*
		 * Mark each file extent as used individually, rather than doing it all at once.
		 * Also zero out the entire file.
		 */
# define MFU(f) \
		do { \
			WriteBuffer(driveInfo, \
				header->f.extents[0].startBlock * sectorsPerBlock, \
				header->f.totalBlocks * header->blockSize, \
				NULL); \
			if (MarkExtentUsed(driveInfo, header, header->f.extents[0].startBlock, header->f.totalBlocks) == -1) { \
				errx(1, #f " extent overlap <%u, %u>", header->f.extents[0].startBlock, header->f.totalBlocks); \
			} \
		} while (0)
		MFU(allocationFile);
		MFU(extentsFile);
		MFU(attributesFile);
# undef MFU
	} else {
		/* Zero out from start of allocation file to end of attribute file;
		 * will include allocation bitmap, journal, extents btree, and
		 * attribute btree
		 */
		sector = header->allocationFile.extents[0].startBlock * sectorsPerBlock;
		endOfAttributes = header->attributesFile.extents[0].startBlock + header->attributesFile.totalBlocks;
		startOfAllocation = header->allocationFile.extents[0].startBlock;
		bytesToZero = (UInt64) (endOfAttributes - startOfAllocation + 1) * header->blockSize;
		WriteBuffer(driveInfo, sector, bytesToZero, NULL);

		bytesToZero = (UInt64) header->catalogFile.totalBlocks * header->blockSize;
		sector = header->catalogFile.extents[0].startBlock * sectorsPerBlock;
		WriteBuffer(driveInfo, sector, bytesToZero, NULL);
	}

	/*
	 * Allocate a buffer for the rest of our IO.
	 * Note that in some cases we may need to initialize an EA, so we
	 * need to use the attribute B-Tree node size in this calculation.
	 */
	temp = Largest( defaults->catalogNodeSize * 2,
			(defaults->attributesNodeSize * 2),
			defaults->blockSize,
			(header->catalogFile.extents[0].startBlock + header->catalogFile.totalBlocks + 7) / 8 );
	/*
	 * If size is not a multiple of 512, round up to nearest sector
	 */
	if ( (temp & 0x01FF) != 0 )
		temp = (temp + kBytesPerSector) & 0xFFFFFE00;

	nodeBuffer = valloc((size_t)temp);
	if (nodeBuffer == NULL)
		err(1, NULL);

	/*--- WRITE ALLOCATION BITMAP BITS TO DISK:  */

	/*
	 * XXX - this doesn't work well with using arbitrary extents.
	 *
	 * To do this, we need to find the appropriate area in the file, and
	 * pass that in to AllocateExtent, which is just a bitmap manipulation
	 * routine.  Then we need to write it out at the right place.  Note that
	 * we may have to read it in first, as well, which may mean zeroing out
	 * the entirety of the allocation file first.
	 *
	 * Solution:
	 * New function to mark extent as used.
	 * Function should figure out which block(s) for an extent.
	 * Read it in.  Mark the bits used.  Return.
	 * For now, it can assume the allocation extents are contiguous, but
	 * should be extensible to not do that.
	 */
	sector = header->allocationFile.extents[0].startBlock * sectorsPerBlock;
	bzero(nodeBuffer, temp);
	/* Mark volume header as allocated */
	if (header->blockSize == 512) {
		if (MarkExtentUsed(driveInfo, header, 0, 4) == -1) {
			errx(1, "Overlapped extent at <0, 4> (%d)", __LINE__);
		}
	} else if (header->blockSize == 1024) {
		if (MarkExtentUsed(driveInfo, header, 0, 2) == -1) {
			errx(1, "Overlapped extent at <0, 2> (%d)", __LINE__);
		}
	} else {
		if (MarkExtentUsed(driveInfo, header, 0, 1) == -1) {
			errx(1, "Overlapped extent at <0, 1> (%d)", __LINE__);
		}
	}
	if (NEWFS_HFS_DEBUG == 0) {
		/* Mark area from bitmap to end of attributes as allocated */
		if (MarkExtentUsed(driveInfo, header, startOfAllocation, (endOfAttributes - startOfAllocation)) == -1) {
			errx(1, "Overlapped extent at <%u, %u> (%d)\n", startOfAllocation, endOfAttributes - startOfAllocation, __LINE__);
		}
	}

	/* Mark catalog btree blocks as allocated */
	if (NEWFS_HFS_DEBUG) {
		/* Erase the catalog file first */
		WriteBuffer(driveInfo,
			header->catalogFile.extents[0].startBlock * sectorsPerBlock,
			header->catalogFile.totalBlocks * header->blockSize,
			NULL);
	}
	if (MarkExtentUsed(driveInfo, header,
			header->catalogFile.extents[0].startBlock,
			header->catalogFile.totalBlocks) == -1) {
		errx(1, "Overlapped catalog extent at <%u, %u>\n", header->catalogFile.extents[0].startBlock, header->catalogFile.totalBlocks);
	}

	/*
	 * Write alternate Volume Header bitmap bit to allocations file at
	 * 2nd to last sector on HFS+ volume
	 */
	if (MarkExtentUsed(driveInfo, header, header->totalBlocks - 1, 1) == -1) {
		errx(1, "Overlapped extent for header at <%u, %u>\n", header->totalBlocks - 1, 1);
	}

	/*
	 * If the blockSize is 512 bytes, then the last 1kbyte has to be marked
	 * as used as well.
	 */
	if ( header->blockSize == 512 ) {
		if (MarkExtentUsed(driveInfo, header, header->totalBlocks - 2, 1) == -1) {
			errx(1, "Overlapped extent for AVH at <%u, %u>\n", header->totalBlocks - 2, 1);
		}
	}

	/*--- WRITE FILE EXTENTS B-TREE TO DISK:  */

	btNodeSize = defaults->extentsNodeSize;
	sectorsPerNode = btNodeSize/kBytesPerSector;

	sector = header->extentsFile.extents[0].startBlock * sectorsPerBlock;
	WriteExtentsFile(driveInfo, sector, defaults, NULL, nodeBuffer, &bytesUsed, &mapNodes);

	if (mapNodes > 0) {
		WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector),
			bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer);
	}

	/*--- WRITE FILE ATTRIBUTES B-TREE TO DISK:  */
	if (defaults->attributesInitialSize) {

		btNodeSize = defaults->attributesNodeSize;
		sectorsPerNode = btNodeSize/kBytesPerSector;

		sector = header->attributesFile.extents[0].startBlock * sectorsPerBlock;
		WriteAttributesFile(driveInfo, sector, defaults, NULL, nodeBuffer, &bytesUsed, &mapNodes);
		if (mapNodes > 0) {
			WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector),
				bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer);
		}
	}

	/*--- WRITE CATALOG B-TREE TO DISK:  */

	btNodeSize = defaults->catalogNodeSize;
	sectorsPerNode = btNodeSize/kBytesPerSector;

	sector = header->catalogFile.extents[0].startBlock * sectorsPerBlock;
	WriteCatalogFile(driveInfo, sector, defaults, header, nodeBuffer, &bytesUsed, &mapNodes);

	if (mapNodes > 0) {
		WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector),
			bytesUsed/btNodeSize, mapNodes, btNodeSize, nodeBuffer);
	}

	/*--- JOURNALING SETUP */
	if (defaults->journaledHFS) {
		sector = header->journalInfoBlock * sectorsPerBlock;
		if (NEWFS_HFS_DEBUG) {
			/*
			 * For debug build, the journal may be located somewhere other
			 * than right after the journalInfoBlock.
			 */
			if (MarkExtentUsed(driveInfo, header, header->journalInfoBlock, 1) == -1) {
				errx(1, "Extent overlap for journalInfoBlock <%u, 1>", header->journalInfoBlock);
			}
			if (!defaults->journalDevice) {
				UInt32 jStart = defaults->journalBlock ? defaults->journalBlock : (header->journalInfoBlock + 1);
				UInt32 jCount = (UInt32)(defaults->journalSize / header->blockSize);
				if (MarkExtentUsed(driveInfo, header, jStart, jCount) == -1) {
					errx(1, "Extent overlap for journal <%u, %u>", jStart, jCount);
				}
			}
		}
		if (WriteJournalInfo(driveInfo, sector, defaults, header, nodeBuffer) != 0) {
			err(EINVAL, "Failed to create the journal");
		}
	}

	/*--- WRITE VOLUME HEADER TO DISK:  */

	/* write header last in case we fail along the way */

	/* Writes both copies of the volume header */
	WriteVH (driveInfo, header);
	/* VH is now big-endian */

	free(nodeBuffer);
	free(header);

	return (0);
}
/*
 * WriteVH
 *
 * Writes the Volume Header (VH) to disk.
 *
 * The VH is byte-swapped if necessary to big endian.  Since this
 * is always the last operation, there's no point in unswapping it.
 */
static void
WriteVH (const DriveInfo *driveInfo, HFSPlusVolumeHeader *hp)
{
	SWAP_HFSPLUSVH (hp);

	WriteBuffer(driveInfo, 2, kBytesPerSector, hp);
	WriteBuffer(driveInfo, driveInfo->totalSectors - 2, kBytesPerSector, hp);
}
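
/*
 * Illustrative sketch (not part of the build): the primary volume header
 * sits 1024 bytes into the volume (512-byte sector 2) and the alternate
 * sits 1024 bytes before the end (totalSectors - 2), which is exactly what
 * the two WriteBuffer() calls above encode.  The value passed in below is
 * made up.
 */
#if 0
static void
ExampleVolumeHeaderOffsets(UInt64 totalSectors)
{
	UInt64 primaryByteOffset   = 2 * kBytesPerSector;			/* 1024 */
	UInt64 alternateByteOffset = (totalSectors - 2) * kBytesPerSector;	/* size - 1024 */

	printf("primary VH @ %llu, alternate VH @ %llu\n",
	       (unsigned long long)primaryByteOffset,
	       (unsigned long long)alternateByteOffset);
}
#endif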
/*
 * InitVH
 *
 * Initialize a Volume Header record.
 */
static void
InitVH(hfsparams_t *defaults, UInt64 sectors, HFSPlusVolumeHeader *hp)
{
	UInt32	blockSize;
	UInt32	blockCount;
	UInt32	blocksUsed;
	UInt32	bitmapBlocks;
	UInt16	burnedBlocksBeforeVH = 0;
	UInt16	burnedBlocksAfterAltVH = 0;
	UInt32	nextBlock;
	UInt32	allocateBlock;
	VolumeUUID	newVolumeUUID;
	VolumeUUID*	finderInfoUUIDPtr;
	UInt64	hotFileBandSize;
	UInt64	volsize;
	UInt32	journalBlock;

	/*
	 * 2 MB is the minimum size for the new behavior with
	 * space after the attr b-tree, and hotfile stuff.
	 */
#define MINVOLSIZE_WITHSPACE 2097152

	bzero(hp, kBytesPerSector);

	blockSize = defaults->blockSize;
	blockCount = sectors / (blockSize >> kLog2SectorSize);

	/*
	 * HFSPlusVolumeHeader is located at sector 2, so we may need
	 * to invalidate blocks before HFSPlusVolumeHeader.
	 */
	if ( blockSize == 512 ) {
		burnedBlocksBeforeVH = 2;		/* 2 before VH */
		burnedBlocksAfterAltVH = 1;		/* 1 after altVH */
	} else if ( blockSize == 1024 ) {
		burnedBlocksBeforeVH = 1;
	}
	nextBlock = burnedBlocksBeforeVH + 1;	/* +1 for VH itself */
	if (defaults->fsStartBlock) {
		if (NEWFS_HFS_DEBUG)
			printf ("Laying down metadata starting at allocation block=%u (totalBlocks=%u)\n", (unsigned int)defaults->fsStartBlock, (unsigned int)blockCount);
		nextBlock += defaults->fsStartBlock;	/* lay down file system after this allocation block */
	}

	bitmapBlocks = defaults->allocationClumpSize / blockSize;

	/* note: add 2 for the Alternate VH, and VH */
	blocksUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;

	if (defaults->flags & kMakeCaseSensitive) {
		hp->signature = kHFSXSigWord;
		hp->version = kHFSXVersion;
	} else {
		hp->signature = kHFSPlusSigWord;
		hp->version = kHFSPlusVersion;
	}
	hp->attributes = kHFSVolumeUnmountedMask | kHFSUnusedNodeFixMask;
	if (defaults->flags & kMakeContentProtect) {
		hp->attributes |= kHFSContentProtectionMask;
	}
	hp->lastMountedVersion = kHFSPlusMountVersion;

	/* NOTE: create date is in local time, not GMT!  */
	hp->createDate = UTCToLocal(defaults->createDate);
	hp->modifyDate = defaults->createDate;
	hp->backupDate = 0;
	hp->checkedDate = defaults->createDate;

//	hp->fileCount = 0;
//	hp->folderCount = 0;

	hp->blockSize = blockSize;
	hp->totalBlocks = blockCount;
	hp->freeBlocks = blockCount;	/* will be adjusted at the end */

	volsize = (UInt64) blockCount * (UInt64) blockSize;

	hp->rsrcClumpSize = defaults->rsrcClumpSize;
	hp->dataClumpSize = defaults->dataClumpSize;
	hp->nextCatalogID = defaults->nextFreeFileID;
	hp->encodingsBitmap = 1 | (1 << ENCODING_TO_BIT(defaults->encodingHint));

	/* set up allocation bitmap file */
	hp->allocationFile.clumpSize = defaults->allocationClumpSize;
	hp->allocationFile.logicalSize = defaults->allocationClumpSize;
	hp->allocationFile.totalBlocks = bitmapBlocks;

	if (NEWFS_HFS_DEBUG && defaults->allocationStartBlock)
		allocateBlock = defaults->allocationStartBlock;
	else {
		allocateBlock = nextBlock;
		nextBlock += bitmapBlocks;
	}

	createExtents(&hp->allocationFile, kHFSAllocationFileID, allocateBlock, defaults->allocationExtsCount, 1);

	// This works because the files are contiguous for now
	if (NEWFS_HFS_DEBUG)
		printf ("allocationFile: (%10u, %10u)\n", hp->allocationFile.extents[0].startBlock, hp->allocationFile.totalBlocks);

	/* set up journal files */
	if (defaults->journaledHFS) {
		hp->fileCount = 2;
		hp->attributes |= kHFSVolumeJournaledMask;
		hp->nextCatalogID += 2;

		/*
		 * Allocate 1 block for the journalInfoBlock.  The
		 * journal file size is passed in hfsparams_t.
		 */
		if (NEWFS_HFS_DEBUG && defaults->journalInfoBlock)
			hp->journalInfoBlock = defaults->journalInfoBlock;
		else
			hp->journalInfoBlock = nextBlock++;
		if (NEWFS_HFS_DEBUG && defaults->journalBlock)
			journalBlock = defaults->journalBlock;
		else
			journalBlock = hp->journalInfoBlock + 1;
		nextBlock += ((defaults->journalSize+blockSize-1) / blockSize);

		if (NEWFS_HFS_DEBUG) {
			printf ("journalInfo   : (%10u, %10u)\n", (u_int32_t)hp->journalInfoBlock, 1);
			printf ("journal       : (%10u, %10u)\n", (u_int32_t)journalBlock, (u_int32_t)((defaults->journalSize + (blockSize-1)) / blockSize));
		}
		/* XXX What if journal is on a different device? */
		blocksUsed += 1 + ((defaults->journalSize+blockSize-1) / blockSize);
	} else {
		hp->journalInfoBlock = 0;
	}

	/* set up extents b-tree file */
	hp->extentsFile.clumpSize = defaults->extentsClumpSize;
	hp->extentsFile.logicalSize = defaults->extentsInitialSize;
	hp->extentsFile.totalBlocks = defaults->extentsInitialSize / blockSize;
	if (NEWFS_HFS_DEBUG && defaults->extentsStartBlock)
		allocateBlock = defaults->extentsStartBlock;
	else {
		allocateBlock = nextBlock;
		nextBlock += hp->extentsFile.totalBlocks;
	}
	createExtents(&hp->extentsFile, kHFSExtentsFileID, allocateBlock, defaults->extentsExtsCount, (defaults->journaledHFS && defaults->extentsNodeSize > hp->blockSize) ? defaults->extentsNodeSize / hp->blockSize : 1);

	blocksUsed += hp->extentsFile.totalBlocks;

	if (NEWFS_HFS_DEBUG)
		printf ("extentsFile   : (%10u, %10u)\n", hp->extentsFile.extents[0].startBlock, hp->extentsFile.totalBlocks);

	/* set up attributes b-tree file */
	if (defaults->attributesInitialSize) {
		hp->attributesFile.clumpSize = defaults->attributesClumpSize;
		hp->attributesFile.logicalSize = defaults->attributesInitialSize;
		hp->attributesFile.totalBlocks = defaults->attributesInitialSize / blockSize;
		if (NEWFS_HFS_DEBUG && defaults->attributesStartBlock)
			allocateBlock = defaults->attributesStartBlock;
		else {
			allocateBlock = nextBlock;
			nextBlock += hp->attributesFile.totalBlocks;
		}
		createExtents(&hp->attributesFile, kHFSAttributesFileID, allocateBlock, defaults->attributesExtsCount, (defaults->journaledHFS && defaults->attributesNodeSize > hp->blockSize) ? defaults->attributesNodeSize / hp->blockSize : 1);
		blocksUsed += hp->attributesFile.totalBlocks;

		if (NEWFS_HFS_DEBUG) {
			printf ("attributesFile: (%10u, %10u)\n", hp->attributesFile.extents[0].startBlock, hp->attributesFile.totalBlocks);
		}
		/*
		 * Leave some room for the Attributes B-tree to grow, if the volsize >= 2MB
		 */
		if (volsize >= MINVOLSIZE_WITHSPACE && defaults->attributesStartBlock == 0) {
			nextBlock += 10 * (hp->attributesFile.clumpSize / blockSize);
		}
	}

	/* set up catalog b-tree file */
	hp->catalogFile.clumpSize = defaults->catalogClumpSize;
	hp->catalogFile.logicalSize = defaults->catalogInitialSize;
	hp->catalogFile.totalBlocks = defaults->catalogInitialSize / blockSize;
	if (NEWFS_HFS_DEBUG && defaults->catalogStartBlock)
		allocateBlock = defaults->catalogStartBlock;
	else {
		allocateBlock = nextBlock;
		nextBlock += hp->catalogFile.totalBlocks;
	}
	createExtents(&hp->catalogFile, kHFSCatalogFileID, allocateBlock, defaults->catalogExtsCount, (defaults->journaledHFS && defaults->catalogNodeSize > hp->blockSize) ? defaults->catalogNodeSize / hp->blockSize : 1);
	blocksUsed += hp->catalogFile.totalBlocks;

	if (NEWFS_HFS_DEBUG)
		printf ("catalogFile   : (%10u, %10u)\n\n", hp->catalogFile.extents[0].startBlock, hp->catalogFile.totalBlocks);

	if ((numOverflowExtents * sizeof(struct ExtentRecord)) >
	    (defaults->extentsNodeSize - sizeof(BTNodeDescriptor) - (sizeof(uint16_t) * numOverflowExtents))) {
		errx(1, "Too many overflow extent records to fit into a single extent node");
	}

	/*
	 * Add some room for the catalog file to grow...
	 */
	nextBlock += 10 * (hp->catalogFile.clumpSize / hp->blockSize);

	/*
	 * Add some room for the hot file band.  This uses the same 5MB per GB
	 * as the kernel.  The kernel only uses hotfiles if the volume is larger
	 * than 10GBytes, so do the same here.
	 */
#define METADATAZONE_MINIMUM_VOLSIZE (10ULL * 1024ULL * 1024ULL * 1024ULL)
#define HOTBAND_MINIMUM_SIZE  (10*1024*1024)
#define HOTBAND_MAXIMUM_SIZE  (512*1024*1024)
	if (volsize >= METADATAZONE_MINIMUM_VOLSIZE) {
		hotFileBandSize = (UInt64) blockCount * blockSize / 1024 * 5;
		if (hotFileBandSize > HOTBAND_MAXIMUM_SIZE)
			hotFileBandSize = HOTBAND_MAXIMUM_SIZE;
		else if (hotFileBandSize < HOTBAND_MINIMUM_SIZE)
			hotFileBandSize = HOTBAND_MINIMUM_SIZE;
		nextBlock += hotFileBandSize / blockSize;
	}
	if (NEWFS_HFS_DEBUG && defaults->nextAllocBlock)
		hp->nextAllocation = defaults->nextAllocBlock;
	else
		hp->nextAllocation = nextBlock;

	/* Adjust free blocks to reflect everything we have allocated. */
	hp->freeBlocks -= blocksUsed;

	/* Generate and write UUID for the HFS+ disk */
	GenerateVolumeUUID(&newVolumeUUID);
	finderInfoUUIDPtr = (VolumeUUID *)(&hp->finderInfo[24]);
	finderInfoUUIDPtr->v.high = OSSwapHostToBigInt32(newVolumeUUID.v.high);
	finderInfoUUIDPtr->v.low = OSSwapHostToBigInt32(newVolumeUUID.v.low);
}
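
/*
 * Illustrative sketch (not part of the build): the hot file band reserves
 * 5 MB per GB of volume, clamped to [HOTBAND_MINIMUM_SIZE,
 * HOTBAND_MAXIMUM_SIZE], and only on volumes of at least 10 GB.  For a
 * 100 GB volume the reservation works out to 500 MB.  The numbers below
 * are made up to show the arithmetic used above.
 */
#if 0
static void
ExampleHotFileBand(void)
{
	UInt64 volsize = 100ULL * 1024 * 1024 * 1024;	/* 100 GB */
	UInt64 hotFileBandSize = volsize / 1024 * 5;	/* 5 MB per GB == 500 MB */

	if (hotFileBandSize > HOTBAND_MAXIMUM_SIZE)
		hotFileBandSize = HOTBAND_MAXIMUM_SIZE;
	else if (hotFileBandSize < HOTBAND_MINIMUM_SIZE)
		hotFileBandSize = HOTBAND_MINIMUM_SIZE;

	printf("hot file band: %llu bytes\n", (unsigned long long)hotFileBandSize);	/* 524288000 */
}
#endif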
/*
 * Mark the given extent as in-use in the given bitmap buffer.
 */
static int AllocateExtent(UInt8 *buffer, UInt32 startBlock, UInt32 blockCount)
{
	UInt8 *p;

	/* Point to start of extent in bitmap buffer */
	p = buffer + (startBlock / 8);

	/*
	 * Important to remember:  block 0 is (1 << 7);
	 * block 7 is (1 << 0).
	 */
	/* Partial byte at start of extent */
	if (startBlock & 7) {
		UInt8 mask;
		unsigned int lShift = 0;
		unsigned int startBit = startBlock & 7;

		/*
		 * Is startBlock + blockCount entirely in
		 * this one byte?
		 */
		if (blockCount < (8 - startBit)) {
			lShift = 8 - (startBit + blockCount);
		}
		mask = (0xff >> startBit) & (0xff << lShift);
		if (NEWFS_HFS_DEBUG && (*p & mask)) {
			fprintf(stderr, "%s(%d):  expected 0, got %x\n", __FUNCTION__, __LINE__, *p & mask);
			return -1;
		}
		*p |= mask;
		p++;
		/*
		 * We have either set <lShift> or <startBlock & 7> bits.
		 */
		blockCount -= 8 - (lShift + startBit);
//		blockCount -= lShift ? blockCount : (8 - startBit);
//		blockCount -= __builtin_popcount(mask);
	}
	/* Fill in whole bytes */
	if (blockCount >= 8) {
		if (NEWFS_HFS_DEBUG) {
			/*
			 * Put this in ifdef because it'll slow things down.
			 * For non-debug case, we shouldn't have to worry about
			 * an overlap, anyway.
			 */
			size_t indx;
			for (indx = 0; indx < blockCount / 8; indx++) {
				if (p[indx] != 0) {
					fprintf(stderr, "%s(%d):  Expected 0 at %zu, got 0x%x\n", __FUNCTION__, __LINE__, indx, p[indx]);
					return -1;
				}
			}
		}
		memset(p, 0xFF, blockCount / 8);
		p += blockCount / 8;
		blockCount %= 8;
	}
	/* Partial byte at end of extent */
	if (blockCount) {
		UInt8 mask = 0xff << (8 - blockCount);
		if (NEWFS_HFS_DEBUG && (*p & mask)) {
			fprintf(stderr, "%s(%d):  Expected 0, got %x\n", __FUNCTION__, __LINE__, *p & mask);
			return -1;
		}
		*p |= mask;
	}

	return 0;
}
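
/*
 * Illustrative sketch (not part of the build): AllocateExtent() treats bit 7
 * of each bitmap byte as the lowest-numbered block in that byte.  Marking
 * blocks 2..4 in an empty byte therefore sets the mask
 * (0xff >> 2) & (0xff << 3) == 0x38.  The one-byte bitmap below is made up.
 */
#if 0
static void
ExampleAllocateExtentMask(void)
{
	UInt8 bitmap[1] = { 0 };

	(void) AllocateExtent(bitmap, 2, 3);	/* blocks 2, 3 and 4 */
	printf("bitmap[0] = 0x%02x\n", bitmap[0]);	/* 0x38 == 0011 1000 */
}
#endif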
/*
 * Mark an extent as being used.
 * This involves finding out where the allocations file is,
 * where in the allocations file the extent starts, and how
 * long it runs.
 *
 * One downside to this implementation is that this does
 * more I/O than the old mechanism, a cost to the flexibility.
 * May have to consider doing caching of some sort.
 */
static int
MarkExtentUsed(const DriveInfo *driveInfo,
	       HFSPlusVolumeHeader *header,
	       UInt32 startBlock,
	       UInt32 blockCount)
{
	size_t bufSize = driveInfo->physSectorSize;
	uint8_t *buf;
	uint32_t blocksLeft = blockCount;
	uint32_t curBlock = startBlock;
	static const int kBitsPerByte = 8;
	int retval = -1;

	buf = valloc(bufSize);
	if (buf == NULL)
		goto exit;

	/*
	 * We loop through physSectorSize blocks.
	 * This allows us to set as many bits as we need.
	 */
	while (blocksLeft > 0) {
		uint32_t secNum;
		uint32_t numBlocks;	// The number of blocks to mark as used in this pass.
		uint32_t blockOffset;	// This is the block number of the current range, which starts at curBlock
		off_t offset;
		ssize_t nbytes, nwritten;

		memset(buf, 0, bufSize);
		secNum = curBlock / (bufSize * kBitsPerByte);
		blockOffset = curBlock % (bufSize * kBitsPerByte);
		numBlocks = MIN((bufSize * kBitsPerByte) - blockOffset, blocksLeft);

		/*
		 * Okay, now we've got the block number to read,
		 * the offset into the block, and the number of blocks
		 * to set.
		 *
		 * First we read in the buffer.  To do that, we need to
		 * know where to read.
		 */

		/*
		 * This needs to be changed if/when we support non-contiguous multiple
		 * extents.  At that point, it'll probably have to be a function to search
		 * for the requested offset.  (How many times must MapFileC be written?)
		 * For now, though, the offset is the physical sector offset from the
		 * start of the allocations file.
		 */
		offset = (header->allocationFile.extents[0].startBlock * header->blockSize) +
			(secNum * bufSize);

		nbytes = pread(driveInfo->fd, buf, bufSize, offset);
		if (nbytes < (ssize_t)bufSize) {
			if (nbytes == -1)
				err(1, "%s::pread(%d, %p, %zu, %lld)", __FUNCTION__, driveInfo->fd, buf, bufSize, offset);
			goto exit;
		}

		if (AllocateExtent(buf, blockOffset, numBlocks) == -1) {
			warnx("In-use allocation block in <%u, %u>", blockOffset, numBlocks);
			goto exit;
		}
		nwritten = pwrite(driveInfo->fd, buf, bufSize, offset);
		/*
		 * Normally I'd check for nwritten to be less than bufSize, but since bufSize is
		 * the physical sector size, we shouldn't be able to get less.  So that most likely
		 * means a return value of 0 or -1, neither of which I could do anything about.
		 */
		if (nwritten != (ssize_t)bufSize)
			goto exit;

		// And go get the next set, if needed
		blocksLeft -= numBlocks;
		curBlock += numBlocks;
	}

	retval = 0;

exit:
	free(buf);
	return retval;
}
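
/*
 * Illustrative sketch (not part of the build): with 4096-byte physical
 * sectors, each pass of MarkExtentUsed() covers 4096 * 8 == 32768 allocation
 * blocks worth of bitmap.  Marking block 40000 therefore lands in bitmap
 * sector 1 at bit offset 7232.  The sector size and block number below are
 * made up to show that split.
 */
#if 0
static void
ExampleBitmapSplit(void)
{
	size_t   bufSize = 4096;	/* assumed physical sector size */
	uint32_t curBlock = 40000;
	uint32_t secNum = curBlock / (bufSize * 8);		/* 1 */
	uint32_t blockOffset = curBlock % (bufSize * 8);	/* 7232 */

	printf("bitmap sector %u, bit offset %u\n", secNum, blockOffset);
}
#endif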
/*
 * WriteExtentsFile
 *
 * Initializes and writes out the extents b-tree file.
 *
 * Byte swapping is performed in place.  The buffer should not be
 * accessed through direct casting once it leaves this function.
 */
static void
WriteExtentsFile(const DriveInfo *driveInfo, UInt64 startingSector,
	const hfsparams_t *dp, HFSExtentDescriptor *bbextp __unused, void *buffer,
	UInt32 *bytesUsed, UInt32 *mapNodes)
{
	BTNodeDescriptor	*ndp;
	BTHeaderRec		*bthp;
	UInt8			*bmp;
	UInt32			nodeBitsInHeader;
	UInt32			fileSize;
	UInt32			nodeSize;
	UInt32			temp;
	SInt16			offset;

	*mapNodes = 0;
	fileSize = dp->extentsInitialSize;
	nodeSize = dp->extentsNodeSize;

	bzero(buffer, nodeSize);

	/* FILL IN THE NODE DESCRIPTOR:  */
	ndp = (BTNodeDescriptor *)buffer;
	ndp->kind = kBTHeaderNode;
	ndp->numRecords = SWAP_BE16 (3);
	offset = sizeof(BTNodeDescriptor);

	SETOFFSET(buffer, nodeSize, offset, 1);

	/* FILL IN THE HEADER RECORD:  */
	bthp = (BTHeaderRec *)((UInt8 *)buffer + offset);
	if (numOverflowExtents) {
		bthp->treeDepth = SWAP_BE16(1);
		bthp->rootNode = SWAP_BE32(1);
		bthp->firstLeafNode = SWAP_BE32(1);
		bthp->lastLeafNode = SWAP_BE32(1);
		bthp->leafRecords = SWAP_BE32(numOverflowExtents);
	} else {
		bthp->treeDepth = 0;
		bthp->rootNode = 0;
		bthp->firstLeafNode = 0;
		bthp->lastLeafNode = 0;
		bthp->leafRecords = 0;
	}

	bthp->nodeSize = SWAP_BE16 (nodeSize);
	bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize);
	bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - (numOverflowExtents ? 2 : 1));	/* header */
	bthp->clumpSize = SWAP_BE32 (dp->extentsClumpSize);

	bthp->attributes |= SWAP_BE32 (kBTBigKeysMask);
	bthp->maxKeyLength = SWAP_BE16 (kHFSPlusExtentKeyMaximumLength);
	offset += sizeof(BTHeaderRec);

	SETOFFSET(buffer, nodeSize, offset, 2);

	offset += kBTreeHeaderUserBytes;

	SETOFFSET(buffer, nodeSize, offset, 3);

	/* FIGURE OUT HOW MANY MAP NODES (IF ANY):  */
	nodeBitsInHeader = 8 * (nodeSize
				- sizeof(BTNodeDescriptor)
				- sizeof(BTHeaderRec)
				- kBTreeHeaderUserBytes
				- (4 * sizeof(SInt16)) );

	if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) {
		UInt32	nodeBitsInMapNode;

		ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1);
		nodeBitsInMapNode = 8 * (nodeSize
					- sizeof(BTNodeDescriptor)
					- (2 * sizeof(SInt16))
					- 2 );
		*mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader +
			(nodeBitsInMapNode - 1)) / nodeBitsInMapNode;
		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes);
	}

	/*
	 * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE.
	 * Note - worst case (32MB alloc blk) will have only 18 nodes in use.
	 */
	bmp = ((UInt8 *)buffer + offset);
	temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes);

	/* Working a byte at a time is endian safe */
	while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; }
	*bmp = ~(0xFF >> temp);
	offset += nodeBitsInHeader/8;

	SETOFFSET(buffer, nodeSize, offset, 4);

	if (NEWFS_HFS_DEBUG && numOverflowExtents) {
		void *node2 = (uint8_t*)buffer + nodeSize;
		size_t i;

		int (^keyCompare)(const void *l, const void *r) = ^(const void *l, const void *r) {
			const struct ExtentRecord *left = (const struct ExtentRecord*)l;
			const struct ExtentRecord *right = (const struct ExtentRecord*)r;
			if (SWAP_BE32(left->key.fileID) != SWAP_BE32(right->key.fileID)) {
				return (SWAP_BE32(left->key.fileID) > SWAP_BE32(right->key.fileID)) ? 1 : -1;
			}
			// forkType will always be 0 for us
			if (SWAP_BE32(left->key.startBlock) != SWAP_BE32(right->key.startBlock)) {
				return (SWAP_BE32(left->key.startBlock) > SWAP_BE32(right->key.startBlock)) ? 1 : -1;
			}
			return 0;
		};

		if (numOverflowExtents > 1) {
			qsort_b(overflowExtents, numOverflowExtents, sizeof(*overflowExtents), keyCompare);
		}
		bzero(node2, nodeSize);
		ndp = (BTNodeDescriptor*)node2;
		ndp->kind = kBTLeafNode;
		ndp->numRecords = SWAP_BE16(numOverflowExtents);
		ndp->height = 1;

		offset = sizeof(BTNodeDescriptor);
		for (i = 0; i < numOverflowExtents; i++) {
			SETOFFSET(node2, nodeSize, offset, 1 + i);
			memcpy(node2 + offset, &overflowExtents[i], sizeof(*overflowExtents));
			offset += sizeof(*overflowExtents);
		}
		SETOFFSET(node2, nodeSize, offset, numOverflowExtents + 1);
	}

	*bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize;

	WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer);
}
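
/*
 * Illustrative sketch (not part of the build): a B-tree only needs map nodes
 * when the header node's map record cannot describe every node.  With
 * 4096-byte nodes the header map covers 8 * (4096 - 14 - 106 - 128 - 8)
 * == 30720 nodes, so an 8 MB extents file (2048 nodes) needs none.  The
 * sizes below are made up.
 */
#if 0
static void
ExampleMapNodesNeeded(void)
{
	UInt32 nodeSize = 4096;
	UInt32 totalNodes = (8 * 1024 * 1024) / nodeSize;	/* 2048 */
	UInt32 nodeBitsInHeader = 8 * (nodeSize
				- sizeof(BTNodeDescriptor)
				- sizeof(BTHeaderRec)
				- kBTreeHeaderUserBytes
				- (4 * sizeof(SInt16)));	/* 30720 */

	printf("map nodes needed: %s\n",
	       totalNodes > nodeBitsInHeader ? "yes" : "no");	/* no */
}
#endif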
/*
 * WriteAttributesFile
 *
 * Initializes and writes out the attributes b-tree file.
 *
 * Byte swapping is performed in place.  The buffer should not be
 * accessed through direct casting once it leaves this function.
 */
static void
WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector,
	const hfsparams_t *dp, HFSExtentDescriptor *bbextp __unused, void *buffer,
	UInt32 *bytesUsed, UInt32 *mapNodes)
{
	BTNodeDescriptor	*ndp;
	BTHeaderRec		*bthp;
	UInt8			*bmp;
	UInt32			nodeBitsInHeader;
	UInt32			fileSize;
	UInt32			nodeSize;
	UInt32			temp;
	SInt16			offset;
	int			set_cp_level = 0;

	*mapNodes = 0;
	fileSize = dp->attributesInitialSize;
	nodeSize = dp->attributesNodeSize;

	/*
	 * If user specified content protection and a protection level,
	 * then verify the protection level is sane.
	 */
	if ((dp->flags & kMakeContentProtect) && (dp->protectlevel != 0)) {
		if ((dp->protectlevel >= 2 ) && (dp->protectlevel <= 4)) {
			set_cp_level = 1;
		}
	}

	bzero(buffer, nodeSize);

	/* FILL IN THE NODE DESCRIPTOR:  */
	ndp = (BTNodeDescriptor *)buffer;
	ndp->kind = kBTHeaderNode;
	ndp->numRecords = SWAP_BE16 (3);
	offset = sizeof(BTNodeDescriptor);

	SETOFFSET(buffer, nodeSize, offset, 1);

	/* FILL IN THE HEADER RECORD:  */
	bthp = (BTHeaderRec *)((UInt8 *)buffer + offset);
	if (set_cp_level) {
		bthp->treeDepth = SWAP_BE16(1);
		bthp->rootNode = SWAP_BE32(1);
		bthp->firstLeafNode = SWAP_BE32(1);
		bthp->lastLeafNode = SWAP_BE32(1);
		bthp->leafRecords = SWAP_BE32(1);
	} else {
		bthp->treeDepth = 0;
		bthp->rootNode = 0;
		bthp->firstLeafNode = 0;
		bthp->lastLeafNode = 0;
		bthp->leafRecords = 0;
	}

	bthp->nodeSize = SWAP_BE16 (nodeSize);
	bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize);
	if (set_cp_level) {
		/* Add 1 node for the first record */
		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 2);
	} else {
		/* Take the header into account */
		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 1);
	}
	bthp->clumpSize = SWAP_BE32 (dp->attributesClumpSize);

	bthp->attributes |= SWAP_BE32 (kBTBigKeysMask | kBTVariableIndexKeysMask);
	bthp->maxKeyLength = SWAP_BE16 (kHFSPlusAttrKeyMaximumLength);

	offset += sizeof(BTHeaderRec);

	SETOFFSET(buffer, nodeSize, offset, 2);

	offset += kBTreeHeaderUserBytes;

	SETOFFSET(buffer, nodeSize, offset, 3);

	/* FIGURE OUT HOW MANY MAP NODES (IF ANY):  */
	nodeBitsInHeader = 8 * (nodeSize
				- sizeof(BTNodeDescriptor)
				- sizeof(BTHeaderRec)
				- kBTreeHeaderUserBytes
				- (4 * sizeof(SInt16)) );
	if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) {
		UInt32	nodeBitsInMapNode;

		ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1);
		nodeBitsInMapNode = 8 * (nodeSize
					- sizeof(BTNodeDescriptor)
					- (2 * sizeof(SInt16))
					- 2 );
		*mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader +
			(nodeBitsInMapNode - 1)) / nodeBitsInMapNode;
		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes);
	}

	/*
	 * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE.
	 * Note - worst case (32MB alloc blk) will have only 18 nodes in use.
	 */
	bmp = ((UInt8 *)buffer + offset);
	temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes);

	/* Working a byte at a time is endian safe */
	while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; }
	*bmp = ~(0xFF >> temp);
	offset += nodeBitsInHeader/8;

	SETOFFSET(buffer, nodeSize, offset, 4);

	if (set_cp_level) {
		/* Stuff in the EA on the root folder */
		void *node2 = (uint8_t*)buffer + nodeSize;

		struct cp_root_xattr ea;

		uint8_t canonicalName[256];
		CFStringRef cfstr;

		HFSPlusAttrData *attrData;
		HFSPlusAttrKey *attrKey;
		bzero(node2, nodeSize);
		ndp = (BTNodeDescriptor *)node2;

		ndp->kind = kBTLeafNode;
		ndp->numRecords = SWAP_BE16(1);
		ndp->height = 1;

		offset = sizeof(BTNodeDescriptor);
		SETOFFSET(node2, nodeSize, offset, 1);

		attrKey = (HFSPlusAttrKey *)((uint8_t*)node2 + offset);
		attrKey->fileID = SWAP_BE32(1);
		attrKey->startBlock = 0;
		attrKey->keyLength = SWAP_BE16(sizeof(*attrKey) - sizeof(attrKey->keyLength));

		cfstr = CFStringCreateWithCString(kCFAllocatorDefault, "com.apple.system.cprotect", kCFStringEncodingUTF8);
		if (_CFStringGetFileSystemRepresentation(cfstr, canonicalName, sizeof(canonicalName)) &&
		    ConvertUTF8toUnicode(canonicalName,
					 sizeof(attrKey->attrName),
					 attrKey->attrName, &attrKey->attrNameLen) == 0) {
			attrKey->attrNameLen = SWAP_BE16(attrKey->attrNameLen);
			offset += sizeof(*attrKey);

			/* If the offset is odd, move up to the next even value */
			if (offset & 1) {
				offset++;
			}

			attrData = (HFSPlusAttrData *)((uint8_t*)node2 + offset);
			bzero(&ea, sizeof(ea));
			ea.vers = OSSwapHostToLittleInt16(dp->protectlevel); //(leave in LittleEndian)
			attrData->recordType = SWAP_BE32(kHFSPlusAttrInlineData);
			attrData->attrSize = SWAP_BE32(sizeof(ea));
			memcpy(attrData->attrData, &ea, sizeof(ea));
			offset += sizeof (HFSPlusAttrData) + sizeof(ea) - sizeof(attrData->attrData);
		}
		SETOFFSET (node2, nodeSize, offset, 2);
		CFRelease(cfstr);
	}

	*bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize;
	WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer);
}
#if !TARGET_OS_EMBEDDED
static int
get_dev_uuid(const char *disk_name, char *dev_uuid_str, int dev_uuid_len)
{
	io_service_t service;
	CFStringRef uuid_str;
	int ret = EINVAL;

	if (strncmp(disk_name, _PATH_DEV, strlen(_PATH_DEV)) == 0) {
		disk_name += strlen(_PATH_DEV);
	}

	dev_uuid_str[0] = '\0';

	service = IOServiceGetMatchingService(kIOMasterPortDefault, IOBSDNameMatching(kIOMasterPortDefault, 0, disk_name));
	if (service != IO_OBJECT_NULL) {
		uuid_str = IORegistryEntryCreateCFProperty(service, CFSTR(kIOMediaUUIDKey), kCFAllocatorDefault, 0);
		if (uuid_str) {
			if (CFStringGetFileSystemRepresentation(uuid_str, dev_uuid_str, dev_uuid_len) != 0) {
				ret = 0;
			}
			CFRelease(uuid_str);
		}
		IOObjectRelease(service);
	}

	return ret;
}

static int
clear_journal_dev(const char *dev_name)
{
	int fd;

	fd = open(dev_name, O_RDWR);
	if (fd < 0) {
		printf("Failed to open the journal device %s (%s)\n", dev_name, strerror(errno));
		return -1;
	}

	dowipefs(fd);

	close(fd);
	return 0;
}
#endif /* !TARGET_OS_EMBEDDED */
static int
WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector,
		 const hfsparams_t *dp, HFSPlusVolumeHeader *header,
		 void *buffer)
{
	JournalInfoBlock *jibp = buffer;
	UInt32 journalBlock;

	memset(buffer, 0xdb, driveInfo->physSectorSize);
	memset(jibp, 0, sizeof(JournalInfoBlock));

#if !TARGET_OS_EMBEDDED
	if (dp->journalDevice) {
		char uuid_str[64];

		if (get_dev_uuid(dp->journalDevice, uuid_str, sizeof(uuid_str)) == 0) {
			strlcpy((char *)&jibp->reserved[0], uuid_str, sizeof(jibp->reserved));

			// we also need to blast out some zeros to the journal device
			// in case it had a file system on it previously.  that way
			// it's "initialized" in the sense that the previous contents
			// won't get mounted accidently.  if this fails we'll bail out.
			if (clear_journal_dev(dp->journalDevice) != 0) {
				return -1;
			}
		} else {
			printf("FAILED to get the device uuid for device %s\n", dp->journalDevice);
			strlcpy((char *)&jibp->reserved[0], "NO-DEV-UUID", sizeof(jibp->reserved));
			return -1;
		}
	} else {
#endif
		jibp->flags = kJIJournalInFSMask;
#if !TARGET_OS_EMBEDDED
	}
#endif
	jibp->flags |= kJIJournalNeedInitMask;
	if (NEWFS_HFS_DEBUG && dp->journalBlock)
		journalBlock = dp->journalBlock;
	else
		journalBlock = header->journalInfoBlock + 1;
	jibp->offset = ((UInt64) journalBlock) * header->blockSize;
	jibp->size = dp->journalSize;

	jibp->flags = SWAP_BE32(jibp->flags);
	jibp->offset = SWAP_BE64(jibp->offset);
	jibp->size = SWAP_BE64(jibp->size);

	WriteBuffer(driveInfo, startingSector, driveInfo->physSectorSize, buffer);

	jibp->flags = SWAP_BE32(jibp->flags);
	jibp->offset = SWAP_BE64(jibp->offset);
	jibp->size = SWAP_BE64(jibp->size);

	if (jibp->flags & kJIJournalInFSMask) {
		/*
		 * Zero out the on-disk content of the journal file.
		 *
		 * This is a really ugly hack.  Right now, all of the logic in the code
		 * that calls us (make_hfsplus), uses the value 'sectorsPerBlock' but it
		 * is really hardcoded to assume the sector size is 512 bytes.  The code
		 * in WriteBuffer will massage the I/O to use the actual physical sector
		 * size.  Since WriteBuffer takes a sector # relative to 512 byte sectors,
		 * we need to convert the journal offset in bytes to a value that represents
		 * its start LBA in 512 byte sectors.
		 *
		 * Note further that we swapped to big endian prior to the WriteBuffer call,
		 * but we have swapped back to native after the call.
		 */
		WriteBuffer(driveInfo, jibp->offset / kBytesPerSector, jibp->size, NULL);
	}

	return 0;
}
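
/*
 * Illustrative sketch (not part of the build): WriteBuffer() addresses the
 * disk in 512-byte sectors, so the journal's byte offset must be divided by
 * kBytesPerSector before it can be handed in.  A journal starting at
 * allocation block 5 on a 4096-byte-block volume begins at byte 20480,
 * i.e. 512-byte sector 40.  All numbers below are made up.
 */
#if 0
static void
ExampleJournalSector(void)
{
	UInt32 blockSize = 4096;
	UInt32 journalBlock = 5;
	UInt64 journalByteOffset = (UInt64)journalBlock * blockSize;	/* 20480 */
	UInt64 journalSector = journalByteOffset / kBytesPerSector;	/* 40 */

	printf("journal starts at 512-byte sector %llu\n", (unsigned long long)journalSector);
}
#endif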
/*
 * WriteCatalogFile
 *
 * This routine initializes a Catalog B-Tree.
 *
 * Note: Since large volumes can have bigger b-trees they
 * might need to have map nodes setup.
 */
static void
WriteCatalogFile(const DriveInfo *driveInfo, UInt64 startingSector,
	const hfsparams_t *dp, HFSPlusVolumeHeader *header, void *buffer,
	UInt32 *bytesUsed, UInt32 *mapNodes)
{
	BTNodeDescriptor	*ndp;
	BTHeaderRec		*bthp;
	UInt8			*bmp;
	UInt32			nodeBitsInHeader;
	UInt32			fileSize;
	UInt32			nodeSize;
	UInt32			temp;
	SInt16			offset;

	*mapNodes = 0;
	fileSize = dp->catalogInitialSize;
	nodeSize = dp->catalogNodeSize;

	bzero(buffer, nodeSize);

	/* FILL IN THE NODE DESCRIPTOR:  */
	ndp = (BTNodeDescriptor *)buffer;
	ndp->kind = kBTHeaderNode;
	ndp->numRecords = SWAP_BE16 (3);
	offset = sizeof(BTNodeDescriptor);

	SETOFFSET(buffer, nodeSize, offset, 1);

	/* FILL IN THE HEADER RECORD:  */
	bthp = (BTHeaderRec *)((UInt8 *)buffer + offset);
	bthp->treeDepth = SWAP_BE16 (1);
	bthp->rootNode = SWAP_BE32 (1);
	bthp->firstLeafNode = SWAP_BE32 (1);
	bthp->lastLeafNode = SWAP_BE32 (1);
	bthp->leafRecords = SWAP_BE32 (dp->journaledHFS ? 6 : 2);
	bthp->nodeSize = SWAP_BE16 (nodeSize);
	bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize);
	bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 2);	/* header and root */
	bthp->clumpSize = SWAP_BE32 (dp->catalogClumpSize);

	bthp->attributes |= SWAP_BE32 (kBTVariableIndexKeysMask + kBTBigKeysMask);
	bthp->maxKeyLength = SWAP_BE16 (kHFSPlusCatalogKeyMaximumLength);
	if (dp->flags & kMakeCaseSensitive)
		bthp->keyCompareType = kHFSBinaryCompare;
	else
		bthp->keyCompareType = kHFSCaseFolding;

	offset += sizeof(BTHeaderRec);

	SETOFFSET(buffer, nodeSize, offset, 2);

	offset += kBTreeHeaderUserBytes;

	SETOFFSET(buffer, nodeSize, offset, 3);

	/* FIGURE OUT HOW MANY MAP NODES (IF ANY):  */
	nodeBitsInHeader = 8 * (nodeSize
				- sizeof(BTNodeDescriptor)
				- sizeof(BTHeaderRec)
				- kBTreeHeaderUserBytes
				- (4 * sizeof(SInt16)) );

	if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) {
		UInt32	nodeBitsInMapNode;

		ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1);
		nodeBitsInMapNode = 8 * (nodeSize
					- sizeof(BTNodeDescriptor)
					- (2 * sizeof(SInt16))
					- 2 );
		*mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader +
			(nodeBitsInMapNode - 1)) / nodeBitsInMapNode;
		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes);
	}

	/*
	 * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE.
	 * Note - worst case (32MB alloc blk) will have only 18 nodes in use.
	 */
	bmp = ((UInt8 *)buffer + offset);
	temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes);

	/* Working a byte at a time is endian safe */
	while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; }
	*bmp = ~(0xFF >> temp);
	offset += nodeBitsInHeader/8;

	SETOFFSET(buffer, nodeSize, offset, 4);

	InitCatalogRoot_HFSPlus(dp, header, buffer + nodeSize);

	*bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize;

	WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer);
}
static void
InitCatalogRoot_HFSPlus(const hfsparams_t *dp, const HFSPlusVolumeHeader *header, void * buffer)
{
	BTNodeDescriptor	*ndp;
	HFSPlusCatalogKey	*ckp;
	HFSPlusCatalogKey	*tkp;
	HFSPlusCatalogFolder	*cdp;
	HFSPlusCatalogFile	*cfp;
	HFSPlusCatalogThread	*ctp;
	UInt16			nodeSize;
	SInt16			offset;
	size_t			unicodeBytes;
	UInt8 canonicalName[kHFSPlusMaxFileNameBytes];	// UTF8 character may convert to three bytes, plus a NUL
	CFStringRef cfstr;
	Boolean	cfOK;
	int index = 0;

	nodeSize = dp->catalogNodeSize;
	bzero(buffer, nodeSize);

	/*
	 * All nodes have a node descriptor...
	 */
	ndp = (BTNodeDescriptor *)buffer;
	ndp->kind = kBTLeafNode;
	ndp->height = 1;
	ndp->numRecords = SWAP_BE16 (dp->journaledHFS ? 6 : 2);
	offset = sizeof(BTNodeDescriptor);
	SETOFFSET(buffer, nodeSize, offset, ++index);

	/*
	 * First record is always the root directory...
	 */
	ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);

	/* Use CFString functions to get a HFSPlus Canonical name */
	cfstr = CFStringCreateWithCString(kCFAllocatorDefault, (char *)dp->volumeName, kCFStringEncodingUTF8);
	cfOK = _CFStringGetFileSystemRepresentation(cfstr, canonicalName, sizeof(canonicalName));

	if (!cfOK || ConvertUTF8toUnicode(canonicalName, sizeof(ckp->nodeName.unicode),
		ckp->nodeName.unicode, &ckp->nodeName.length)) {

		/* On conversion errors "untitled" is used as a fallback. */
		(void) ConvertUTF8toUnicode((UInt8 *)kDefaultVolumeNameStr,
					    sizeof(ckp->nodeName.unicode),
					    ckp->nodeName.unicode,
					    &ckp->nodeName.length);
		warnx("invalid HFS+ name: \"%s\", using \"%s\" instead",
		      dp->volumeName, kDefaultVolumeNameStr);
	}
	CFRelease(cfstr);
	ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length);

	unicodeBytes = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length);

	ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + unicodeBytes);
	ckp->parentID = SWAP_BE32 (kHFSRootParentID);
	offset += SWAP_BE16 (ckp->keyLength) + 2;

	cdp = (HFSPlusCatalogFolder *)((UInt8 *)buffer + offset);
	cdp->recordType = SWAP_BE16 (kHFSPlusFolderRecord);
	/* folder count is only supported on HFSX volumes */
	if (dp->flags & kMakeCaseSensitive) {
		cdp->flags = SWAP_BE16 (kHFSHasFolderCountMask);
	}
	cdp->valence = SWAP_BE32 (dp->journaledHFS ? 2 : 0);
	cdp->folderID = SWAP_BE32 (kHFSRootFolderID);
	cdp->createDate = SWAP_BE32 (dp->createDate);
	cdp->contentModDate = SWAP_BE32 (dp->createDate);
	cdp->textEncoding = SWAP_BE32 (dp->encodingHint);
	if (dp->flags & kUseAccessPerms) {
		cdp->bsdInfo.ownerID = SWAP_BE32 (dp->owner);
		cdp->bsdInfo.groupID = SWAP_BE32 (dp->group);
		cdp->bsdInfo.fileMode = SWAP_BE16 (dp->mask | S_IFDIR);
	}
	offset += sizeof(HFSPlusCatalogFolder);
	SETOFFSET(buffer, nodeSize, offset, ++index);

	/*
	 * Second record is always the root directory thread...
	 */
	tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);
	tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength);
	tkp->parentID = SWAP_BE32 (kHFSRootFolderID);
	// tkp->nodeName.length = 0;

	offset += SWAP_BE16 (tkp->keyLength) + 2;

	ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset);
	ctp->recordType = SWAP_BE16 (kHFSPlusFolderThreadRecord);
	ctp->parentID = SWAP_BE32 (kHFSRootParentID);
	bcopy(&ckp->nodeName, &ctp->nodeName, sizeof(UInt16) + unicodeBytes);
	offset += (sizeof(HFSPlusCatalogThread)
			- (sizeof(ctp->nodeName.unicode) - unicodeBytes) );

	SETOFFSET(buffer, nodeSize, offset, ++index);

	/*
	 * Add records for ".journal" and ".journal_info_block" files:
	 */
	if (dp->journaledHFS) {
		struct HFSUniStr255 *nodename1, *nodename2;
		size_t uBytes1, uBytes2;
		UInt32 journalBlock;

		/* File record #1 */
		ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);
		(void) ConvertUTF8toUnicode((UInt8 *)HFS_JOURNAL_FILE, sizeof(ckp->nodeName.unicode),
					    ckp->nodeName.unicode, &ckp->nodeName.length);
		ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length);
		uBytes1 = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length);
		ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + uBytes1);
		ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
		offset += SWAP_BE16 (ckp->keyLength) + 2;

		cfp = (HFSPlusCatalogFile *)((UInt8 *)buffer + offset);
		cfp->recordType = SWAP_BE16 (kHFSPlusFileRecord);
		cfp->flags = SWAP_BE16 (kHFSThreadExistsMask);
		cfp->fileID = SWAP_BE32 (dp->nextFreeFileID);
		cfp->createDate = SWAP_BE32 (dp->createDate + 1);
		cfp->contentModDate = SWAP_BE32 (dp->createDate + 1);
		cfp->textEncoding = 0;

		cfp->bsdInfo.fileMode = SWAP_BE16 (S_IFREG);
		cfp->bsdInfo.ownerFlags = (uint8_t) SWAP_BE16 (((uint16_t)UF_NODUMP));
		cfp->bsdInfo.special.linkCount = SWAP_BE32(1);
		cfp->userInfo.fdType = SWAP_BE32 (kJournalFileType);
		cfp->userInfo.fdCreator = SWAP_BE32 (kHFSPlusCreator);
		cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked);
		cfp->dataFork.logicalSize = SWAP_BE64 (dp->journalSize);
		cfp->dataFork.totalBlocks = SWAP_BE32 ((dp->journalSize+dp->blockSize-1) / dp->blockSize);

		if (NEWFS_HFS_DEBUG && dp->journalBlock)
			journalBlock = dp->journalBlock;
		else
			journalBlock = header->journalInfoBlock + 1;
		cfp->dataFork.extents[0].startBlock = SWAP_BE32 (journalBlock);
		cfp->dataFork.extents[0].blockCount = cfp->dataFork.totalBlocks;

		offset += sizeof(HFSPlusCatalogFile);
		SETOFFSET(buffer, nodeSize, offset, ++index);
		nodename1 = &ckp->nodeName;

		/* File record #2 */
		ckp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);
		(void) ConvertUTF8toUnicode((UInt8 *)HFS_JOURNAL_INFO, sizeof(ckp->nodeName.unicode),
					    ckp->nodeName.unicode, &ckp->nodeName.length);
		ckp->nodeName.length = SWAP_BE16 (ckp->nodeName.length);
		uBytes2 = sizeof(UniChar) * SWAP_BE16 (ckp->nodeName.length);
		ckp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength + uBytes2);
		ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
		offset += SWAP_BE16 (ckp->keyLength) + 2;

		cfp = (HFSPlusCatalogFile *)((UInt8 *)buffer + offset);
		cfp->recordType = SWAP_BE16 (kHFSPlusFileRecord);
		cfp->flags = SWAP_BE16 (kHFSThreadExistsMask);
		cfp->fileID = SWAP_BE32 (dp->nextFreeFileID + 1);
		cfp->createDate = SWAP_BE32 (dp->createDate);
		cfp->contentModDate = SWAP_BE32 (dp->createDate);
		cfp->textEncoding = 0;

		cfp->bsdInfo.fileMode = SWAP_BE16 (S_IFREG);
		cfp->bsdInfo.ownerFlags = (uint8_t) SWAP_BE16 (((uint16_t)UF_NODUMP));
		cfp->bsdInfo.special.linkCount = SWAP_BE32(1);
		cfp->userInfo.fdType = SWAP_BE32 (kJournalFileType);
		cfp->userInfo.fdCreator = SWAP_BE32 (kHFSPlusCreator);
		cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked);
		cfp->dataFork.logicalSize = SWAP_BE64(dp->blockSize);
		cfp->dataFork.totalBlocks = SWAP_BE32(1);

		cfp->dataFork.extents[0].startBlock = SWAP_BE32 (header->journalInfoBlock);
		cfp->dataFork.extents[0].blockCount = cfp->dataFork.totalBlocks;

		offset += sizeof(HFSPlusCatalogFile);
		SETOFFSET(buffer, nodeSize, offset, ++index);
		nodename2 = &ckp->nodeName;

		/* Thread record for file #1 */
		tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);
		tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength);
		tkp->parentID = SWAP_BE32 (dp->nextFreeFileID);
		tkp->nodeName.length = 0;
		offset += SWAP_BE16 (tkp->keyLength) + 2;

		ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset);
		ctp->recordType = SWAP_BE16 (kHFSPlusFileThreadRecord);
		ctp->parentID = SWAP_BE32 (kHFSRootFolderID);
		bcopy(nodename1, &ctp->nodeName, sizeof(UInt16) + uBytes1);
		offset += (sizeof(HFSPlusCatalogThread)
				- (sizeof(ctp->nodeName.unicode) - uBytes1) );
		SETOFFSET(buffer, nodeSize, offset, ++index);

		/* Thread record for file #2 */
		tkp = (HFSPlusCatalogKey *)((UInt8 *)buffer + offset);
		tkp->keyLength = SWAP_BE16 (kHFSPlusCatalogKeyMinimumLength);
		tkp->parentID = SWAP_BE32 (dp->nextFreeFileID + 1);
		tkp->nodeName.length = 0;
		offset += SWAP_BE16 (tkp->keyLength) + 2;

		ctp = (HFSPlusCatalogThread *)((UInt8 *)buffer + offset);
		ctp->recordType = SWAP_BE16 (kHFSPlusFileThreadRecord);
		ctp->parentID = SWAP_BE32 (kHFSRootFolderID);
		bcopy(nodename2, &ctp->nodeName, sizeof(UInt16) + uBytes2);
		offset += (sizeof(HFSPlusCatalogThread)
				- (sizeof(ctp->nodeName.unicode) - uBytes2) );
		SETOFFSET(buffer, nodeSize, offset, ++index);
	}
}
/*
 * WriteMapNodes
 *
 * Initializes a B-tree map node and writes it out to disk.
 */
static void
WriteMapNodes(const DriveInfo *driveInfo, UInt64 diskStart, UInt32 firstMapNode,
	UInt32 mapNodes, UInt16 btNodeSize, void *buffer)
{
	UInt32 sectorsPerNode;
	UInt32 mapRecordBytes;
	UInt32 i;
	BTNodeDescriptor *nd = (BTNodeDescriptor *)buffer;

	bzero(buffer, btNodeSize);

	nd->kind = kBTMapNode;
	nd->numRecords = SWAP_BE16 (1);

	/* note: must be long word aligned (hence the extra -2) */
	mapRecordBytes = btNodeSize - sizeof(BTNodeDescriptor) - 2*sizeof(SInt16) - 2;

	SETOFFSET(buffer, btNodeSize, sizeof(BTNodeDescriptor), 1);
	SETOFFSET(buffer, btNodeSize, sizeof(BTNodeDescriptor) + mapRecordBytes, 2);

	sectorsPerNode = btNodeSize/kBytesPerSector;

	/*
	 * Note - worst case (32MB alloc blk) will have
	 * only 18 map nodes. So don't bother optimizing
	 * this section to do multiblock writes!
	 */
	for (i = 0; i < mapNodes; i++) {
		if ((i + 1) < mapNodes)
			nd->fLink = SWAP_BE32 (++firstMapNode);	/* point to next map node */
		else
			nd->fLink = 0;	/* this is the last map node */

		WriteBuffer(driveInfo, diskStart, btNodeSize, buffer);

		diskStart += sectorsPerNode;
	}
}
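
/*
 * Illustrative arithmetic (assuming a 14-byte BTNodeDescriptor and a
 * 4096-byte node): mapRecordBytes = 4096 - 14 - 2*2 - 2 = 4076, so each
 * map node carries 4076 * 8 = 32608 allocation-block bits of B-tree map.
 */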
/*
 * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 * NOTE: IF buffer IS NULL, THIS FUNCTION WILL WRITE ZERO'S.
 *
 * startingSector is in terms of 512-byte sectors.
 * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 */
static void
WriteBuffer(const DriveInfo *driveInfo, UInt64 startingSector, UInt64 byteCount,
	const void *buffer)
{
	off_t sector;
	off_t physSector = 0;
	off_t byteOffsetInPhysSector;
	UInt32 numBytesToIO;
	UInt32 numPhysSectorsToIO;
	UInt32 tempbufSizeInPhysSectors;
	UInt32 tempbufSize;
	UInt32 fd = driveInfo->fd;
	UInt32 physSectorSize = driveInfo->physSectorSize;
	void *tempbuf = NULL;
	int sectorSizeRatio = driveInfo->physSectorSize / kBytesPerSector;
	int status = 0; /* 0: no error; 1: alloc; 2: read; 3: write */

	if (0 == byteCount) {
		goto exit;
	}

	/*@@@@@@@@@@ buffer allocation @@@@@@@@@@*/
	/* try a buffer size for optimal IO, __UP TO 4MB__. if that
	   fails, then try with the minimum allowed buffer size, which
	   is equal to physSectorSize */
	tempbufSizeInPhysSectors = MIN ( (byteCount - 1 + physSectorSize) / physSectorSize,
		driveInfo->physSectorsPerIO );
	tempbufSizeInPhysSectors = MIN ( tempbufSizeInPhysSectors, (4 * 1024 * 1024) / physSectorSize );
	tempbufSize = tempbufSizeInPhysSectors * physSectorSize;

	if ((tempbuf = valloc(tempbufSize)) == NULL) {
		/* try allocation of smallest allowed size: one
		   physical sector.
		   NOTE: the previous valloc tempbufSize might have
		   already been one physical sector. we don't want to
		   check if that was the case, so just try again. */
		tempbufSizeInPhysSectors = 1;
		tempbufSize = physSectorSize;
		if ((tempbuf = valloc(tempbufSize)) == NULL) {
			status = 1;
			goto exit;
		}
	}

	/*@@@@@@@@@@ io @@@@@@@@@@*/
	sector = driveInfo->sectorOffset + startingSector;
	physSector = sector / sectorSizeRatio;
	byteOffsetInPhysSector = (sector % sectorSizeRatio) * kBytesPerSector;

	while (byteCount > 0) {
		numPhysSectorsToIO = MIN ( (byteCount - 1 + physSectorSize) / physSectorSize,
			tempbufSizeInPhysSectors );
		numBytesToIO = MIN(byteCount, (unsigned)((numPhysSectorsToIO * physSectorSize) - byteOffsetInPhysSector));

		/* if IO does not align with physical sector boundaries */
		if ((0 != byteOffsetInPhysSector) || ((numBytesToIO % physSectorSize) != 0)) {
			if (pread(fd, tempbuf, numPhysSectorsToIO * physSectorSize, physSector * physSectorSize) < 0) {
				warn("%s: pread(%d, %p, %zu, %lld)", __FUNCTION__, fd, tempbuf, (size_t)(numPhysSectorsToIO * physSectorSize), (long long)(physSector * physSectorSize));
				status = 2;
				goto exit;
			}
		}

		if (NULL != buffer) {
			memcpy(tempbuf + byteOffsetInPhysSector, buffer, numBytesToIO);
		}
		else {
			bzero(tempbuf + byteOffsetInPhysSector, numBytesToIO);
		}

		if (pwrite(fd, tempbuf, numPhysSectorsToIO * physSectorSize, physSector * physSectorSize) < 0) {
			warn("%s: pwrite(%d, %p, %zu, %lld)", __FUNCTION__, fd, tempbuf, (size_t)(numPhysSectorsToIO * physSectorSize), (long long)(physSector * physSectorSize));
			status = 3;
			goto exit;
		}

		byteOffsetInPhysSector = 0;
		byteCount -= numBytesToIO;
		physSector += numPhysSectorsToIO;
		if (NULL != buffer) {
			buffer += numBytesToIO;
		}
	}

exit:
	if (tempbuf) {
		free(tempbuf);
		tempbuf = NULL;
	}

	if (1 == status) {
		err(1, "valloc (%u bytes)", (unsigned)tempbufSize);
	}
	else if (2 == status) {
		err(1, "read (sector %llu)", physSector);
	}
	else if (3 == status) {
		err(1, "write (sector %llu)", physSector);
	}
}
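
/*
 * Illustrative arithmetic (assuming physSectorSize = 4096 and
 * kBytesPerSector = 512): sectorSizeRatio = 8, so a request starting at
 * 512-byte sector 10 maps to physical sector 10 / 8 = 1 with
 * byteOffsetInPhysSector = (10 % 8) * 512 = 1024.  Because that start is
 * not aligned to a physical sector, the loop above first preads the
 * enclosing physical sectors, patches in the caller's bytes (or zeroes),
 * and pwrites them back.
 */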
static UInt32
Largest( UInt32 a, UInt32 b, UInt32 c, UInt32 d )
{
	/* a := max(a,b), c := max(c,d) */
	if (a < b)
		a = b;
	if (c < d)
		c = d;

	/* return max(a,c) */
	return (a > c) ? a : c;
}
/*
 * UTCToLocal - convert from Mac OS GMT time to Mac OS local time
 */
static UInt32
UTCToLocal(UInt32 utcTime)
{
	UInt32 localTime = utcTime;
	struct timezone timeZone;
	struct timeval timeVal;

	if (localTime != 0) {
		/* HFS volumes need timezone info to convert local to GMT */
		(void)gettimeofday( &timeVal, &timeZone );

		localTime -= (timeZone.tz_minuteswest * 60);
		if (timeZone.tz_dsttime)
			localTime += 3600;
	}

	return (localTime);
}
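
/*
 * Illustrative arithmetic (assuming US Pacific time, tz_minuteswest = 480):
 * localTime = utcTime - 480 * 60 = utcTime - 28800, plus 3600 more when
 * tz_dsttime indicates daylight saving time is in effect.
 */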
#define __kCFUserEncodingFileName ("/.CFUserTextEncoding")

static UInt32
GetDefaultEncoding()
{
	struct passwd *passwdp;

	if ((passwdp = getpwuid(0))) {	// root account
		char buffer[MAXPATHLEN + 1];
		int fd;

		strlcpy(buffer, passwdp->pw_dir, sizeof(buffer));
		strlcat(buffer, __kCFUserEncodingFileName, sizeof(buffer));

		if ((fd = open(buffer, O_RDONLY, 0)) > 0) {
			ssize_t readSize;

			readSize = read(fd, buffer, MAXPATHLEN);
			buffer[(readSize < 0 ? 0 : readSize)] = '\0';
			close(fd);
			return strtol(buffer, NULL, 0);
		}
	}
	return 0;
}
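
/*
 * Note (assumption about the file format): ~root/.CFUserTextEncoding
 * typically contains a string such as "0:0" (text encoding followed by a
 * region code); strtol() with base 0 parses just the leading encoding
 * value, and 0 corresponds to kCFStringEncodingMacRoman.
 */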
static int
ConvertUTF8toUnicode(const UInt8* source, size_t bufsize, UniChar* unibuf,
	UInt16 *charcount)
{
	UInt8 byte;
	UniChar* target;
	UniChar* targetEnd;

	*charcount = 0;
	target = unibuf;
	targetEnd = (UniChar *)((UInt8 *)unibuf + bufsize);

	while ((byte = *source++)) {

		/* check for single-byte ascii */
		if (byte < 128) {
			if (byte == ':')	/* ':' is mapped to '/' */
				byte = '/';

			*target++ = SWAP_BE16 (byte);
		} else {
			UInt16 ch;
			UInt8 seq = (byte >> 4);

			switch (seq) {
			case 0xc: /* double-byte sequence (1100 and 1101) */
			case 0xd:
				ch = (byte & 0x1F) << 6;  /* get 5 bits */
				if (((byte = *source++) >> 6) != 2)
					return (EINVAL);  /* malformed sequence */
				break;

			case 0xe: /* triple-byte sequence (1110) */
				ch = (byte & 0x0F) << 6;  /* get 4 bits */
				if (((byte = *source++) >> 6) != 2)
					return (EINVAL);  /* malformed sequence */
				ch += (byte & 0x3F); ch <<= 6;  /* get 6 bits */
				if (((byte = *source++) >> 6) != 2)
					return (EINVAL);  /* malformed sequence */
				break;

			default:
				return (EINVAL);  /* malformed sequence */
			}

			ch += (byte & 0x3F);  /* get last 6 bits */

			if (target >= targetEnd)
				return (ENOBUFS);

			*target++ = SWAP_BE16 (ch);
		}
	}

	*charcount = target - unibuf;

	return (0);
}
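
/*
 * Worked example: U+00E9 (e-acute) arrives as the two bytes 0xC3 0xA9.
 * The first byte selects the double-byte case (0xC3 >> 4 == 0xC), giving
 * ch = (0xC3 & 0x1F) << 6 = 0x0C0; the continuation byte adds
 * (0xA9 & 0x3F) = 0x29, yielding 0x00E9, which is then stored through
 * SWAP_BE16 so the UniChar is big-endian on disk.
 */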
/*
 * Derive the encoding hint for the given name.
 */
static UInt32
getencodinghint(unsigned char *name)
{
	int mib[3];
	size_t buflen = sizeof(int);
	struct vfsconf vfc;
	int hint = 0;

	if (getvfsbyname("hfs", &vfc) < 0)
		goto error;

	mib[0] = CTL_VFS;
	mib[1] = vfc.vfc_typenum;
	mib[2] = HFS_ENCODINGHINT;

	if (sysctl(mib, 3, &hint, &buflen, name, strlen((char *)name) + 1) < 0)
		goto error;

	return (hint);
error:
	hint = GetDefaultEncoding();
	return (hint);
}
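
/*
 * (Assumed behavior.) The {CTL_VFS, <hfs typenum>, HFS_ENCODINGHINT}
 * sysctl asks the kernel which legacy Mac text encoding best matches the
 * given UTF-8 name; if that lookup fails, the fallback is the root
 * account's saved CFUserTextEncoding value via GetDefaultEncoding().
 */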
/* Generate Volume UUID - similar to code existing in hfs_util */
void GenerateVolumeUUID(VolumeUUID *newVolumeID) {
	SHA_CTX context;
	char randomInputBuffer[26];
	unsigned char digest[20];
	time_t now;
	clock_t uptime;
	int mib[2];
	int sysdata;
	char sysctlstring[128];
	size_t datalen;
	double sysloadavg[3];
	struct vmtotal sysvmtotal;

	do {
		/* Initialize the SHA-1 context for processing: */
		SHA1_Init(&context);

		/* Now process successive bits of "random" input to seed the process: */

		/* The current system's uptime: */
		uptime = clock();
		SHA1_Update(&context, &uptime, sizeof(uptime));

		/* The kernel's boot time: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_BOOTTIME;
		datalen = sizeof(sysdata);
		sysctl(mib, 2, &sysdata, &datalen, NULL, 0);
		SHA1_Update(&context, &sysdata, datalen);

		/* The system's host id: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_HOSTID;
		datalen = sizeof(sysdata);
		sysctl(mib, 2, &sysdata, &datalen, NULL, 0);
		SHA1_Update(&context, &sysdata, datalen);

		/* The system's host name: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_HOSTNAME;
		datalen = sizeof(sysctlstring);
		sysctl(mib, 2, sysctlstring, &datalen, NULL, 0);
		SHA1_Update(&context, sysctlstring, datalen);

		/* The running kernel's OS release string: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_OSRELEASE;
		datalen = sizeof(sysctlstring);
		sysctl(mib, 2, sysctlstring, &datalen, NULL, 0);
		SHA1_Update(&context, sysctlstring, datalen);

		/* The running kernel's version string: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_VERSION;
		datalen = sizeof(sysctlstring);
		sysctl(mib, 2, sysctlstring, &datalen, NULL, 0);
		SHA1_Update(&context, sysctlstring, datalen);

		/* The system's load average: */
		datalen = sizeof(sysloadavg);
		getloadavg(sysloadavg, 3);
		SHA1_Update(&context, &sysloadavg, datalen);

		/* The system's VM statistics: */
		mib[0] = CTL_VM;
		mib[1] = VM_METER;
		datalen = sizeof(sysvmtotal);
		sysctl(mib, 2, &sysvmtotal, &datalen, NULL, 0);
		SHA1_Update(&context, &sysvmtotal, datalen);

		/* The current GMT (26 ASCII characters): */
		time(&now);
		strncpy(randomInputBuffer, asctime(gmtime(&now)), 26);	/* "Mon Mar 27 13:46:26 2000" */
		SHA1_Update(&context, randomInputBuffer, 26);

		/* Pad the accumulated input and extract the final digest hash: */
		SHA1_Final(digest, &context);

		memcpy(newVolumeID, digest, sizeof(*newVolumeID));
	} while ((newVolumeID->v.high == 0) || (newVolumeID->v.low == 0));
}