/* Copyright © 2017-2018 Apple Inc. All rights reserved.
 *
 * Created by Or Haimovich on 18/3/18.
 */
13 #include <mach/mach.h>
17 #include "lf_hfs_locks.h"
18 #include "lf_hfs_format.h"
20 #include "lf_hfs_endian.h"
21 #include "lf_hfs_logger.h"
22 #include "lf_hfs_mount.h"
23 #include "lf_hfs_utils.h"
24 #include "lf_hfs_logger.h"
25 #include "lf_hfs_raw_read_write.h"
26 #include "lf_hfs_vfsutils.h"
27 #include "lf_hfs_vfsops.h"
28 #include "lf_hfs_file_mgr_internal.h"
29 #include "lf_hfs_btrees_internal.h"
30 #include "lf_hfs_format.h"
31 #include "lf_hfs_file_extent_mapping.h"
32 #include "lf_hfs_sbunicode.h"
33 #include "lf_hfs_xattr.h"
34 #include "lf_hfs_unicode_wrappers.h"
35 #include "lf_hfs_link.h"
36 #include "lf_hfs_btree.h"
37 #include "lf_hfs_journal.h"
39 static int hfs_late_journal_init(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, void *_args
);
40 u_int32_t
GetFileInfo(ExtendedVCB
*vcb
, const char *name
,
41 struct cat_attr
*fattr
, struct cat_fork
*forkinfo
);
//*******************************************************************************
// Routine:   hfs_MountHFSVolume
//*******************************************************************************
/* Names of the HFS+ special (system) files.  Non-static because the mount
 * path uses them to build catalog descriptors for the B-trees, the
 * allocation bitmap, and the startup file. */
unsigned char hfs_catname[]     = "Catalog B-tree";
unsigned char hfs_extname[]     = "Extents B-tree";
unsigned char hfs_vbmname[]     = "Volume Bitmap";
unsigned char hfs_attrname[]    = "Attribute B-tree";
unsigned char hfs_startupname[] = "Startup File";
54 //*******************************************************************************
56 // Sanity check Volume Header Block:
57 // Input argument *vhp is a pointer to a HFSPlusVolumeHeader block that has
58 // not been endian-swapped and represents the on-disk contents of this sector.
59 // This routine will not change the endianness of vhp block.
61 //*******************************************************************************
62 int hfs_ValidateHFSPlusVolumeHeader(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
)
64 u_int16_t signature
= SWAP_BE16(vhp
->signature
);
65 u_int16_t hfs_version
= SWAP_BE16(vhp
->version
);
67 if (signature
== kHFSPlusSigWord
)
69 if (hfs_version
!= kHFSPlusVersion
)
71 LFHFS_LOG(LEVEL_ERROR
, "hfs_ValidateHFSPlusVolumeHeader: invalid HFS+ version: %x\n", hfs_version
);
75 } else if (signature
== kHFSXSigWord
)
77 if (hfs_version
!= kHFSXVersion
)
79 LFHFS_LOG(LEVEL_ERROR
, "hfs_ValidateHFSPlusVolumeHeader: invalid HFSX version: %x\n", hfs_version
);
84 /* Removed printf for invalid HFS+ signature because it gives
85 * false error for UFS root volume
87 LFHFS_LOG(LEVEL_DEBUG
, "hfs_ValidateHFSPlusVolumeHeader: unknown Volume Signature : %x\n", signature
);
91 /* Block size must be at least 512 and a power of 2 */
92 u_int32_t blockSize
= SWAP_BE32(vhp
->blockSize
);
93 if (blockSize
< 512 || !powerof2(blockSize
))
95 LFHFS_LOG(LEVEL_DEBUG
, "hfs_ValidateHFSPlusVolumeHeader: invalid blocksize (%d) \n", blockSize
);
99 if (blockSize
< hfsmp
->hfs_logical_block_size
)
101 LFHFS_LOG(LEVEL_DEBUG
, "hfs_ValidateHFSPlusVolumeHeader: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n",
102 blockSize
, hfsmp
->hfs_logical_block_size
);
108 //*******************************************************************************
109 // Routine: hfs_MountHFSPlusVolume
112 //*******************************************************************************
114 int hfs_CollectBtreeStats(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, off_t embeddedOffset
, void *args
)
117 register ExtendedVCB
*vcb
= HFSTOVCB(hfsmp
);
118 u_int32_t blockSize
; blockSize
= SWAP_BE32(vhp
->blockSize
);
121 * pull in the volume UUID while we are still single-threaded.
122 * This brings the volume UUID into the cached one dangling off of the HFSMP
123 * Otherwise it would have to be computed on first access.
126 hfs_getvoluuid (hfsmp
, throwaway
);
129 * We now always initiate a full bitmap scan even if the volume is read-only because this is
130 * our only shot to do I/Os of dramaticallly different sizes than what the buffer cache ordinarily
131 * expects. TRIMs will not be delivered to the underlying media if the volume is not
136 hfs_scan_blocks(hfsmp
);
138 if (hfsmp
->jnl
&& (hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
140 hfs_flushvolumeheader(hfsmp
, 0);
143 /* kHFSHasFolderCount is only supported/updated on HFSX volumes */
144 if ((hfsmp
->hfs_flags
& HFS_X
) != 0)
146 hfsmp
->hfs_flags
|= HFS_FOLDERCOUNT
;
149 // Check if we need to do late journal initialization. This only
150 // happens if a previous version of MacOS X (or 9) touched the disk.
151 // In that case hfs_late_journal_init() will go re-locate the journal
152 // and journal_info_block files and validate that they're still kosher.
153 if ( (vcb
->vcbAtrb
& kHFSVolumeJournaledMask
) &&
154 (SWAP_BE32(vhp
->lastMountedVersion
) != kHFSJMountVersion
) &&
155 (hfsmp
->jnl
== NULL
))
158 retval
= hfs_late_journal_init(hfsmp
, vhp
, args
);
163 // EROFS is a special error code that means the volume has an external
164 // journal which we couldn't find. in that case we do not want to
165 // rewrite the volume header - we'll just refuse to mount the volume.
166 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d), maybe an external jnl?\n", retval
);
173 // if the journal failed to open, then set the lastMountedVersion
174 // to be "FSK!" which fsck_hfs will see and force the fsck instead
175 // of just bailing out because the volume is journaled.
176 if (!(hfsmp
->hfs_flags
& HFS_READ_ONLY
))
178 hfsmp
->hfs_flags
|= HFS_NEED_JNL_RESET
;
180 uint64_t mdb_offset
= (uint64_t)((embeddedOffset
/ blockSize
) + HFS_PRI_SECTOR(blockSize
));
182 void *pvBuffer
= hfs_malloc(hfsmp
->hfs_physical_block_size
);
183 if (pvBuffer
== NULL
)
189 retval
= raw_readwrite_read_mount( hfsmp
->hfs_devvp
, HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
), hfsmp
->hfs_physical_block_size
, pvBuffer
, hfsmp
->hfs_physical_block_size
, NULL
, NULL
);
192 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_read_mount failed with %d\n", retval
);
197 HFSPlusVolumeHeader
*jvhp
= (HFSPlusVolumeHeader
*)(pvBuffer
+ HFS_PRI_OFFSET(hfsmp
->hfs_physical_block_size
));
199 if (SWAP_BE16(jvhp
->signature
) == kHFSPlusSigWord
|| SWAP_BE16(jvhp
->signature
) == kHFSXSigWord
)
201 LFHFS_LOG(LEVEL_ERROR
, "hfs_MountHFSPlusVolume: Journal replay fail. Writing lastMountVersion as FSK!\n");
202 jvhp
->lastMountedVersion
= SWAP_BE32(kFSKMountVersion
);
204 retval
= raw_readwrite_write_mount( hfsmp
->hfs_devvp
, HFS_PHYSBLK_ROUNDDOWN(mdb_offset
, hfsmp
->hfs_log_per_phys
), hfsmp
->hfs_physical_block_size
, pvBuffer
, hfsmp
->hfs_physical_block_size
, NULL
, NULL
);
208 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: JNL header raw_readwrite_write_mount failed with %d\n", retval
);
218 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_late_journal_init returned (%d)\n", retval
);
224 hfsmp
->hfs_mp
->mnt_flag
|= MNT_JOURNALED
;
227 else if (hfsmp
->jnl
|| ((vcb
->vcbAtrb
& kHFSVolumeJournaledMask
) && (hfsmp
->hfs_flags
& HFS_READ_ONLY
)))
229 struct cat_attr jinfo_attr
, jnl_attr
;
230 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
)
232 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
235 // if we're here we need to fill in the fileid's for the
236 // journal and journal_info_block.
237 hfsmp
->hfs_jnlinfoblkid
= GetFileInfo(vcb
, ".journal_info_block", &jinfo_attr
, NULL
);
238 hfsmp
->hfs_jnlfileid
= GetFileInfo(vcb
, ".journal", &jnl_attr
, NULL
);
239 if (hfsmp
->hfs_jnlinfoblkid
== 0 || hfsmp
->hfs_jnlfileid
== 0)
241 LFHFS_LOG(LEVEL_DEFAULT
, "hfs_MountHFSPlusVolume: danger! couldn't find the file-id's for the journal or journal_info_block\n");
242 LFHFS_LOG(LEVEL_DEFAULT
, "hfs_MountHFSPlusVolume: jnlfileid %llu, jnlinfoblkid %llu\n", hfsmp
->hfs_jnlfileid
, hfsmp
->hfs_jnlinfoblkid
);
245 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
)
247 vcb
->vcbAtrb
|= kHFSVolumeJournaledMask
;
250 if (hfsmp
->jnl
== NULL
)
252 hfsmp
->hfs_mp
->mnt_flag
&= ~(u_int64_t
)((unsigned int)MNT_JOURNALED
);
256 if ( !(vcb
->vcbAtrb
& kHFSVolumeHardwareLockMask
) ) // if the disk is not write protected
258 MarkVCBDirty( vcb
); // mark VCB dirty so it will be written
262 * Distinguish 3 potential cases involving content protection:
263 * 1. mount point bit set; vcbAtrb does not support it. Fail.
264 * 2. mount point bit set; vcbattrb supports it. we're good.
265 * 3. mount point bit not set; vcbatrb supports it, turn bit on, then good.
267 if (hfsmp
->hfs_mp
->mnt_flag
& MNT_CPROTECT
)
269 /* Does the mount point support it ? */
270 if ((vcb
->vcbAtrb
& kHFSContentProtectionMask
) == 0)
279 /* not requested in the mount point. Is it in FS? */
280 if (vcb
->vcbAtrb
& kHFSContentProtectionMask
)
283 hfsmp
->hfs_mp
->mnt_flag
|= MNT_CPROTECT
;
287 #if LF_HFS_CHECK_UNMAPPED // TBD:
289 * Establish a metadata allocation zone.
291 hfs_metadatazone_init(hfsmp
, false);
295 * Make any metadata zone adjustments.
297 if (hfsmp
->hfs_flags
& HFS_METADATA_ZONE
)
299 /* Keep the roving allocator out of the metadata zone. */
300 if (vcb
->nextAllocation
>= hfsmp
->hfs_metazone_start
&&
301 vcb
->nextAllocation
<= hfsmp
->hfs_metazone_end
)
303 HFS_UPDATE_NEXT_ALLOCATION(hfsmp
, hfsmp
->hfs_metazone_end
+ 1);
309 if (vcb
->nextAllocation
<= 1)
311 vcb
->nextAllocation
= hfsmp
->hfs_min_alloc_start
;
314 vcb
->sparseAllocation
= hfsmp
->hfs_min_alloc_start
;
316 /* Setup private/hidden directories for hardlinks. */
317 hfs_privatedir_init(hfsmp
, FILE_HARDLINKS
);
318 hfs_privatedir_init(hfsmp
, DIR_HARDLINKS
);
320 hfs_remove_orphans(hfsmp
);
322 /* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
323 retval
= hfs_erase_unused_nodes(hfsmp
);
326 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_erase_unused_nodes returned (%d) for %s \n", retval
, hfsmp
->vcbVN
);
330 /* Enable extent-based extended attributes by default */
331 hfsmp
->hfs_flags
|= HFS_XATTR_EXTENTS
;
337 * A fatal error occurred and the volume cannot be mounted, so
338 * release any resources that we acquired...
342 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval
);
348 int hfs_MountHFSPlusVolume(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, off_t embeddedOffset
, u_int64_t disksize
, bool bFailForDirty
)
352 register ExtendedVCB
*vcb
;
353 struct cat_desc cndesc
;
354 struct cat_attr cnattr
;
355 struct cat_fork cfork
;
357 uint64_t spare_sectors
;
358 int newvnode_flags
= 0;
361 u_int16_t signature
= SWAP_BE16(vhp
->signature
);
363 retval
= hfs_ValidateHFSPlusVolumeHeader(hfsmp
, vhp
);
367 if (signature
== kHFSXSigWord
)
369 /* The in-memory signature is always 'H+'. */
370 signature
= kHFSPlusSigWord
;
371 hfsmp
->hfs_flags
|= HFS_X
;
374 blockSize
= SWAP_BE32(vhp
->blockSize
);
375 /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */
376 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0 &&
377 hfsmp
->jnl
== NULL
&&
378 (SWAP_BE32(vhp
->attributes
) & kHFSVolumeUnmountedMask
) == 0 &&
381 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: cannot mount dirty non-journaled volumes\n");
385 /* Make sure we can live with the physical block size. */
386 if ((disksize
& (hfsmp
->hfs_logical_block_size
- 1)) ||
387 (embeddedOffset
& (hfsmp
->hfs_logical_block_size
- 1)))
389 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_logical_blocksize (%d) \n",hfsmp
->hfs_logical_block_size
);
394 * If allocation block size is less than the physical block size,
395 * same data could be cached in two places and leads to corruption.
397 * HFS Plus reserves one allocation block for the Volume Header.
398 * If the physical size is larger, then when we read the volume header,
399 * we will also end up reading in the next allocation block(s).
400 * If those other allocation block(s) is/are modified, and then the volume
401 * header is modified, the write of the volume header's buffer will write
402 * out the old contents of the other allocation blocks.
404 * We assume that the physical block size is same as logical block size.
405 * The physical block size value is used to round down the offsets for
406 * reading and writing the primary and alternate volume headers.
408 * The same logic to ensure good hfs_physical_block_size is also in
409 * hfs_mountfs so that hfs_mountfs, hfs_MountHFSPlusVolume and
410 * later are doing the I/Os using same block size.
412 if (blockSize
< hfsmp
->hfs_physical_block_size
)
414 hfsmp
->hfs_physical_block_size
= hfsmp
->hfs_logical_block_size
;
415 hfsmp
->hfs_log_per_phys
= 1;
419 * The VolumeHeader seems OK: transfer info from it into VCB
420 * Note - the VCB starts out clear (all zeros)
422 vcb
= HFSTOVCB(hfsmp
);
424 vcb
->vcbSigWord
= signature
;
425 vcb
->vcbJinfoBlock
= SWAP_BE32(vhp
->journalInfoBlock
);
426 vcb
->vcbLsMod
= to_bsd_time(SWAP_BE32(vhp
->modifyDate
));
427 vcb
->vcbAtrb
= SWAP_BE32(vhp
->attributes
);
428 vcb
->vcbClpSiz
= SWAP_BE32(vhp
->rsrcClumpSize
);
429 vcb
->vcbNxtCNID
= SWAP_BE32(vhp
->nextCatalogID
);
430 vcb
->vcbVolBkUp
= to_bsd_time(SWAP_BE32(vhp
->backupDate
));
431 vcb
->vcbWrCnt
= SWAP_BE32(vhp
->writeCount
);
432 vcb
->vcbFilCnt
= SWAP_BE32(vhp
->fileCount
);
433 vcb
->vcbDirCnt
= SWAP_BE32(vhp
->folderCount
);
435 /* copy 32 bytes of Finder info */
436 bcopy(vhp
->finderInfo
, vcb
->vcbFndrInfo
, sizeof(vhp
->finderInfo
));
438 vcb
->vcbAlBlSt
= 0; /* hfs+ allocation blocks start at first block of volume */
439 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) == 0)
441 vcb
->vcbWrCnt
++; /* compensate for write of Volume Header on last flush */
444 /* Now fill in the Extended VCB info */
445 vcb
->nextAllocation
= SWAP_BE32(vhp
->nextAllocation
);
446 vcb
->totalBlocks
= SWAP_BE32(vhp
->totalBlocks
);
447 vcb
->allocLimit
= vcb
->totalBlocks
;
448 vcb
->freeBlocks
= SWAP_BE32(vhp
->freeBlocks
);
449 vcb
->blockSize
= blockSize
;
450 vcb
->encodingsBitmap
= SWAP_BE64(vhp
->encodingsBitmap
);
451 vcb
->localCreateDate
= SWAP_BE32(vhp
->createDate
);
453 vcb
->hfsPlusIOPosOffset
= (uint32_t) embeddedOffset
;
455 /* Default to no free block reserve */
456 vcb
->reserveBlocks
= 0;
459 * Update the logical block size in the mount struct
460 * (currently set up from the wrapper MDB) using the
461 * new blocksize value:
463 hfsmp
->hfs_logBlockSize
= BestBlockSizeFit(vcb
->blockSize
, MAXBSIZE
, hfsmp
->hfs_logical_block_size
);
464 vcb
->vcbVBMIOSize
= MIN(vcb
->blockSize
, MAXPHYSIO
);
467 * Validate and initialize the location of the alternate volume header.
469 * Note that there may be spare sectors beyond the end of the filesystem that still
470 * belong to our partition.
472 spare_sectors
= hfsmp
->hfs_logical_block_count
- (((uint64_t)vcb
->totalBlocks
* blockSize
) / hfsmp
->hfs_logical_block_size
);
475 * Differentiate between "innocuous" spare sectors and the more unusual
478 * *** Innocuous spare sectors exist if:
480 * A) the number of bytes assigned to the partition (by multiplying logical
481 * block size * logical block count) is greater than the filesystem size
482 * (by multiplying allocation block count and allocation block size)
486 * B) the remainder is less than the size of a full allocation block's worth of bytes.
488 * This handles the normal case where there may be a few extra sectors, but the two
489 * are fundamentally in sync.
491 * *** Degenerate spare sectors exist if:
492 * A) The number of bytes assigned to the partition (by multiplying logical
493 * block size * logical block count) is greater than the filesystem size
494 * (by multiplying allocation block count and block size).
498 * B) the remainder is greater than a full allocation's block worth of bytes.
499 * In this case, a smaller file system exists in a larger partition.
500 * This can happen in various ways, including when volume is resized but the
501 * partition is yet to be resized. Under this condition, we have to assume that
502 * a partition management software may resize the partition to match
503 * the file system size in the future. Therefore we should update
504 * alternate volume header at two locations on the disk,
505 * a. 1024 bytes before end of the partition
506 * b. 1024 bytes before end of the file system
509 if (spare_sectors
> (uint64_t)(blockSize
/ hfsmp
->hfs_logical_block_size
))
512 * Handle the degenerate case above. FS < partition size.
513 * AVH located at 1024 bytes from the end of the partition
515 hfsmp
->hfs_partition_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) + HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, hfsmp
->hfs_logical_block_count
);
517 /* AVH located at 1024 bytes from the end of the filesystem */
518 hfsmp
->hfs_fs_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) + HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, (((uint64_t)vcb
->totalBlocks
* blockSize
) / hfsmp
->hfs_logical_block_size
));
522 /* Innocuous spare sectors; Partition & FS notion are in sync */
523 hfsmp
->hfs_partition_avh_sector
= (hfsmp
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
) + HFS_ALT_SECTOR(hfsmp
->hfs_logical_block_size
, hfsmp
->hfs_logical_block_count
);
525 hfsmp
->hfs_fs_avh_sector
= hfsmp
->hfs_partition_avh_sector
;
528 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: partition_avh_sector=%qu, fs_avh_sector=%qu\n", hfsmp
->hfs_partition_avh_sector
, hfsmp
->hfs_fs_avh_sector
);
530 bzero(&cndesc
, sizeof(cndesc
));
531 cndesc
.cd_parentcnid
= kHFSRootParentID
;
532 cndesc
.cd_flags
|= CD_ISMETA
;
533 bzero(&cnattr
, sizeof(cnattr
));
534 cnattr
.ca_linkcount
= 1;
535 cnattr
.ca_mode
= S_IFREG
;
538 * Set up Extents B-tree vnode
540 cndesc
.cd_nameptr
= hfs_extname
;
541 cndesc
.cd_namelen
= strlen((char *)hfs_extname
);
542 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSExtentsFileID
;
544 cfork
.cf_size
= SWAP_BE64 (vhp
->extentsFile
.logicalSize
);
545 cfork
.cf_new_size
= 0;
546 cfork
.cf_clump
= SWAP_BE32 (vhp
->extentsFile
.clumpSize
);
547 cfork
.cf_blocks
= SWAP_BE32 (vhp
->extentsFile
.totalBlocks
);
548 cfork
.cf_vblocks
= 0;
549 cnattr
.ca_blocks
= cfork
.cf_blocks
;
551 for (int iExtentCounter
= 0; iExtentCounter
< kHFSPlusExtentDensity
; iExtentCounter
++)
553 cfork
.cf_extents
[iExtentCounter
].startBlock
= SWAP_BE32 (vhp
->extentsFile
.extents
[iExtentCounter
].startBlock
);
554 cfork
.cf_extents
[iExtentCounter
].blockCount
= SWAP_BE32 (vhp
->extentsFile
.extents
[iExtentCounter
].blockCount
);
557 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
, &hfsmp
->hfs_extents_vp
, &newvnode_flags
);
560 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting extentoverflow BT\n", retval
);
564 hfsmp
->hfs_extents_cp
= VTOC(hfsmp
->hfs_extents_vp
);
565 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_extents_vp
), (KeyCompareProcPtr
) CompareExtentKeysPlus
));
567 hfs_unlock(hfsmp
->hfs_extents_cp
);
571 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting extentoverflow BT\n", retval
);
576 * Set up Catalog B-tree vnode
578 cndesc
.cd_nameptr
= hfs_catname
;
579 cndesc
.cd_namelen
= strlen((char *)hfs_catname
);
580 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSCatalogFileID
;
582 cfork
.cf_size
= SWAP_BE64 (vhp
->catalogFile
.logicalSize
);
583 cfork
.cf_clump
= SWAP_BE32 (vhp
->catalogFile
.clumpSize
);
584 cfork
.cf_blocks
= SWAP_BE32 (vhp
->catalogFile
.totalBlocks
);
585 cfork
.cf_vblocks
= 0;
586 cnattr
.ca_blocks
= cfork
.cf_blocks
;
588 for (int iExtentCounter
= 0; iExtentCounter
< kHFSPlusExtentDensity
; iExtentCounter
++)
590 cfork
.cf_extents
[iExtentCounter
].startBlock
= SWAP_BE32 (vhp
->catalogFile
.extents
[iExtentCounter
].startBlock
);
591 cfork
.cf_extents
[iExtentCounter
].blockCount
= SWAP_BE32 (vhp
->catalogFile
.extents
[iExtentCounter
].blockCount
);
594 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
, &hfsmp
->hfs_catalog_vp
, &newvnode_flags
);
597 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting catalog BT\n", retval
);
600 hfsmp
->hfs_catalog_cp
= VTOC(hfsmp
->hfs_catalog_vp
);
601 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_catalog_vp
), (KeyCompareProcPtr
) CompareExtendedCatalogKeys
));
605 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting catalog BT\n", retval
);
606 hfs_unlock(hfsmp
->hfs_catalog_cp
);
610 if ((hfsmp
->hfs_flags
& HFS_X
) &&
611 BTGetInformation(VTOF(hfsmp
->hfs_catalog_vp
), 0, &btinfo
) == 0)
613 if (btinfo
.keyCompareType
== kHFSBinaryCompare
)
615 hfsmp
->hfs_flags
|= HFS_CASE_SENSITIVE
;
616 /* Install a case-sensitive key compare */
617 (void) BTOpenPath(VTOF(hfsmp
->hfs_catalog_vp
), (KeyCompareProcPtr
)cat_binarykeycompare
);
621 hfs_unlock(hfsmp
->hfs_catalog_cp
);
624 * Set up Allocation file vnode
626 cndesc
.cd_nameptr
= hfs_vbmname
;
627 cndesc
.cd_namelen
= strlen((char *)hfs_vbmname
);
628 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSAllocationFileID
;
630 cfork
.cf_size
= SWAP_BE64 (vhp
->allocationFile
.logicalSize
);
631 cfork
.cf_clump
= SWAP_BE32 (vhp
->allocationFile
.clumpSize
);
632 cfork
.cf_blocks
= SWAP_BE32 (vhp
->allocationFile
.totalBlocks
);
633 cfork
.cf_vblocks
= 0;
634 cnattr
.ca_blocks
= cfork
.cf_blocks
;
636 for (int iExtentCounter
= 0; iExtentCounter
< kHFSPlusExtentDensity
; iExtentCounter
++)
638 cfork
.cf_extents
[iExtentCounter
].startBlock
= SWAP_BE32 (vhp
->allocationFile
.extents
[iExtentCounter
].startBlock
);
639 cfork
.cf_extents
[iExtentCounter
].blockCount
= SWAP_BE32 (vhp
->allocationFile
.extents
[iExtentCounter
].blockCount
);
642 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
, &hfsmp
->hfs_allocation_vp
, &newvnode_flags
);
645 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting bitmap\n", retval
);
648 hfsmp
->hfs_allocation_cp
= VTOC(hfsmp
->hfs_allocation_vp
);
649 hfs_unlock(hfsmp
->hfs_allocation_cp
);
652 * Set up Attribute B-tree vnode
654 if (vhp
->attributesFile
.totalBlocks
!= 0) {
655 cndesc
.cd_nameptr
= hfs_attrname
;
656 cndesc
.cd_namelen
= strlen((char *)hfs_attrname
);
657 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSAttributesFileID
;
659 cfork
.cf_size
= SWAP_BE64 (vhp
->attributesFile
.logicalSize
);
660 cfork
.cf_clump
= SWAP_BE32 (vhp
->attributesFile
.clumpSize
);
661 cfork
.cf_blocks
= SWAP_BE32 (vhp
->attributesFile
.totalBlocks
);
662 cfork
.cf_vblocks
= 0;
663 cnattr
.ca_blocks
= cfork
.cf_blocks
;
665 for (int iExtentCounter
= 0; iExtentCounter
< kHFSPlusExtentDensity
; iExtentCounter
++)
667 cfork
.cf_extents
[iExtentCounter
].startBlock
= SWAP_BE32 (vhp
->attributesFile
.extents
[iExtentCounter
].startBlock
);
668 cfork
.cf_extents
[iExtentCounter
].blockCount
= SWAP_BE32 (vhp
->attributesFile
.extents
[iExtentCounter
].blockCount
);
670 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
, &hfsmp
->hfs_attribute_vp
, &newvnode_flags
);
673 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting EA BT\n", retval
);
676 hfsmp
->hfs_attribute_cp
= VTOC(hfsmp
->hfs_attribute_vp
);
678 retval
= MacToVFSError(BTOpenPath(VTOF(hfsmp
->hfs_attribute_vp
),(KeyCompareProcPtr
) hfs_attrkeycompare
));
679 hfs_unlock(hfsmp
->hfs_attribute_cp
);
682 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: BTOpenPath returned (%d) getting EA BT\n", retval
);
686 /* Initialize vnode for virtual attribute data file that spans the
687 * entire file system space for performing I/O to attribute btree
688 * We hold iocount on the attrdata vnode for the entire duration
689 * of mount (similar to btree vnodes)
691 retval
= init_attrdata_vnode(hfsmp
);
694 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: init_attrdata_vnode returned (%d) for virtual EA file\n", retval
);
700 * Set up Startup file vnode
702 if (vhp
->startupFile
.totalBlocks
!= 0) {
703 cndesc
.cd_nameptr
= hfs_startupname
;
704 cndesc
.cd_namelen
= strlen((char *)hfs_startupname
);
705 cndesc
.cd_cnid
= cnattr
.ca_fileid
= kHFSStartupFileID
;
707 cfork
.cf_size
= SWAP_BE64 (vhp
->startupFile
.logicalSize
);
708 cfork
.cf_clump
= SWAP_BE32 (vhp
->startupFile
.clumpSize
);
709 cfork
.cf_blocks
= SWAP_BE32 (vhp
->startupFile
.totalBlocks
);
710 cfork
.cf_vblocks
= 0;
711 cnattr
.ca_blocks
= cfork
.cf_blocks
;
712 for (int iExtentCounter
= 0; iExtentCounter
< kHFSPlusExtentDensity
; iExtentCounter
++)
714 cfork
.cf_extents
[iExtentCounter
].startBlock
= SWAP_BE32 (vhp
->startupFile
.extents
[iExtentCounter
].startBlock
);
715 cfork
.cf_extents
[iExtentCounter
].blockCount
= SWAP_BE32 (vhp
->startupFile
.extents
[iExtentCounter
].blockCount
);
718 retval
= hfs_getnewvnode(hfsmp
, NULL
, NULL
, &cndesc
, 0, &cnattr
, &cfork
, &hfsmp
->hfs_startup_vp
, &newvnode_flags
);
721 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: hfs_getnewvnode returned (%d) getting startup file\n", retval
);
724 hfsmp
->hfs_startup_cp
= VTOC(hfsmp
->hfs_startup_vp
);
725 hfs_unlock(hfsmp
->hfs_startup_cp
);
729 * Pick up volume name and create date
731 * Acquiring the volume name should not manipulate the bitmap, only the catalog
732 * btree and possibly the extents overflow b-tree.
734 retval
= cat_idlookup(hfsmp
, kHFSRootFolderID
, 0, 0, &cndesc
, &cnattr
, NULL
);
737 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: cat_idlookup returned (%d) getting rootfolder \n", retval
);
740 vcb
->hfs_itime
= cnattr
.ca_itime
;
741 vcb
->volumeNameEncodingHint
= cndesc
.cd_encoding
;
742 bcopy(cndesc
.cd_nameptr
, vcb
->vcbVN
, min(255, cndesc
.cd_namelen
));
743 cat_releasedesc(&cndesc
);
749 * A fatal error occurred and the volume cannot be mounted, so
750 * release any resources that we acquired...
754 LFHFS_LOG(LEVEL_DEBUG
, "hfs_MountHFSPlusVolume: encountered error (%d)\n", retval
);
759 u_int32_t
BestBlockSizeFit(u_int32_t allocationBlockSize
, u_int32_t blockSizeLimit
, u_int32_t baseMultiple
) {
761 Compute the optimal (largest) block size (no larger than allocationBlockSize) that is less than the
762 specified limit but still an even multiple of the baseMultiple.
764 int baseBlockCount
, blockCount
;
765 u_int32_t trialBlockSize
;
767 if (allocationBlockSize
% baseMultiple
!= 0) {
769 Whoops: the allocation blocks aren't even multiples of the specified base:
770 no amount of dividing them into even parts will be a multiple, either then!
772 return 512; /* Hope for the best */
775 /* Try the obvious winner first, to prevent 12K allocation blocks, for instance,
776 from being handled as two 6K logical blocks instead of 3 4K logical blocks.
777 Even though the former (the result of the loop below) is the larger allocation
778 block size, the latter is more efficient: */
779 if (allocationBlockSize
% PAGE_SIZE
== 0) return (u_int32_t
)PAGE_SIZE
;
781 /* No clear winner exists: pick the largest even fraction <= MAXBSIZE: */
782 baseBlockCount
= allocationBlockSize
/ baseMultiple
; /* Now guaranteed to be an even multiple */
784 for (blockCount
= baseBlockCount
; blockCount
> 0; --blockCount
) {
785 trialBlockSize
= blockCount
* baseMultiple
;
786 if (allocationBlockSize
% trialBlockSize
== 0) { /* An even multiple? */
787 if ((trialBlockSize
<= blockSizeLimit
) &&
788 (trialBlockSize
% baseMultiple
== 0)) {
789 return trialBlockSize
;
794 /* Note: we should never get here, since blockCount = 1 should always work,
795 but this is nice and safe and makes the compiler happy, too ... */
800 * Lock the HFS global journal lock
803 hfs_lock_global (struct hfsmount
*hfsmp
, enum hfs_locktype locktype
)
805 pthread_t thread
= pthread_self();
807 if (hfsmp
->hfs_global_lockowner
== thread
) {
808 LFHFS_LOG(LEVEL_ERROR
, "hfs_lock_global: locking against myself!");
812 if (locktype
== HFS_SHARED_LOCK
) {
813 lf_lck_rw_lock_shared (&hfsmp
->hfs_global_lock
);
814 hfsmp
->hfs_global_lockowner
= HFS_SHARED_OWNER
;
817 lf_lck_rw_lock_exclusive (&hfsmp
->hfs_global_lock
);
818 hfsmp
->hfs_global_lockowner
= thread
;
825 * Unlock the HFS global journal lock
828 hfs_unlock_global (struct hfsmount
*hfsmp
)
830 pthread_t thread
= pthread_self();
832 /* HFS_LOCK_EXCLUSIVE */
833 if (hfsmp
->hfs_global_lockowner
== thread
) {
834 hfsmp
->hfs_global_lockowner
= NULL
;
835 lf_lck_rw_unlock_exclusive(&hfsmp
->hfs_global_lock
);
837 /* HFS_LOCK_SHARED */
839 lf_lck_rw_unlock_shared(&hfsmp
->hfs_global_lock
);
844 hfs_start_transaction(struct hfsmount
*hfsmp
)
846 int ret
= 0, unlock_on_err
= 0;
847 pthread_t thread
= pthread_self();
849 #ifdef HFS_CHECK_LOCK_ORDER
851 * You cannot start a transaction while holding a system
852 * file lock. (unless the transaction is nested.)
854 if (hfsmp
->jnl
&& journal_owner(hfsmp
->jnl
) != thread
) {
855 if (hfsmp
->hfs_catalog_cp
&& hfsmp
->hfs_catalog_cp
->c_lockowner
== thread
) {
856 LFHFS_LOG(LEVEL_ERROR
, "hfs_start_transaction: bad lock order (cat before jnl)\n");
859 if (hfsmp
->hfs_attribute_cp
&& hfsmp
->hfs_attribute_cp
->c_lockowner
== thread
) {
860 LFHFS_LOG(LEVEL_ERROR
, "hfs_start_transaction: bad lock order (attr before jnl)\n");
863 if (hfsmp
->hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== thread
) {
864 LFHFS_LOG(LEVEL_ERROR
, "hfs_start_transaction: bad lock order (ext before jnl)\n");
868 #endif /* HFS_CHECK_LOCK_ORDER */
873 if (journal_owner(hfsmp
->jnl
) != thread
)
876 * The global lock should be held shared if journal is
877 * active to prevent disabling. If we're not the owner
878 * of the journal lock, verify that we're not already
879 * holding the global lock exclusive before moving on.
881 if (hfsmp
->hfs_global_lockowner
== thread
) {
886 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
888 // Things could have changed
890 hfs_unlock_global(hfsmp
);
899 if (hfsmp
->hfs_global_lockowner
!= thread
) {
900 hfs_lock_global(hfsmp
, HFS_EXCLUSIVE_LOCK
);
902 // Things could have changed
904 hfs_unlock_global(hfsmp
);
908 ExtendedVCB
* vcb
= HFSTOVCB(hfsmp
);
909 if (vcb
->vcbAtrb
& kHFSVolumeUnmountedMask
) {
910 // clear kHFSVolumeUnmountedMask
911 hfs_flushvolumeheader(hfsmp
, HFS_FVH_SKIP_TRANSACTION
);
919 ret
= journal_start_transaction(hfsmp
->jnl
);
927 ++hfsmp
->hfs_transaction_nesting
;
932 if (ret
!= 0 && unlock_on_err
) {
933 hfs_unlock_global (hfsmp
);
940 hfs_end_transaction(struct hfsmount
*hfsmp
)
944 hfs_assert(!hfsmp
->jnl
|| journal_owner(hfsmp
->jnl
) == pthread_self());
945 hfs_assert(hfsmp
->hfs_transaction_nesting
> 0);
947 if (hfsmp
->jnl
&& hfsmp
->hfs_transaction_nesting
== 1)
948 hfs_flushvolumeheader(hfsmp
, HFS_FVH_FLUSH_IF_DIRTY
);
950 bool need_unlock
= !--hfsmp
->hfs_transaction_nesting
;
954 ret
= journal_end_transaction(hfsmp
->jnl
);
962 hfs_unlock_global (hfsmp
);
970 * Flush the contents of the journal to the disk.
972 * - HFS_FLUSH_JOURNAL
973 * Wait to write in-memory journal to the disk consistently.
974 * This means that the journal still contains uncommitted
975 * transactions and the file system metadata blocks in
976 * the journal transactions might be written asynchronously
977 * to the disk. But there is no guarantee that they are
978 * written to the disk before returning to the caller.
979 * Note that this option is sufficient for file system
980 * data integrity as it guarantees consistent journal
981 * content on the disk.
983 * - HFS_FLUSH_JOURNAL_META
984 * Wait to write in-memory journal to the disk
985 * consistently, and also wait to write all asynchronous
986 * metadata blocks to its corresponding locations
987 * consistently on the disk. This is overkill in normal
988 * scenarios but is useful whenever the metadata blocks
989 * are required to be consistent on-disk instead of
990 * just the journalbeing consistent; like before live
991 * verification and live volume resizing. The update of the
992 * metadata doesn't include a barrier of track cache flush.
995 * HFS_FLUSH_JOURNAL + force a track cache flush to media
998 * Force a track cache flush to media.
1000 * - HFS_FLUSH_BARRIER
1001 * Barrier-only flush to ensure write order
/*
 * hfs_flush:
 * Flush the journal and/or the device track cache according to @mode
 * (see the comment block above for the meaning of each hfs_flush_mode_t).
 * Returns an errno_t.
 * NOTE(review): partial extraction -- the switch statement opener, local
 * declarations, break/return lines and closing braces are missing between
 * the numbered fragments below.
 */
1004 errno_t
hfs_flush(struct hfsmount
*hfsmp
, hfs_flush_mode_t mode
) {
/* Default ioctl request: a barrier-only synchronize. */
1007 dk_synchronize_t sync_req
= { .options
= DK_SYNCHRONIZE_OPTION_BARRIER
};
1010 case HFS_FLUSH_JOURNAL_META
:
1011 // wait for journal, metadata blocks and previous async flush to finish
1012 SET(options
, JOURNAL_WAIT_FOR_IO
);
/* fallthrough into the journal-flush cases below (original switch body). */
1016 case HFS_FLUSH_JOURNAL
:
1017 case HFS_FLUSH_JOURNAL_BARRIER
:
1018 case HFS_FLUSH_FULL
:
/* Downgrade a barrier request to a full flush when the device
 * does not advertise barrier support. */
1020 if (mode
== HFS_FLUSH_JOURNAL_BARRIER
&&
1021 !(hfsmp
->hfs_flags
& HFS_FEATURE_BARRIER
))
1022 mode
= HFS_FLUSH_FULL
;
1024 if (mode
== HFS_FLUSH_FULL
)
1025 SET(options
, JOURNAL_FLUSH_FULL
);
1027 /* Only peek at hfsmp->jnl while holding the global lock */
1028 hfs_lock_global (hfsmp
, HFS_SHARED_LOCK
);
1031 ExtendedVCB
* vcb
= HFSTOVCB(hfsmp
);
1032 if (!(vcb
->vcbAtrb
& kHFSVolumeUnmountedMask
)) {
1033 // Set kHFSVolumeUnmountedMask
1034 hfs_flushvolumeheader(hfsmp
, HFS_FVH_MARK_UNMOUNT
);
1036 error
= journal_flush(hfsmp
->jnl
, options
);
1039 hfs_unlock_global (hfsmp
);
1042 * This may result in a double barrier as
1043 * journal_flush may have issued a barrier itself
1045 if (mode
== HFS_FLUSH_JOURNAL_BARRIER
)
1046 error
= ioctl(hfsmp
->hfs_devvp
->psFSRecord
->iFD
, DKIOCSYNCHRONIZE
, (caddr_t
)&sync_req
);
1049 case HFS_FLUSH_CACHE
:
/* options == 0 requests a full track-cache flush, not just a barrier. */
1051 sync_req
.options
= 0;
1055 case HFS_FLUSH_BARRIER
:
1056 // If a barrier-only flush isn't supported, fall back to a full flush.
1057 if (!(hfsmp
->hfs_flags
& HFS_FEATURE_BARRIER
))
1058 sync_req
.options
= 0;
1060 error
= ioctl(hfsmp
->hfs_devvp
->psFSRecord
->iFD
, DKIOCSYNCHRONIZE
, (caddr_t
)&sync_req
);
/* Optional allocation tracer: ring buffers recording recent hfs_malloc /
 * hfs_free calls for leak debugging.  Disabled by default (MALLOC_TRACER 0). */
1071 #define MALLOC_TRACER 0
1074 #define MALLOC_TRACER_SIZE 100000
/* Ring buffer of recent allocations (pointer + size). */
1079 MallocTracer_S gpsMallocTracer
[MALLOC_TRACER_SIZE
];
/* Ring buffer of recent frees. */
1080 MallocTracer_S gpsFreeTracer
[MALLOC_TRACER_SIZE
];
/* guIndex/guOutdex: ring write cursors; guTotal: total events seen. */
1081 uint32_t guIndex
= 0, guOutdex
= 0, guSize
=0, guTotal
= 0;
/* Running byte count of live allocations. */
1082 uint64_t guTotalConsumption
= 0;
/*
 * hfs_malloc:
 * Allocation wrapper around malloc(); panics on a zero-size request and,
 * when MALLOC_TRACER is enabled, records the allocation in gpsMallocTracer.
 * NOTE(review): partial extraction -- the return statement and the
 * MALLOC_TRACER #if guards are missing between the fragments below.
 */
1086 hfs_malloc(size_t size
)
1089 panic("Malloc size is 0");
1091 void *pv
= malloc(size
);
/* Tracer bookkeeping: remember pointer and size in the ring buffer. */
1094 gpsMallocTracer
[guIndex
].pv
= pv
;
1095 gpsMallocTracer
[guIndex
].uSize
= (uint32_t)size
;
1096 guIndex
= (guIndex
+1) % MALLOC_TRACER_SIZE
;
1099 guTotalConsumption
+= size
;
/* NOTE(review): these fragments appear to be the MALLOC_TRACER bookkeeping
 * inside hfs_free(); the function header and surrounding lines were lost in
 * extraction -- confirm against the full source.  The loop below walks the
 * malloc ring backwards looking for the entry that matches @ptr. */
1113 gpsFreeTracer
[guOutdex
].pv
= ptr
;
/* Step the cursor backwards with wrap-around. */
1117 u
= (u
)?(u
-1):(MALLOC_TRACER_SIZE
-1);
1118 if (gpsMallocTracer
[u
].pv
== ptr
) {
/* Keep scanning until the ring is exhausted (or wraps to guIndex). */
1121 bCont
= (guTotal
<MALLOC_TRACER_SIZE
)?(u
):(u
!= guIndex
);
/* Freed pointer was never recorded as allocated. */
1125 panic("undetectable free");
1128 //gpsFreeTracer[guOutdex].uSize = gpsMallocTracer[u].uSize;
1129 //gpsFreeTracer[guOutdex].uSize = guSize;
1130 gpsFreeTracer
[guOutdex
].uSize
= guIndex
;
1132 guOutdex
= (guOutdex
+1) % MALLOC_TRACER_SIZE
;
1134 guTotalConsumption
-= gpsMallocTracer
[u
].uSize
;
/*
 * hfs_mallocz:
 * Zero-filled allocation built on hfs_malloc.
 * NOTE(review): partial extraction -- the zeroing and return statements
 * are missing after the fragment below.
 */
1139 hfs_mallocz(size_t size
)
1141 void *ptr
= hfs_malloc(size
);
1149 * Lock the HFS mount lock
1151 * Note: this is a mutex, not a rw lock!
1154 hfs_lock_mount (struct hfsmount
*hfsmp
)
1156 lf_lck_mtx_lock (&(hfsmp
->hfs_mutex
));
1160 * Unlock the HFS mount lock
1162 * Note: this is a mutex, not a rw lock!
1164 void hfs_unlock_mount (struct hfsmount
*hfsmp
)
1166 lf_lck_mtx_unlock (&(hfsmp
->hfs_mutex
));
/*
 * ReleaseMetaFileVNode:
 * Close the B-tree (if the fork has one open) and reclaim the vnode of a
 * metadata file.  The reclaim happens even if BTClosePath fails.
 * NOTE(review): partial extraction -- braces between fragments are missing.
 */
1170 * ReleaseMetaFileVNode
1174 static void ReleaseMetaFileVNode(struct vnode
*vp
)
1176 struct filefork
*fp
;
1178 if (vp
&& (fp
= VTOF(vp
)))
/* Only forks with an open B-tree control block need BTClosePath. */
1180 if (fp
->fcbBTCBPtr
!= NULL
)
1182 (void)hfs_lock(VTOC(vp
), HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1183 (void) BTClosePath(fp
);
1184 hfs_unlock(VTOC(vp
));
1187 /* release the node even if BTClosePath fails */
1188 hfs_vnop_reclaim(vp
);
/*
 * hfsUnmount:
 * Releases each metadata vnode (attrdata, startup, attribute, catalog,
 * extents, allocation) via ReleaseMetaFileVNode and clears the
 * corresponding hfsmp pointers.
 * NOTE(review): partial extraction -- closing braces and the return path
 * between the numbered fragments are missing.
 */
1192 /*************************************************************
1194 * Unmounts a hfs volume.
1195 * At this point vflush() has been called (to dump all non-metadata files)
1197 *************************************************************/
1200 hfsUnmount( register struct hfsmount
*hfsmp
)
1203 /* Get rid of our attribute data vnode (if any). This is done
1204 * after the vflush() during mount, so we don't need to worry
1207 if (hfsmp
->hfs_attrdata_vp
) {
1208 ReleaseMetaFileVNode(hfsmp
->hfs_attrdata_vp
);
1209 hfsmp
->hfs_attrdata_vp
= NULL
;
1212 if (hfsmp
->hfs_startup_vp
) {
1213 ReleaseMetaFileVNode(hfsmp
->hfs_startup_vp
);
1214 hfsmp
->hfs_startup_cp
= NULL
;
1215 hfsmp
->hfs_startup_vp
= NULL
;
1218 if (hfsmp
->hfs_attribute_vp
) {
1219 ReleaseMetaFileVNode(hfsmp
->hfs_attribute_vp
);
1220 hfsmp
->hfs_attribute_cp
= NULL
;
1221 hfsmp
->hfs_attribute_vp
= NULL
;
1224 if (hfsmp
->hfs_catalog_vp
) {
1225 ReleaseMetaFileVNode(hfsmp
->hfs_catalog_vp
);
1226 hfsmp
->hfs_catalog_cp
= NULL
;
1227 hfsmp
->hfs_catalog_vp
= NULL
;
1230 if (hfsmp
->hfs_extents_vp
) {
1231 ReleaseMetaFileVNode(hfsmp
->hfs_extents_vp
);
1232 hfsmp
->hfs_extents_cp
= NULL
;
1233 hfsmp
->hfs_extents_vp
= NULL
;
1236 if (hfsmp
->hfs_allocation_vp
) {
1237 ReleaseMetaFileVNode(hfsmp
->hfs_allocation_vp
);
1238 hfsmp
->hfs_allocation_cp
= NULL
;
1239 hfsmp
->hfs_allocation_vp
= NULL
;
/*
 * RequireFileLock:
 * Debug-only assertion that the current thread holds the cnode lock of a
 * system file vnode; logs an error identifying which B-tree/file was
 * unlocked.  @shareable permits a shared (non-owner) hold for files that
 * allow it; extents and allocation are always exclusive.
 * NOTE(review): partial extraction -- braces, break statements and the
 * shared-lock bookkeeping between the fragments are missing.
 */
1247 * Check to see if a vnode is locked in the current context
1248 * This is to be used for debugging purposes only!!
1250 void RequireFileLock(FileReference vp
, int shareable
)
1254 /* The extents btree and allocation bitmap are always exclusive. */
1255 if (VTOC(vp
)->c_fileid
== kHFSExtentsFileID
||
1256 VTOC(vp
)->c_fileid
== kHFSAllocationFileID
) {
/* Owner check: exclusive hold means c_lockowner is this thread. */
1260 locked
= VTOC(vp
)->c_lockowner
== pthread_self();
1262 if (!locked
&& !shareable
)
1264 switch (VTOC(vp
)->c_fileid
) {
1265 case kHFSExtentsFileID
:
1266 LFHFS_LOG(LEVEL_ERROR
, "RequireFileLock: extents btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1268 case kHFSCatalogFileID
:
1269 LFHFS_LOG(LEVEL_ERROR
, "RequireFileLock: catalog btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1271 case kHFSAllocationFileID
:
1272 /* The allocation file can hide behind the journal lock. */
1273 if (VTOHFS(vp
)->jnl
== NULL
)
1275 LFHFS_LOG(LEVEL_ERROR
, "RequireFileLock: allocation file not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1278 case kHFSStartupFileID
:
1279 LFHFS_LOG(LEVEL_ERROR
, "RequireFileLock: startup file not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1281 case kHFSAttributesFileID
:
1282 LFHFS_LOG(LEVEL_ERROR
, "RequireFileLock: attributes btree not locked! v: 0x%08X\n #\n", (u_int
)vp
);
1292 * Test if fork has overflow extents.
1295 * non-zero - overflow extents exist
1296 * zero - overflow extents do not exist
1298 bool overflow_extents(struct filefork
*fp
)
1302 if (fp
->ff_extents
[7].blockCount
== 0)
1305 blocks
= fp
->ff_extents
[0].blockCount
+
1306 fp
->ff_extents
[1].blockCount
+
1307 fp
->ff_extents
[2].blockCount
+
1308 fp
->ff_extents
[3].blockCount
+
1309 fp
->ff_extents
[4].blockCount
+
1310 fp
->ff_extents
[5].blockCount
+
1311 fp
->ff_extents
[6].blockCount
+
1312 fp
->ff_extents
[7].blockCount
;
1314 return fp
->ff_blocks
> blocks
;
/*
 * hfs_systemfile_lock:
 * Acquire the requested system-file cnode locks (SFL_* bits) in the fixed
 * order Catalog, Attributes, Startup, Bitmap, Extents, returning the mask
 * of locks actually taken for a later hfs_systemfile_unlock.
 * NOTE(review): partial extraction -- local declarations, closing braces
 * and the return statement between the numbered fragments are missing.
 */
1319 * Lock HFS system file(s).
1321 * This function accepts a @flags parameter which indicates which
1322 * system file locks are required. The value it returns should be
1323 * used in a subsequent call to hfs_systemfile_unlock. The caller
1324 * should treat this value as opaque; it may or may not have a
1325 * relation to the @flags field that is passed in. The *only*
1326 * guarantee that we make is that a value of zero means that no locks
1327 * were taken and that there is no need to call hfs_systemfile_unlock
1328 * (although it is harmless to do so). Recursion is supported but
1329 * care must still be taken to ensure correct lock ordering. Note
1330 * that requests for certain locks may cause other locks to also be
1331 * taken, including locks that are not possible to ask for via the
1335 hfs_systemfile_lock(struct hfsmount
*hfsmp
, int flags
, enum hfs_locktype locktype
)
1337 pthread_t thread
= pthread_self();
1340 * Locking order is Catalog file, Attributes file, Startup file, Bitmap file, Extents file
1342 if (flags
& SFL_CATALOG
) {
/* Skip if this thread already owns the catalog lock (recursion). */
1343 if (hfsmp
->hfs_catalog_cp
1344 && hfsmp
->hfs_catalog_cp
->c_lockowner
!= thread
) {
1345 #ifdef HFS_CHECK_LOCK_ORDER
1346 if (hfsmp
->hfs_attribute_cp
&& hfsmp
->hfs_attribute_cp
->c_lockowner
== current_thread()) {
1347 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Attributes before Catalog)");
1350 if (hfsmp
->hfs_startup_cp
&& hfsmp
->hfs_startup_cp
->c_lockowner
== current_thread()) {
1351 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Startup before Catalog)");
1354 if (hfsmp
-> hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1355 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Extents before Catalog)");
1358 #endif /* HFS_CHECK_LOCK_ORDER */
1360 (void) hfs_lock(hfsmp
->hfs_catalog_cp
, locktype
, HFS_LOCK_DEFAULT
);
1362 * When the catalog file has overflow extents then
1363 * also acquire the extents b-tree lock if its not
1364 * already requested.
1366 if (((flags
& SFL_EXTENTS
) == 0) &&
1367 (hfsmp
->hfs_catalog_vp
!= NULL
) &&
1368 (overflow_extents(VTOF(hfsmp
->hfs_catalog_vp
)))) {
1369 flags
|= SFL_EXTENTS
;
/* No catalog cnode (or already owned): report it as not taken. */
1372 flags
&= ~SFL_CATALOG
;
1376 if (flags
& SFL_ATTRIBUTE
) {
1377 if (hfsmp
->hfs_attribute_cp
1378 && hfsmp
->hfs_attribute_cp
->c_lockowner
!= thread
) {
1379 #ifdef HFS_CHECK_LOCK_ORDER
1380 if (hfsmp
->hfs_startup_cp
&& hfsmp
->hfs_startup_cp
->c_lockowner
== current_thread()) {
1381 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Startup before Attributes)");
1384 if (hfsmp
->hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1385 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Extents before Attributes)");
1388 #endif /* HFS_CHECK_LOCK_ORDER */
1390 (void) hfs_lock(hfsmp
->hfs_attribute_cp
, locktype
, HFS_LOCK_DEFAULT
);
1392 * When the attribute file has overflow extents then
1393 * also acquire the extents b-tree lock if its not
1394 * already requested.
1396 if (((flags
& SFL_EXTENTS
) == 0) &&
1397 (hfsmp
->hfs_attribute_vp
!= NULL
) &&
1398 (overflow_extents(VTOF(hfsmp
->hfs_attribute_vp
)))) {
1399 flags
|= SFL_EXTENTS
;
1402 flags
&= ~SFL_ATTRIBUTE
;
1406 if (flags
& SFL_STARTUP
) {
1407 if (hfsmp
->hfs_startup_cp
1408 && hfsmp
->hfs_startup_cp
->c_lockowner
!= thread
) {
1409 #ifdef HFS_CHECK_LOCK_ORDER
1410 if (hfsmp
-> hfs_extents_cp
&& hfsmp
->hfs_extents_cp
->c_lockowner
== current_thread()) {
1411 LFHFS_LOG(LEVEL_ERROR
, "hfs_systemfile_lock: bad lock order (Extents before Startup)");
1414 #endif /* HFS_CHECK_LOCK_ORDER */
1416 (void) hfs_lock(hfsmp
->hfs_startup_cp
, locktype
, HFS_LOCK_DEFAULT
);
1418 * When the startup file has overflow extents then
1419 * also acquire the extents b-tree lock if its not
1420 * already requested.
1422 if (((flags
& SFL_EXTENTS
) == 0) &&
1423 (hfsmp
->hfs_startup_vp
!= NULL
) &&
1424 (overflow_extents(VTOF(hfsmp
->hfs_startup_vp
)))) {
1425 flags
|= SFL_EXTENTS
;
1428 flags
&= ~SFL_STARTUP
;
1433 * To prevent locks being taken in the wrong order, the extent lock
1434 * gets a bitmap lock as well.
1436 if (flags
& (SFL_BITMAP
| SFL_EXTENTS
)) {
1437 if (hfsmp
->hfs_allocation_cp
) {
1438 (void) hfs_lock(hfsmp
->hfs_allocation_cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1440 * The bitmap lock is also grabbed when only extent lock
1441 * was requested. Set the bitmap lock bit in the lock
1442 * flags which callers will use during unlock.
1444 flags
|= SFL_BITMAP
;
1447 flags
&= ~SFL_BITMAP
;
1451 if (flags
& SFL_EXTENTS
) {
1453 * Since the extents btree lock is recursive we always
1454 * need exclusive access.
1456 if (hfsmp
->hfs_extents_cp
) {
1457 (void) hfs_lock(hfsmp
->hfs_extents_cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
1459 flags
&= ~SFL_EXTENTS
;
/*
 * hfs_systemfile_unlock:
 * Release the system-file cnode locks named by @flags (the value returned
 * by hfs_systemfile_lock), in reverse acquisition order.
 * NOTE(review): partial extraction -- the return type line and closing
 * braces between the fragments are missing.
 */
1467 * unlock HFS system file(s).
1470 hfs_systemfile_unlock(struct hfsmount
*hfsmp
, int flags
)
1475 if (flags
& SFL_STARTUP
&& hfsmp
->hfs_startup_cp
) {
1476 hfs_unlock(hfsmp
->hfs_startup_cp
);
1478 if (flags
& SFL_ATTRIBUTE
&& hfsmp
->hfs_attribute_cp
) {
1479 hfs_unlock(hfsmp
->hfs_attribute_cp
);
1481 if (flags
& SFL_CATALOG
&& hfsmp
->hfs_catalog_cp
) {
1482 hfs_unlock(hfsmp
->hfs_catalog_cp
);
1484 if (flags
& SFL_BITMAP
&& hfsmp
->hfs_allocation_cp
) {
1485 hfs_unlock(hfsmp
->hfs_allocation_cp
);
1487 if (flags
& SFL_EXTENTS
&& hfsmp
->hfs_extents_cp
) {
1488 hfs_unlock(hfsmp
->hfs_extents_cp
);
/*
 * hfs_freeblks:
 * Compute the number of allocation blocks available to callers, starting
 * from freeBlocks and subtracting reserved and loaned/locked blocks
 * (saturating at zero via the > comparisons).
 * NOTE(review): partial extraction -- local declarations, the
 * @wantreserve branch and the return statement are missing between the
 * fragments below.
 */
1493 hfs_freeblks(struct hfsmount
* hfsmp
, int wantreserve
)
1500 * We don't bother taking the mount lock
1501 * to look at these values since the values
1502 * themselves are each updated atomically
1503 * on aligned addresses.
1505 freeblks
= hfsmp
->freeBlocks
;
1506 rsrvblks
= hfsmp
->reserveBlocks
;
1507 loanblks
= hfsmp
->loanedBlocks
+ hfsmp
->lockedBlocks
;
/* Subtract only when it cannot underflow. */
1509 if (freeblks
> rsrvblks
)
1510 freeblks
-= rsrvblks
;
1514 if (freeblks
> loanblks
)
1515 freeblks
-= loanblks
;
/*
 * MacToVFSError:
 * Map HFS/MacOS error codes (negative OSErr values) to positive BSD errno
 * values; positive inputs pass through unchanged.
 * NOTE(review): partial extraction -- the switch opener and several
 * `return` lines (e.g. after dskFulErr/fxOvFlErr/btBadNode) are missing
 * between the numbered fragments below.
 */
1523 * Map HFS Common errors (negative) to BSD error codes (positive).
1524 * Positive errors (ie BSD errors) are passed through unchanged.
1526 short MacToVFSError(OSErr err
)
1531 /* BSD/VFS internal errnos */
1533 case HFS_ERESERVEDNAME
: /* -8 */
1538 case dskFulErr
: /* -34 */
1539 case btNoSpaceAvail
: /* -32733 */
1541 case fxOvFlErr
: /* -32750 */
1544 case btBadNode
: /* -32731 */
1547 case memFullErr
: /* -108 */
1548 return ENOMEM
; /* +12 */
1550 case cmExists
: /* -32718 */
1551 case btExists
: /* -32734 */
1552 return EEXIST
; /* +17 */
1554 case cmNotFound
: /* -32719 */
1555 case btNotFound
: /* -32735 */
1556 return ENOENT
; /* 28 */
1558 case cmNotEmpty
: /* -32717 */
1559 return ENOTEMPTY
; /* 66 */
1561 case cmFThdDirErr
: /* -32714 */
1562 return EISDIR
; /* 21 */
1564 case fxRangeErr
: /* -32751 */
1567 case bdNamErr
: /* -37 */
1568 return ENAMETOOLONG
; /* 63 */
1570 case paramErr
: /* -50 */
1571 case fileBoundsErr
: /* -1309 */
1572 return EINVAL
; /* +22 */
1574 case fsBTBadNodeSize
:
/* Default/fallthrough: treat unknown HFS errors as generic I/O error. */
1578 return EIO
; /* +5 */
/*
 * hfs_getdirhint:
 * Look up (or create/recycle) the directory hint for @index on @dcp's
 * hint list, move it to the head, and (re)initialize its descriptor.
 * NOTE(review): partial extraction -- braces, the need_init/need_remove
 * branching, detach handling and the return statement are missing
 * between the numbered fragments below.
 */
1583 * Find the current thread's directory hint for a given index.
1585 * Requires an exclusive lock on directory cnode.
1587 * Use detach if the cnode lock must be dropped while the hint is still active.
1590 hfs_getdirhint(struct cnode
*dcp
, int index
, int detach
)
1593 directoryhint_t
*hint
;
1594 boolean_t need_remove
, need_init
;
1595 const u_int8_t
* name
;
1600 * Look for an existing hint first. If not found, create a new one (when
1601 * the list is not full) or recycle the oldest hint. Since new hints are
1602 * always added to the head of the list, the last hint is always the
1605 TAILQ_FOREACH(hint
, &dcp
->c_hintlist
, dh_link
)
1607 if (hint
->dh_index
== index
)
1611 { /* found an existing hint */
1616 { /* cannot find an existing hint */
1618 if (dcp
->c_dirhintcnt
< HFS_MAXDIRHINTS
)
1619 { /* we don't need recycling */
1620 /* Create a default directory hint */
1621 hint
= hfs_malloc(sizeof(struct directoryhint
));
1622 ++dcp
->c_dirhintcnt
;
1623 need_remove
= false;
1627 /* recycle the last (i.e., the oldest) hint */
1628 hint
= TAILQ_LAST(&dcp
->c_hintlist
, hfs_hinthead
);
/* Free the recycled hint's name buffer before reuse. */
1629 if ((hint
->dh_desc
.cd_flags
& CD_HASBUF
) && (name
= hint
->dh_desc
.cd_nameptr
))
1631 hint
->dh_desc
.cd_nameptr
= NULL
;
1632 hint
->dh_desc
.cd_namelen
= 0;
1633 hint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
1634 hfs_free((void*)name
);
1641 TAILQ_REMOVE(&dcp
->c_hintlist
, hint
, dh_link
);
1644 --dcp
->c_dirhintcnt
;
/* Most-recently-used hint lives at the head of the list. */
1646 TAILQ_INSERT_HEAD(&dcp
->c_hintlist
, hint
, dh_link
);
/* (Re)initialize the hint's catalog descriptor for @index. */
1650 hint
->dh_index
= index
;
1651 hint
->dh_desc
.cd_flags
= 0;
1652 hint
->dh_desc
.cd_encoding
= 0;
1653 hint
->dh_desc
.cd_namelen
= 0;
1654 hint
->dh_desc
.cd_nameptr
= NULL
;
1655 hint
->dh_desc
.cd_parentcnid
= dcp
->c_fileid
;
1656 hint
->dh_desc
.cd_hint
= dcp
->c_childhint
;
1657 hint
->dh_desc
.cd_cnid
= 0;
/* Timestamp used by hfs_reldirhints' staleness check. */
1659 hint
->dh_time
= (uint32_t) tv
.tv_sec
;
/*
 * hfs_insertdirhint:
 * Re-attach a detached directory hint at the head of @dcp's hint list,
 * logging an error if the hint is already present.
 * NOTE(review): partial extraction -- braces between fragments are missing.
 */
1664 * Insert a detached directory hint back into the list of dirhints.
1666 * Requires an exclusive lock on directory cnode.
1669 hfs_insertdirhint(struct cnode
*dcp
, directoryhint_t
* hint
)
1671 directoryhint_t
*test
;
/* Sanity scan: the hint must not already be on the list. */
1673 TAILQ_FOREACH(test
, &dcp
->c_hintlist
, dh_link
)
1677 LFHFS_LOG(LEVEL_ERROR
, "hfs_insertdirhint: hint %p already on list!", hint
);
1682 TAILQ_INSERT_HEAD(&dcp
->c_hintlist
, hint
, dh_link
);
1683 ++dcp
->c_dirhintcnt
;
/*
 * hfs_reldirhint:
 * Release one directory hint: unlink it from @dcp's list if present
 * (it may be detached), free its name buffer, and free the hint.
 * NOTE(review): partial extraction -- braces and the final free of
 * @relhint are missing between the numbered fragments below.
 */
1687 * Release a single directory hint.
1689 * Requires an exclusive lock on directory cnode.
1692 hfs_reldirhint(struct cnode
*dcp
, directoryhint_t
* relhint
)
1694 const u_int8_t
* name
;
1695 directoryhint_t
*hint
;
1697 /* Check if item is on list (could be detached) */
1698 TAILQ_FOREACH(hint
, &dcp
->c_hintlist
, dh_link
)
1700 if (hint
== relhint
)
1702 TAILQ_REMOVE(&dcp
->c_hintlist
, relhint
, dh_link
);
1703 --dcp
->c_dirhintcnt
;
/* Free the descriptor's name buffer if one was allocated. */
1707 name
= relhint
->dh_desc
.cd_nameptr
;
1708 if ((relhint
->dh_desc
.cd_flags
& CD_HASBUF
) && (name
!= NULL
))
1710 relhint
->dh_desc
.cd_nameptr
= NULL
;
1711 relhint
->dh_desc
.cd_namelen
= 0;
1712 relhint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
1713 hfs_free((void*)name
);
/*
 * hfs_namecmp:
 * Case-insensitive compare of two UTF-8 filenames: both names are decoded
 * to decomposed UTF-16 into one shared buffer, then compared with
 * FastUnicodeCompare.  Returns 0 on a match.
 * NOTE(review): partial extraction -- local declarations, the error/exit
 * labels, the free of the buffer and the return of `cmp` are missing
 * between the numbered fragments below.
 */
1719 * Perform a case-insensitive compare of two UTF-8 filenames.
1721 * Returns 0 if the strings match.
1724 hfs_namecmp(const u_int8_t
*str1
, size_t len1
, const u_int8_t
*str2
, size_t len2
)
1726 u_int16_t
*ustr1
, *ustr2
;
1727 size_t ulen1
, ulen2
;
/* One allocation holds both UTF-16 buffers; ustr2 is the second half. */
1734 maxbytes
= kHFSPlusMaxFileNameChars
<< 1;
1735 ustr1
= hfs_malloc(maxbytes
<< 1);
1736 ustr2
= ustr1
+ (maxbytes
>> 1);
1738 if (utf8_decodestr(str1
, len1
, ustr1
, &ulen1
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
1740 if (utf8_decodestr(str2
, len2
, ustr2
, &ulen2
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
/* utf8_decodestr reports byte lengths; convert to UniChar counts. */
1743 ulen1
= ulen1
/ sizeof(UniChar
);
1744 ulen2
= ulen2
/ sizeof(UniChar
);
1745 cmp
= FastUnicodeCompare(ustr1
, ulen1
, ustr2
, ulen2
);
/*
 * hfs_apendixcmp:
 * Case-insensitive suffix compare: decodes both UTF-8 names to decomposed
 * UTF-16 and compares the trailing ulen2 characters of str1 against str2.
 * Returns 0 when str2 matches the end of str1.
 * NOTE(review): partial extraction -- local declarations, error labels and
 * the return of `cmp` are missing between the numbered fragments below.
 */
1752 * Perform a case-insensitive appendix (suffix) compare of two UTF-8 filenames.
1754 * Returns 0 if the str2 is the same as the end of str1.
1757 hfs_apendixcmp(const u_int8_t
*str1
, size_t len1
, const u_int8_t
*str2
, size_t len2
)
1759 u_int16_t
*ustr1
, *ustr2
, *original_allocation
;
1760 size_t ulen1
, ulen2
;
1764 maxbytes
= kHFSPlusMaxFileNameChars
<< 1;
1765 ustr1
= hfs_malloc(maxbytes
<< 1);
1766 ustr2
= ustr1
+ (maxbytes
>> 1);
/* ustr1 is advanced below; keep the original pointer for hfs_free. */
1767 original_allocation
= ustr1
;
1769 if (utf8_decodestr(str1
, len1
, ustr1
, &ulen1
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
1771 if (utf8_decodestr(str2
, len2
, ustr2
, &ulen2
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
1774 ulen1
= ulen1
/ sizeof(UniChar
);
1775 ulen2
= ulen2
/ sizeof(UniChar
);
/* Point at the last ulen2 characters of str1 for the suffix compare. */
1776 ustr1
+= ulen1
- ulen2
;
1777 cmp
= FastUnicodeCompare(ustr1
, ulen2
, ustr2
, ulen2
);
1779 hfs_free(original_allocation
);
/*
 * hfs_strstr:
 * Case-insensitive substring search: decodes both UTF-8 names to
 * decomposed UTF-16 and slides a window of ulen2 characters across str1
 * using FastUnicodeCompare.  Returns 0 when str2 occurs in str1.
 * NOTE(review): partial extraction -- local declarations, the do-loop
 * opener, exit labels and the return value are missing between the
 * numbered fragments below.
 */
1784 * Perform a case-insensitive strstr of two UTF-8 filenames.
1786 * Returns 0 if the str2 in str1 match.
1789 hfs_strstr(const u_int8_t
*str1
, size_t len1
, const u_int8_t
*str2
, size_t len2
)
1791 u_int16_t
*ustr1
, *ustr2
, *original_allocation
;
1792 size_t ulen1
, ulen2
;
1796 maxbytes
= kHFSPlusMaxFileNameChars
<< 1;
1797 ustr1
= hfs_malloc(maxbytes
<< 1);
1798 ustr2
= ustr1
+ (maxbytes
>> 1);
/* ustr1 is advanced by the scan loop; keep the base pointer to free. */
1799 original_allocation
= ustr1
;
1800 if (utf8_decodestr(str1
, len1
, ustr1
, &ulen1
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
1804 if (utf8_decodestr(str2
, len2
, ustr2
, &ulen2
, maxbytes
, ':', UTF_DECOMPOSED
| UTF_ESCAPE_ILLEGAL
) != 0)
1809 ulen1
= ulen1
/ sizeof(UniChar
);
1810 ulen2
= ulen2
/ sizeof(UniChar
);
/* Remaining text shorter than the pattern: no match possible. */
1813 if (ulen1
-- < ulen2
)
1818 } while (FastUnicodeCompare(ustr1
++, ulen2
, ustr2
, ulen2
) != 0);
1821 hfs_free(original_allocation
);
/*
 * hfs_reldirhints:
 * Release directory hints attached to @dcp.  With @stale_hints_only set,
 * only hints older than HFS_DIRHINT_TTL are released; the reverse walk
 * (oldest first) lets the scan stop at the first hint that is too new.
 * NOTE(review): partial extraction -- the gettimeofday-style fill of `tv`,
 * the free of each hint and closing braces are missing between the
 * numbered fragments below.
 */
1826 * Release directory hints for given directory
1828 * Requires an exclusive lock on directory cnode.
1831 hfs_reldirhints(struct cnode
*dcp
, int stale_hints_only
)
1834 directoryhint_t
*hint
, *prev
;
1835 const u_int8_t
* name
;
1837 if (stale_hints_only
)
1840 /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */
1841 TAILQ_FOREACH_REVERSE_SAFE(hint
, &dcp
->c_hintlist
, hfs_hinthead
, dh_link
, prev
) {
1842 if (stale_hints_only
&& (tv
.tv_sec
- hint
->dh_time
) < HFS_DIRHINT_TTL
)
1843 break; /* stop here if this entry is too new */
/* Free the hint's name buffer before unlinking it. */
1844 name
= hint
->dh_desc
.cd_nameptr
;
1845 if ((hint
->dh_desc
.cd_flags
& CD_HASBUF
) && (name
!= NULL
)) {
1846 hint
->dh_desc
.cd_nameptr
= NULL
;
1847 hint
->dh_desc
.cd_namelen
= 0;
1848 hint
->dh_desc
.cd_flags
&= ~CD_HASBUF
;
1849 hfs_free((void *)name
);
1851 TAILQ_REMOVE(&dcp
->c_hintlist
, hint
, dh_link
);
1853 --dcp
->c_dirhintcnt
;
1857 /* hfs_erase_unused_nodes
1859 * Check whether a volume may suffer from unused Catalog B-tree nodes that
1860 * are not zeroed (due to <rdar://problem/6947811>). If so, just write
1861 * zeroes to the unused nodes.
1863 * How do we detect when a volume needs this repair? We can't always be
1864 * certain. If a volume was created after a certain date, then it may have
1865 * been created with the faulty newfs_hfs. Since newfs_hfs only created one
1866 * clump, we can assume that if a Catalog B-tree is larger than its clump size,
1867 * that means that the entire first clump must have been written to, which means
1868 * there shouldn't be unused and unwritten nodes in that first clump, and this
1869 * repair is not needed.
1871 * We have defined a bit in the Volume Header's attributes to indicate when the
1872 * unused nodes have been repaired. A newer newfs_hfs will set this bit.
1873 * As will fsck_hfs when it repairs the unused nodes.
/*
 * hfs_erase_unused_nodes:
 * If this volume may contain unzeroed unused Catalog B-tree nodes
 * (old-enough volume, catalog still within its first clump, fix bit not
 * set), zero them via BTZeroUnusedNodes inside a transaction and mark the
 * volume fixed (kHFSUnusedNodeFixMask).
 * NOTE(review): partial extraction -- braces, `goto done`-style exits and
 * the return statement are missing between the numbered fragments below.
 */
1875 int hfs_erase_unused_nodes(struct hfsmount
*hfsmp
)
1878 struct filefork
*catalog
;
1881 if (hfsmp
->vcbAtrb
& kHFSUnusedNodeFixMask
)
1883 /* This volume has already been checked and repaired. */
1887 if ((hfsmp
->localCreateDate
< kHFSUnusedNodesFixDate
))
1889 /* This volume is too old to have had the problem. */
1890 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
1894 catalog
= hfsmp
->hfs_catalog_cp
->c_datafork
;
1895 if (catalog
->ff_size
> catalog
->ff_clumpsize
)
1897 /* The entire first clump must have been in use at some point. */
1898 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
1903 * If we get here, we need to zero out those unused nodes.
1905 * We start a transaction and lock the catalog since we're going to be
1906 * making on-disk changes. But note that BTZeroUnusedNodes doesn't actually
1907 * do its writing via the journal, because that would be too much I/O
1908 * to fit in a transaction, and it's a pain to break it up into multiple
1909 * transactions. (It behaves more like growing a B-tree would.)
1911 LFHFS_LOG(LEVEL_DEBUG
, "hfs_erase_unused_nodes: updating volume %s.\n", hfsmp
->vcbVN
);
1912 result
= hfs_start_transaction(hfsmp
);
1915 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
1916 result
= BTZeroUnusedNodes(catalog
);
1917 // vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_erase_unused_nodes");
1918 hfs_systemfile_unlock(hfsmp
, lockflags
);
1919 hfs_end_transaction(hfsmp
);
1921 hfsmp
->vcbAtrb
|= kHFSUnusedNodeFixMask
;
1923 LFHFS_LOG(LEVEL_DEBUG
, "hfs_erase_unused_nodes: done updating volume %s.\n", hfsmp
->vcbVN
);
/*
 * hfs_remove_orphans:
 * Scan the HFS+ private "hardlinks" directory for leftover "temp<cnid>"
 * entries (files/directories unlinked while busy before an unclean
 * unmount), truncate both forks, remove their extended attributes and
 * catalog records, update counts, and finally set HFS_CLEANED_ORPHANS.
 * NOTE(review): partial extraction -- many original lines (loop/brace
 * structure, error exits, several local declarations) are missing between
 * the numbered fragments below; do not infer control flow from adjacency.
 */
1930 * On HFS Plus Volumes, there can be orphaned files or directories
1931 * These are files or directories that were unlinked while busy.
1932 * If the volume was not cleanly unmounted then some of these may
1933 * have persisted and need to be removed.
1936 hfs_remove_orphans(struct hfsmount
* hfsmp
)
1938 BTreeIterator
* iterator
= NULL
;
1939 FSBufferDescriptor btdata
;
1940 struct HFSPlusCatalogFile filerec
;
1941 struct HFSPlusCatalogKey
* keyp
;
1947 cat_cookie_t cookie
;
1950 bool started_tr
= false;
1953 int orphaned_files
= 0;
1954 int orphaned_dirs
= 0;
1956 bzero(&cookie
, sizeof(cookie
));
/* Already cleaned on a previous mount pass: nothing to do. */
1958 if (hfsmp
->hfs_flags
& HFS_CLEANED_ORPHANS
)
1961 vcb
= HFSTOVCB(hfsmp
);
1962 fcb
= VTOF(hfsmp
->hfs_catalog_vp
);
1964 btdata
.bufferAddress
= &filerec
;
1965 btdata
.itemSize
= sizeof(filerec
);
1966 btdata
.itemCount
= 1;
1968 iterator
= hfs_mallocz(sizeof(BTreeIterator
));
1969 if (iterator
== NULL
)
1972 /* Build a key to "temp" */
1973 keyp
= (HFSPlusCatalogKey
*)&iterator
->key
;
1974 keyp
->parentID
= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
;
1975 keyp
->nodeName
.length
= 4; /* "temp" */
1976 keyp
->keyLength
= kHFSPlusCatalogKeyMinimumLength
+ keyp
->nodeName
.length
* 2;
1977 keyp
->nodeName
.unicode
[0] = 't';
1978 keyp
->nodeName
.unicode
[1] = 'e';
1979 keyp
->nodeName
.unicode
[2] = 'm';
1980 keyp
->nodeName
.unicode
[3] = 'p';
1983 * Position the iterator just before the first real temp file/dir.
1985 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
1986 (void) BTSearchRecord(fcb
, iterator
, NULL
, NULL
, iterator
);
1987 hfs_systemfile_unlock(hfsmp
, lockflags
);
1989 /* Visit all the temp files/dirs in the HFS+ private directory. */
1991 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_EXCLUSIVE_LOCK
);
1992 result
= BTIterateRecord(fcb
, kBTreeNextRecord
, iterator
, &btdata
, NULL
);
1993 hfs_systemfile_unlock(hfsmp
, lockflags
);
/* Left the private directory: the scan is complete. */
1996 if (keyp
->parentID
!= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
)
1999 (void) utf8_encodestr(keyp
->nodeName
.unicode
, keyp
->nodeName
.length
* 2,
2000 (u_int8_t
*)filename
, &namelen
, sizeof(filename
), 0, UTF_ADD_NULL_TERM
);
2002 (void) snprintf(tempname
, sizeof(tempname
), "%s%d", HFS_DELETE_PREFIX
, filerec
.fileID
);
2005 * Delete all files (and directories) named "tempxxx",
2006 * where xxx is the file's cnid in decimal.
2009 if (bcmp(tempname
, filename
, namelen
+ 1) != 0)
2012 struct filefork dfork
;
2013 struct filefork rfork
;
2017 bzero(&dfork
, sizeof(dfork
));
2018 bzero(&rfork
, sizeof(rfork
));
2019 bzero(&cnode
, sizeof(cnode
));
2021 if (hfs_start_transaction(hfsmp
) != 0) {
2022 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: failed to start transaction\n");
2028 * Reserve some space in the Catalog file.
2030 if (cat_preflight(hfsmp
, CAT_DELETE
, &cookie
) != 0) {
2031 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: cat_preflight failed\n");
2036 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
2039 /* Build a fake cnode */
2040 cat_convertattr(hfsmp
, (CatalogRecord
*)&filerec
, &cnode
.c_attr
, &dfork
.ff_data
, &rfork
.ff_data
);
2041 cnode
.c_desc
.cd_parentcnid
= hfsmp
->hfs_private_desc
[FILE_HARDLINKS
].cd_cnid
;
2042 cnode
.c_desc
.cd_nameptr
= (const u_int8_t
*)filename
;
2043 cnode
.c_desc
.cd_namelen
= namelen
;
2044 cnode
.c_desc
.cd_cnid
= cnode
.c_attr
.ca_fileid
;
2045 cnode
.c_blocks
= dfork
.ff_blocks
+ rfork
.ff_blocks
;
2047 /* Position iterator at previous entry */
2048 if (BTIterateRecord(fcb
, kBTreePrevRecord
, iterator
,
2053 /* Truncate the file to zero (both forks) */
2054 if (dfork
.ff_blocks
> 0) {
2057 dfork
.ff_cp
= &cnode
;
2058 cnode
.c_datafork
= &dfork
;
2059 cnode
.c_rsrcfork
= NULL
;
2060 fsize
= (u_int64_t
)dfork
.ff_blocks
* (u_int64_t
)HFSTOVCB(hfsmp
)->blockSize
;
/* Big files are truncated in HFS_BIGFILE_SIZE steps (see below). */
2062 if (fsize
> HFS_BIGFILE_SIZE
) {
2063 fsize
-= HFS_BIGFILE_SIZE
;
2068 if (TruncateFileC(vcb
, (FCB
*)&dfork
, fsize
, 1, 0, cnode
.c_attr
.ca_fileid
, false) != 0) {
2069 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: error truncating data fork!\n");
2074 // if we're iteratively truncating this file down,
2075 // then end the transaction and start a new one so
2076 // that no one transaction gets too big.
2079 /* Drop system file locks before starting
2080 * another transaction to preserve lock order.
2082 hfs_systemfile_unlock(hfsmp
, lockflags
);
2084 hfs_end_transaction(hfsmp
);
2086 if (hfs_start_transaction(hfsmp
) != 0) {
2090 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
2096 if (rfork
.ff_blocks
> 0) {
2097 rfork
.ff_cp
= &cnode
;
2098 cnode
.c_datafork
= NULL
;
2099 cnode
.c_rsrcfork
= &rfork
;
2100 if (TruncateFileC(vcb
, (FCB
*)&rfork
, 0, 1, 1, cnode
.c_attr
.ca_fileid
, false) != 0) {
2101 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: error truncating rsrc fork!\n");
2106 // Deal with extended attributes
2107 if (ISSET(cnode
.c_attr
.ca_recflags
, kHFSHasAttributesMask
)) {
2108 // hfs_removeallattr uses its own transactions
2109 hfs_systemfile_unlock(hfsmp
, lockflags
);
2111 hfs_end_transaction(hfsmp
);
2113 hfs_removeallattr(hfsmp
, cnode
.c_attr
.ca_fileid
, &started_tr
);
2116 if (hfs_start_transaction(hfsmp
) != 0) {
2117 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans:: failed to start transaction\n");
2123 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
| SFL_ATTRIBUTE
| SFL_EXTENTS
| SFL_BITMAP
, HFS_EXCLUSIVE_LOCK
);
2126 /* Remove the file or folder record from the Catalog */
2127 if (cat_delete(hfsmp
, &cnode
.c_desc
, &cnode
.c_attr
) != 0) {
2128 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: error deleting cat rec for id %d!\n", cnode
.c_desc
.cd_cnid
);
2129 hfs_systemfile_unlock(hfsmp
, lockflags
);
2131 hfs_volupdate(hfsmp
, VOL_UPDATE
, 0);
2135 mode
= cnode
.c_attr
.ca_mode
& S_IFMT
;
2137 if (mode
== S_IFDIR
) {
2144 /* Update parent and volume counts */
2145 hfsmp
->hfs_private_attr
[FILE_HARDLINKS
].ca_entries
--;
2146 if (mode
== S_IFDIR
) {
2147 DEC_FOLDERCOUNT(hfsmp
, hfsmp
->hfs_private_attr
[FILE_HARDLINKS
]);
2150 (void)cat_update(hfsmp
, &hfsmp
->hfs_private_desc
[FILE_HARDLINKS
],
2151 &hfsmp
->hfs_private_attr
[FILE_HARDLINKS
], NULL
, NULL
);
2153 /* Drop locks and end the transaction */
2154 hfs_systemfile_unlock(hfsmp
, lockflags
);
2155 cat_postflight(hfsmp
, &cookie
);
2156 catlock
= catreserve
= 0;
2159 Now that Catalog is unlocked, update the volume info, making
2160 sure to differentiate between files and directories
2162 if (mode
== S_IFDIR
) {
2163 hfs_volupdate(hfsmp
, VOL_RMDIR
, 0);
2166 hfs_volupdate(hfsmp
, VOL_RMFILE
, 0);
2169 hfs_end_transaction(hfsmp
);
2175 if (orphaned_files
> 0 || orphaned_dirs
> 0)
2176 LFHFS_LOG(LEVEL_ERROR
, "hfs_remove_orphans: Removed %d orphaned / unlinked files and %d directories \n", orphaned_files
, orphaned_dirs
);
/* Common exit path: drop any locks/reservations still held. */
2179 hfs_systemfile_unlock(hfsmp
, lockflags
);
2182 cat_postflight(hfsmp
, &cookie
);
2185 hfs_end_transaction(hfsmp
);
/* Remember the cleanup so later mounts can skip the scan. */
2189 hfsmp
->hfs_flags
|= HFS_CLEANED_ORPHANS
;
/*
 * GetFileInfo:
 * Look up @name in the root directory of an HFS+ volume, filling @fattr
 * and @forkinfo from the catalog.  Returns the file's cnid, or 0 on error
 * (including non-HFS+ volumes).
 * NOTE(review): partial extraction -- local declarations, the error check
 * after cat_lookup and parts of the tail (the HFS_READ_ONLY branch's body)
 * are missing between the numbered fragments below.
 */
2193 u_int32_t
GetFileInfo(ExtendedVCB
*vcb
, const char *name
,
2194 struct cat_attr
*fattr
, struct cat_fork
*forkinfo
) {
2196 struct hfsmount
* hfsmp
;
2197 struct cat_desc jdesc
;
/* Only HFS+ volumes are supported. */
2201 if (vcb
->vcbSigWord
!= kHFSPlusSigWord
)
2204 hfsmp
= VCBTOHFS(vcb
);
/* Build a descriptor for <root>/<name>. */
2206 memset(&jdesc
, 0, sizeof(struct cat_desc
));
2207 jdesc
.cd_parentcnid
= kRootDirID
;
2208 jdesc
.cd_nameptr
= (const u_int8_t
*)name
;
2209 jdesc
.cd_namelen
= strlen(name
);
2211 lockflags
= hfs_systemfile_lock(hfsmp
, SFL_CATALOG
, HFS_SHARED_LOCK
);
2212 error
= cat_lookup(hfsmp
, &jdesc
, 0, NULL
, fattr
, forkinfo
, NULL
);
2213 hfs_systemfile_unlock(hfsmp
, lockflags
);
2216 return (fattr
->ca_fileid
);
2217 } else if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2221 return (0); /* XXX what callers expect on an error */
/*
 * hfs_early_journal_init
 *
 * Early (pre-catalog) journal bring-up during mount: reads the on-disk
 * JournalInfoBlock, then either initializes a fresh journal
 * (kJIJournalNeedInitMask) via journal_create() or opens the existing one via
 * journal_open().  For a read-only, non-root mount it first requires the
 * journal to be clean (journal_is_clean()).  If the journal was opened and an
 * MDB buffer was supplied, the volume header is re-read from disk because a
 * journal replay may have changed it.
 *
 * Parameters:
 *   hfsmp          - mount being initialized; jvp/jnl/jnl_start/jnl_size are set here
 *   vhp            - on-disk (big-endian) volume header; fields read via SWAP_BE32
 *   _args          - optional struct hfs_mount_args (journal flags / tbuffer size)
 *   embeddedOffset - byte offset of an embedded HFS+ volume (0 if not embedded)
 *   mdb_offset     - logical block of the MDB, or 0 to compute it here
 *   mdbp           - if non-NULL, refreshed from disk after the journal is opened
 *
 * NOTE(review): this excerpt is lossy — several guard `if`s, the declarations
 * of `retval`, `jib_size` and `bp`, trailing call arguments, and the
 * `cleanup_dev_name:` label/return are missing lines; confirm against the
 * full source before relying on control flow shown here.
 */
int hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
                           void *_args, off_t embeddedOffset, daddr64_t mdb_offset,
                           HFSMasterDirectoryBlock *mdbp) {
    JournalInfoBlock *jibp;                          /* journal info block, parsed from jinfo_bp */
    void *jinfo_bp = NULL;                           /* raw buffer holding the on-disk journal info block */
    int sectors_per_fsblock, arg_flags=0, arg_tbufsz=0;
    uint32_t blksize = hfsmp->hfs_logical_block_size;
    struct vnode *devvp;
    struct hfs_mount_args *args = _args;
    u_int32_t jib_flags;                             /* journal info flags, swapped to host order */
    u_int64_t jib_offset;                            /* journal byte offset within the volume */

    devvp = hfsmp->hfs_devvp;

    /* Pick up caller-supplied journal tuning from extended mount args. */
    if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS)) {
        arg_flags = args->journal_flags;
        arg_tbufsz = args->journal_tbuffer_size;
    /* NOTE(review): closing brace of this `if` is not visible in this excerpt */

    /* Allocation-block size is a multiple of the logical block size. */
    sectors_per_fsblock = SWAP_BE32(vhp->blockSize) / blksize;

    // Read Journal Info
    jinfo_bp = hfs_malloc(hfsmp->hfs_physical_block_size);
        /* NOTE(review): the allocation-failure guard for this goto is missing from the excerpt */
        goto cleanup_dev_name;

    /* Journal info block location: volume offset plus its allocation block,
     * both expressed in logical (device) blocks. */
    uint32_t ujournalInfoBlock = SWAP_BE32(vhp->journalInfoBlock);
    uint64_t u64JournalOffset =
        (daddr64_t)((embeddedOffset/blksize) + ((u_int64_t)ujournalInfoBlock*sectors_per_fsblock));
    retval = raw_readwrite_read_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size,
                                      jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL);
        /* NOTE(review): the read-failure guard for this goto is missing from the excerpt */
        goto cleanup_dev_name;

    /* On-disk journal info is big-endian; swap fields to host order. */
    jib_flags = SWAP_BE32(jibp->flags);
    jib_size = SWAP_BE64(jibp->size);

    /* Journals on a separate device are not supported here; bail quietly. */
    if (!(jib_flags & kJIJournalInFSMask)) {
        goto cleanup_dev_name;

    /* Journal lives in this volume, so it shares the mount's device vnode. */
    hfsmp->jvp = hfsmp->hfs_devvp;
    jib_offset = SWAP_BE64(jibp->offset);

    // save this off for the hack-y check in hfs_remove()
    hfsmp->jnl_start = jib_offset / SWAP_BE32(vhp->blockSize);
    hfsmp->jnl_size = jib_size;

    if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (hfsmp->hfs_mp->mnt_flag & MNT_ROOTFS) == 0) {
        // if the file system is read-only, check if the journal is empty.
        // if it is, then we can allow the mount. otherwise we have to
        retval = journal_is_clean(hfsmp->jvp,
                                  jib_offset + embeddedOffset,
                                  hfsmp->hfs_logical_block_size,
        /* NOTE(review): remaining journal_is_clean() arguments and the dirty-journal
         * check surrounding the log/goto below are missing from this excerpt */
        LFHFS_LOG(LEVEL_ERROR, "hfs: early journal init: the volume is read-only and journal is dirty. Can not mount volume.\n");
        goto cleanup_dev_name;

    /* A freshly-created (or wiped) journal must be initialized before use. */
    if (jib_flags & kJIJournalNeedInitMask) {
        LFHFS_LOG(LEVEL_ERROR, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
                  jib_offset + embeddedOffset, jib_size);
        hfsmp->jnl = journal_create(hfsmp->jvp,
                                    jib_offset + embeddedOffset,
        /* NOTE(review): remaining journal_create() arguments are missing from this excerpt */

        // no need to start a transaction here... if this were to fail
        // we'd just re-init it on the next mount.
        jib_flags &= ~kJIJournalNeedInitMask;
        jibp->flags = SWAP_BE32(jib_flags);
        /* Persist the cleared NeedInit flag back to the journal info block. */
        raw_readwrite_write_mount(devvp, u64JournalOffset, hfsmp->hfs_physical_block_size,
                                  jinfo_bp, hfsmp->hfs_physical_block_size, NULL, NULL);

        /* else-branch: journal already initialized, just open (and replay) it.
         * NOTE(review): the `} else {` separating this from the create path is
         * missing from this excerpt. */
        LFHFS_LOG(LEVEL_DEFAULT, "hfs: Opening the journal (jib_offset 0x%llx size 0x%llx vhp_blksize %d)...\n",
                  jib_offset + embeddedOffset,
                  jib_size, SWAP_BE32(vhp->blockSize));
        hfsmp->jnl = journal_open(hfsmp->jvp,
                                  jib_offset + embeddedOffset,
        /* NOTE(review): remaining journal_open() arguments are missing from this excerpt */

    if (hfsmp->jnl && mdbp) {
        // reload the mdb because it could have changed
        // if the journal had to be replayed.
        if (mdb_offset == 0) {
            mdb_offset = (daddr64_t)((embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize));
        bp = hfs_malloc(hfsmp->hfs_physical_block_size);
            /* NOTE(review): allocation-failure guard for this goto missing from the excerpt */
            goto cleanup_dev_name;
        /* Round down to a physical-block boundary before re-reading the MDB. */
        uint64_t u64MDBOffset = HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys);
        retval = raw_readwrite_read_mount(devvp, u64MDBOffset, hfsmp->hfs_physical_block_size, bp, hfsmp->hfs_physical_block_size, NULL, NULL);
            /* NOTE(review): the `if (retval)` guard around this error path is missing from the excerpt */
            LFHFS_LOG(LEVEL_ERROR, "hfs: failed to reload the mdb after opening the journal (retval %d)!\n", retval);
            goto cleanup_dev_name;
        /* Copy the (possibly replay-updated) volume header out of the raw buffer. */
        bcopy(bp + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size), mdbp, 512);

    // if we expected the journal to be there and we couldn't
    // create it or open it then we have to bail out.
    if (hfsmp->jnl == NULL) {
        LFHFS_LOG(LEVEL_ERROR, "hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval);
        goto cleanup_dev_name;
2385 // This function will go and re-locate the .journal_info_block and
2386 // the .journal files in case they moved (which can happen if you
2387 // run Norton SpeedDisk). If we fail to find either file we just
2388 // disable journaling for this volume and return. We turn off the
2389 // journaling bit in the vcb and assume it will get written to disk
2390 // later (if it doesn't on the next mount we'd do the same thing
2391 // again which is harmless). If we disable journaling we don't
2392 // return an error so that the volume is still mountable.
2394 // If the info we find for the .journal_info_block and .journal files
2395 // isn't what we had stored, we re-set our cached info and proceed
2396 // with opening the journal normally.
2398 static int hfs_late_journal_init(struct hfsmount
*hfsmp
, HFSPlusVolumeHeader
*vhp
, void *_args
) {
2399 JournalInfoBlock
*jibp
;
2400 void *jinfo_bp
= NULL
;
2401 int sectors_per_fsblock
, arg_flags
=0, arg_tbufsz
=0;
2402 int retval
, write_jibp
= 0, recreate_journal
= 0;
2403 struct vnode
*devvp
;
2404 struct cat_attr jib_attr
, jattr
;
2405 struct cat_fork jib_fork
, jfork
;
2408 struct hfs_mount_args
*args
= _args
;
2409 u_int32_t jib_flags
;
2410 u_int64_t jib_offset
;
2413 devvp
= hfsmp
->hfs_devvp
;
2414 vcb
= HFSTOVCB(hfsmp
);
2416 if (args
!= NULL
&& (args
->flags
& HFSFSMNT_EXTENDED_ARGS
)) {
2417 if (args
->journal_disable
) {
2421 arg_flags
= args
->journal_flags
;
2422 arg_tbufsz
= args
->journal_tbuffer_size
;
2425 fid
= GetFileInfo(vcb
, ".journal_info_block", &jib_attr
, &jib_fork
);
2426 if (fid
== 0 || jib_fork
.cf_extents
[0].startBlock
== 0 || jib_fork
.cf_size
== 0) {
2427 LFHFS_LOG(LEVEL_ERROR
, "hfs: can't find the .journal_info_block! disabling journaling (start: %d).\n",
2428 fid
? jib_fork
.cf_extents
[0].startBlock
: 0);
2429 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2432 hfsmp
->hfs_jnlinfoblkid
= fid
;
2434 // make sure the journal_info_block begins where we think it should.
2435 if (SWAP_BE32(vhp
->journalInfoBlock
) != jib_fork
.cf_extents
[0].startBlock
) {
2436 LFHFS_LOG(LEVEL_ERROR
, "hfs: The journal_info_block moved (was: %d; is: %d). Fixing up\n",
2437 SWAP_BE32(vhp
->journalInfoBlock
), jib_fork
.cf_extents
[0].startBlock
);
2439 vcb
->vcbJinfoBlock
= jib_fork
.cf_extents
[0].startBlock
;
2440 vhp
->journalInfoBlock
= SWAP_BE32(jib_fork
.cf_extents
[0].startBlock
);
2441 recreate_journal
= 1;
2445 sectors_per_fsblock
= SWAP_BE32(vhp
->blockSize
) / hfsmp
->hfs_logical_block_size
;
2447 // Read journal info
2448 jinfo_bp
= hfs_malloc(hfsmp
->hfs_physical_block_size
);
2450 LFHFS_LOG(LEVEL_ERROR
, "hfs: can't alloc memory.\n");
2451 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2455 uint64_t u64JournalOffset
=
2456 (vcb
->hfsPlusIOPosOffset
/ hfsmp
->hfs_logical_block_size
+
2457 ((u_int64_t
)SWAP_BE32(vhp
->journalInfoBlock
)*sectors_per_fsblock
));
2459 retval
= raw_readwrite_read_mount(devvp
, u64JournalOffset
, hfsmp
->hfs_physical_block_size
, jinfo_bp
, hfsmp
->hfs_physical_block_size
, NULL
, NULL
);
2465 LFHFS_LOG(LEVEL_ERROR
, "hfs: can't read journal info block. disabling journaling.\n");
2466 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2471 jib_flags
= SWAP_BE32(jibp
->flags
);
2472 jib_offset
= SWAP_BE64(jibp
->offset
);
2473 jib_size
= SWAP_BE64(jibp
->size
);
2475 fid
= GetFileInfo(vcb
, ".journal", &jattr
, &jfork
);
2476 if (fid
== 0 || jfork
.cf_extents
[0].startBlock
== 0 || jfork
.cf_size
== 0) {
2477 LFHFS_LOG(LEVEL_ERROR
, "hfs: can't find the journal file! disabling journaling (start: %d)\n",
2478 fid
? jfork
.cf_extents
[0].startBlock
: 0);
2480 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2483 hfsmp
->hfs_jnlfileid
= fid
;
2485 // make sure the journal file begins where we think it should.
2486 if ((jib_flags
& kJIJournalInFSMask
) && (jib_offset
/ (u_int64_t
)vcb
->blockSize
) != jfork
.cf_extents
[0].startBlock
) {
2487 LFHFS_LOG(LEVEL_ERROR
, "hfs: The journal file moved (was: %lld; is: %d). Fixing up\n",
2488 (jib_offset
/ (u_int64_t
)vcb
->blockSize
), jfork
.cf_extents
[0].startBlock
);
2490 jib_offset
= (u_int64_t
)jfork
.cf_extents
[0].startBlock
* (u_int64_t
)vcb
->blockSize
;
2492 recreate_journal
= 1;
2495 // check the size of the journal file.
2496 if (jib_size
!= (u_int64_t
)jfork
.cf_extents
[0].blockCount
*vcb
->blockSize
) {
2497 LFHFS_LOG(LEVEL_ERROR
, "hfs: The journal file changed size! (was %lld; is %lld). Fixing up.\n",
2498 jib_size
, (u_int64_t
)jfork
.cf_extents
[0].blockCount
*vcb
->blockSize
);
2500 jib_size
= (u_int64_t
)jfork
.cf_extents
[0].blockCount
* vcb
->blockSize
;
2502 recreate_journal
= 1;
2505 if (!(jib_flags
& kJIJournalInFSMask
)) {
2506 LFHFS_LOG(LEVEL_ERROR
, "hfs: No support for journal on a different volume\n");
2508 vcb
->vcbAtrb
&= ~kHFSVolumeJournaledMask
;
2512 hfsmp
->jvp
= hfsmp
->hfs_devvp
;
2513 jib_offset
+= (off_t
)vcb
->hfsPlusIOPosOffset
;
2515 // save this off for the hack-y check in hfs_remove()
2516 hfsmp
->jnl_start
= jib_offset
/ SWAP_BE32(vhp
->blockSize
);
2517 hfsmp
->jnl_size
= jib_size
;
2519 if ((hfsmp
->hfs_flags
& HFS_READ_ONLY
) && (hfsmp
->hfs_mp
->mnt_flag
& MNT_ROOTFS
) == 0) {
2520 // if the file system is read-only, check if the journal is empty.
2521 // if it is, then we can allow the mount. otherwise we have to
2523 retval
= journal_is_clean(hfsmp
->jvp
,
2527 hfsmp
->hfs_logical_block_size
,
2535 LFHFS_LOG(LEVEL_ERROR
, "hfs_late_journal_init: volume on is read-only and journal is dirty. Can not mount volume.\n");
2541 if ((jib_flags
& kJIJournalNeedInitMask
) || recreate_journal
) {
2542 LFHFS_LOG(LEVEL_ERROR
, "hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2543 jib_offset
, jib_size
);
2544 hfsmp
->jnl
= journal_create(hfsmp
->jvp
,
2548 hfsmp
->hfs_logical_block_size
,
2555 // no need to start a transaction here... if this were to fail
2556 // we'd just re-init it on the next mount.
2557 jib_flags
&= ~kJIJournalNeedInitMask
;
2562 // if we weren't the last person to mount this volume
2563 // then we need to throw away the journal because it
2564 // is likely that someone else mucked with the disk.
2565 // if the journal is empty this is no big deal. if the
2566 // disk is dirty this prevents us from replaying the
2567 // journal over top of changes that someone else made.
2569 arg_flags
|= JOURNAL_RESET
;
2571 //printf("hfs: Opening the journal (joffset 0x%llx sz 0x%llx vhp_blksize %d)...\n",
2573 // jib_size, SWAP_BE32(vhp->blockSize));
2575 hfsmp
->jnl
= journal_open(hfsmp
->jvp
,
2579 hfsmp
->hfs_logical_block_size
,
2589 jibp
->flags
= SWAP_BE32(jib_flags
);
2590 jibp
->offset
= SWAP_BE64(jib_offset
);
2591 jibp
->size
= SWAP_BE64(jib_size
);
2593 uint64_t uActualWrite
= 0;
2594 retval
= raw_readwrite_write_mount(devvp
, u64JournalOffset
, hfsmp
->hfs_physical_block_size
, jinfo_bp
, hfsmp
->hfs_physical_block_size
, &uActualWrite
, NULL
);
2601 // if we expected the journal to be there and we couldn't
2602 // create it or open it then we have to bail out.
2603 if (hfsmp
->jnl
== NULL
) {
2604 LFHFS_LOG(LEVEL_ERROR
, "hfs: late jnl init: failed to open/create the journal (retval %d).\n", retval
);